blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
949c6d9f07351de9dce2dce42d9cd57e27bac03d | 0920b50773cfd231137d2383695a6730d0678628 | /pylib/keys.py | e9b88d82a59a5096cf7a1651b31216abd9793056 | [] | no_license | chyser/bin | 05b67cf299b0e427e253abc42ca015fcdec8e84c | b54f23c6c5f1f19e426ee06c9e9faf9f561ee9a9 | refs/heads/master | 2021-01-19T19:35:05.801722 | 2015-08-19T17:58:29 | 2015-08-19T17:58:29 | 17,319,228 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,443 | py | #!/usr/bin/env python
"""
Library:
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
import string
__QuickChars = '0123456789abcdefghijkmnopqrstuvwxyzABCDEFGHIJKLMNPQRSTUVWXYZ-_+$'
__sd = {}
for idx, ch in enumerate(__QuickChars):
__sd[ch] = idx
#-------------------------------------------------------------------------------
def cvtNum2QChars(num, length=None):
#-------------------------------------------------------------------------------
if num == 0:
s = ['0']
else:
s = []
while num > 0:
s.insert(0, __QuickChars[num & 0b00111111])
num >>= 6
if length:
l = length - len(s)
if l > 0:
s = (['0']*l) + s
#s.reverse()
return ''.join(s)
#-------------------------------------------------------------------------------
def cvtQChars2Num(s):
#-------------------------------------------------------------------------------
num = 0
for ch in s:
num = num << 6 | __sd[ch]
return num
__SimpleChars = string.digits + string.letters
__ManyChars = __SimpleChars + '_()[]+-@!~:;{}|'
__PrintableChars = string.printable[:94]
#-------------------------------------------------------------------------------
def cvtNum2Chars(num, srcChars):
#-------------------------------------------------------------------------------
s = []
mod = len(srcChars)
while num > 0:
num, idx = divmod(num, mod)
s.append(srcChars[idx])
return ''.join(s)
#-------------------------------------------------------------------------------
def cvtNum2AllChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __PrintableChars)
#-------------------------------------------------------------------------------
def cvtNum2SimpleChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __SimpleChars)
#-------------------------------------------------------------------------------
def cvtNum2ManyChars(num):
#-------------------------------------------------------------------------------
return cvtNum2Chars(num, __ManyChars)
#-------------------------------------------------------------------------------
def __test__(verbose=False):
#-------------------------------------------------------------------------------
"""
used for automated module testing. see L{tester}
"""
import pylib.tester as tester
import random
for i in range(100):
n = random.randint(0, 9999999999999999999999999999999999999999999)
s = cvtNum2QChars(n)
a = cvtQChars2Num(s)
print(s, a)
tester.Assert(n == a)
for i in range(100):
n = random.randint(0, 9999999)
s = cvtNum2QChars(n)
a = cvtQChars2Num(s)
print(s, a)
tester.Assert(n == a)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
#-------------------------------------------------------------------------------
import pylib.osscripts as oss
args, opts = oss.gopt(oss.argv[1:], [], [], __test__.__doc__)
s = cvtNum2SChars(-123456789, 16)
print(s)
print(cvtSChars2Num(s))
res = not __test__(verbose=True)
#oss.exit(res)
| [
"chris.hyser@oracle.com"
] | chris.hyser@oracle.com |
b17ff877803df569c734b00023bb306e5ed63be5 | e0c8e66af3a72a1cc534d7a90fead48754d266b3 | /pandas/core/internals.py | 1071ebcc1ba70401b25fd6e5215de510cd51775e | [
"BSD-3-Clause"
] | permissive | gwtaylor/pandas | e12b0682347b9f03a24d6bff3e14f563cb7a3758 | 7b0349f0545011a6cac2422b8d8d0f409ffd1e15 | refs/heads/master | 2021-01-15T17:51:47.147334 | 2012-01-13T17:53:56 | 2012-01-13T17:53:56 | 3,174,111 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 33,514 | py | import itertools
from numpy import nan
import numpy as np
from pandas.core.index import Index, _ensure_index
import pandas.core.common as com
import pandas._tseries as lib
class Block(object):
"""
Canonical n-dimensional unit of homogeneous dtype contained in a pandas data
structure
Index-ignorant; let the container take care of that
"""
__slots__ = ['items', 'ref_items', '_ref_locs', 'values', 'ndim']
def __init__(self, values, items, ref_items, ndim=2,
do_integrity_check=False):
if issubclass(values.dtype.type, basestring):
values = np.array(values, dtype=object)
assert(values.ndim == ndim)
assert(len(items) == len(values))
self.values = values
self.ndim = ndim
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
if do_integrity_check:
self._check_integrity()
def _check_integrity(self):
if len(self.items) < 2:
return
# monotonicity
return (self.ref_locs[1:] > self.ref_locs[:-1]).all()
_ref_locs = None
@property
def ref_locs(self):
if self._ref_locs is None:
indexer = self.ref_items.get_indexer(self.items)
assert((indexer != -1).all())
self._ref_locs = indexer
return self._ref_locs
def set_ref_items(self, ref_items, maybe_rename=True):
"""
If maybe_rename=True, need to set the items for this guy
"""
assert(isinstance(ref_items, Index))
if maybe_rename:
self.items = ref_items.take(self.ref_locs)
self.ref_items = ref_items
def __repr__(self):
shape = ' x '.join([str(s) for s in self.shape])
name = type(self).__name__
return '%s: %s, %s, dtype %s' % (name, self.items, shape, self.dtype)
def __contains__(self, item):
return item in self.items
def __len__(self):
return len(self.values)
def __getstate__(self):
# should not pickle generally (want to share ref_items), but here for
# completeness
return (self.items, self.ref_items, self.values)
def __setstate__(self, state):
items, ref_items, values = state
self.items = _ensure_index(items)
self.ref_items = _ensure_index(ref_items)
self.values = values
self.ndim = values.ndim
@property
def shape(self):
return self.values.shape
@property
def dtype(self):
return self.values.dtype
def copy(self, deep=True):
values = self.values
if deep:
values = values.copy()
return make_block(values, self.items, self.ref_items)
def merge(self, other):
assert(self.ref_items.equals(other.ref_items))
# Not sure whether to allow this or not
# if not union_ref.equals(other.ref_items):
# union_ref = self.ref_items + other.ref_items
return _merge_blocks([self, other], self.ref_items)
def reindex_axis(self, indexer, mask, needs_masking, axis=0):
"""
Reindex using pre-computed indexer information
"""
if self.values.size > 0:
new_values = com.take_fast(self.values, indexer, mask,
needs_masking, axis=axis)
else:
shape = list(self.shape)
shape[axis] = len(indexer)
new_values = np.empty(shape)
new_values.fill(np.nan)
return make_block(new_values, self.items, self.ref_items)
def reindex_items_from(self, new_ref_items, copy=True):
"""
Reindex to only those items contained in the input set of items
E.g. if you have ['a', 'b'], and the input items is ['b', 'c', 'd'],
then the resulting items will be ['b']
Returns
-------
reindexed : Block
"""
new_ref_items, indexer = self.items.reindex(new_ref_items)
if indexer is None:
new_items = new_ref_items
new_values = self.values.copy() if copy else self.values
else:
mask = indexer != -1
masked_idx = indexer[mask]
if self.values.ndim == 2:
new_values = com.take_2d(self.values, masked_idx, axis=0,
needs_masking=False)
else:
new_values = self.values.take(masked_idx, axis=0)
new_items = self.items.take(masked_idx)
return make_block(new_values, new_items, new_ref_items)
def get(self, item):
loc = self.items.get_loc(item)
return self.values[loc]
def set(self, item, value):
"""
Modify Block in-place with new item value
Returns
-------
None
"""
loc = self.items.get_loc(item)
self.values[loc] = value
def delete(self, item):
"""
Returns
-------
y : Block (new object)
"""
loc = self.items.get_loc(item)
new_items = self.items.delete(loc)
new_values = np.delete(self.values, loc, 0)
return make_block(new_values, new_items, self.ref_items)
def split_block_at(self, item):
"""
Split block around given column, for "deleting" a column without
having to copy data by returning views on the original array
Returns
-------
leftb, rightb : (Block or None, Block or None)
"""
loc = self.items.get_loc(item)
if len(self.items) == 1:
# no blocks left
return None, None
if loc == 0:
# at front
left_block = None
right_block = make_block(self.values[1:], self.items[1:].copy(),
self.ref_items)
elif loc == len(self.values) - 1:
# at back
left_block = make_block(self.values[:-1], self.items[:-1].copy(),
self.ref_items)
right_block = None
else:
# in the middle
left_block = make_block(self.values[:loc],
self.items[:loc].copy(), self.ref_items)
right_block = make_block(self.values[loc + 1:],
self.items[loc + 1:].copy(), self.ref_items)
return left_block, right_block
def fillna(self, value):
new_values = self.values.copy()
mask = com.isnull(new_values.ravel())
new_values.flat[mask] = value
return make_block(new_values, self.items, self.ref_items)
#-------------------------------------------------------------------------------
# Is this even possible?
class FloatBlock(Block):
def should_store(self, value):
# when inserting a column should not coerce integers to floats
# unnecessarily
return issubclass(value.dtype.type, np.floating)
class IntBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.integer)
class BoolBlock(Block):
def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
class ObjectBlock(Block):
def should_store(self, value):
return not issubclass(value.dtype.type,
(np.integer, np.floating, np.bool_))
def make_block(values, items, ref_items, do_integrity_check=False):
dtype = values.dtype
vtype = dtype.type
if issubclass(vtype, np.floating):
klass = FloatBlock
elif issubclass(vtype, np.integer):
if vtype != np.int64:
values = values.astype('i8')
klass = IntBlock
elif dtype == np.bool_:
klass = BoolBlock
else:
klass = ObjectBlock
return klass(values, items, ref_items, ndim=values.ndim,
do_integrity_check=do_integrity_check)
# TODO: flexible with index=None and/or items=None
class BlockManager(object):
"""
Core internal data structure to implement DataFrame
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = ['axes', 'blocks', 'ndim']
def __init__(self, blocks, axes, do_integrity_check=True):
self.axes = [_ensure_index(ax) for ax in axes]
self.blocks = blocks
ndim = len(axes)
for block in blocks:
assert(ndim == block.values.ndim)
if do_integrity_check:
self._verify_integrity()
@property
def ndim(self):
return len(self.axes)
def is_mixed_dtype(self):
counts = set()
for block in self.blocks:
counts.add(block.dtype)
if len(counts) > 1:
return True
return False
def set_axis(self, axis, value):
cur_axis = self.axes[axis]
if len(value) != len(cur_axis):
raise Exception('Length mismatch (%d vs %d)'
% (len(value), len(cur_axis)))
self.axes[axis] = _ensure_index(value)
if axis == 0:
for block in self.blocks:
block.set_ref_items(self.items, maybe_rename=True)
# make items read only for now
def _get_items(self):
return self.axes[0]
items = property(fget=_get_items)
def set_items_norename(self, value):
value = _ensure_index(value)
self.axes[0] = value
for block in self.blocks:
block.set_ref_items(value, maybe_rename=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [b.items for b in self.blocks]
axes_array = [ax for ax in self.axes]
return axes_array, block_values, block_items
def __setstate__(self, state):
# discard anything after 3rd, support beta pickling format for a little
# while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [_ensure_index(ax) for ax in ax_arrays]
blocks = []
for values, items in zip(bvalues, bitems):
blk = make_block(values, items, self.axes[0],
do_integrity_check=True)
blocks.append(blk)
self.blocks = blocks
def __len__(self):
return len(self.items)
def __repr__(self):
output = 'BlockManager'
for i, ax in enumerate(self.axes):
if i == 0:
output += '\nItems: %s' % ax
else:
output += '\nAxis %d: %s' % (i, ax)
for block in self.blocks:
output += '\n%s' % repr(block)
return output
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
def _verify_integrity(self):
_union_block_items(self.blocks)
mgr_shape = self.shape
for block in self.blocks:
assert(block.values.shape[1:] == mgr_shape[1:])
tot_items = sum(len(x.items) for x in self.blocks)
assert(len(self.items) == tot_items)
def astype(self, dtype):
new_blocks = []
for block in self.blocks:
newb = make_block(block.values.astype(dtype), block.items,
block.ref_items)
new_blocks.append(newb)
new_mgr = BlockManager(new_blocks, self.axes)
return new_mgr.consolidate()
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
dtypes = [blk.dtype for blk in self.blocks]
return len(dtypes) == len(set(dtypes))
def get_slice(self, slobj, axis=0):
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
if axis == 0:
new_items = new_axes[0]
if len(self.blocks) == 1:
blk = self.blocks[0]
newb = make_block(blk.values[slobj], new_items,
new_items)
new_blocks = [newb]
else:
return self.reindex_items(new_items)
else:
new_blocks = self._slice_blocks(slobj, axis)
return BlockManager(new_blocks, new_axes, do_integrity_check=False)
def _slice_blocks(self, slobj, axis):
new_blocks = []
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = slobj
slicer = tuple(slicer)
for block in self.blocks:
newb = make_block(block.values[slicer], block.items,
block.ref_items)
new_blocks.append(newb)
return new_blocks
def get_series_dict(self):
# For DataFrame
return _blocks_to_series_dict(self.blocks, self.axes[1])
@classmethod
def from_blocks(cls, blocks, index):
# also checks for overlap
items = _union_block_items(blocks)
return BlockManager(blocks, [items, index])
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
deep : boolean, default True
If False, return shallow copy (do not copy data)
Returns
-------
copy : BlockManager
"""
copy_blocks = [block.copy(deep=deep) for block in self.blocks]
# copy_axes = [ax.copy() for ax in self.axes]
copy_axes = list(self.axes)
return BlockManager(copy_blocks, copy_axes, do_integrity_check=False)
def as_matrix(self, items=None):
if len(self.blocks) == 0:
mat = np.empty(self.shape, dtype=float)
elif len(self.blocks) == 1:
blk = self.blocks[0]
if items is None or blk.items.equals(items):
# if not, then just call interleave per below
mat = blk.values
else:
mat = self.reindex_items(items).as_matrix()
else:
if items is None:
mat = self._interleave(self.items)
else:
mat = self.reindex_items(items).as_matrix()
return mat
def _interleave(self, items):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
items = _ensure_index(items)
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(len(items), dtype=bool)
# By construction, all of the item should be covered by one of the
# blocks
for block in self.blocks:
indexer = items.get_indexer(block.items)
assert((indexer != -1).all())
result[indexer] = block.values
itemmask[indexer] = 1
assert(itemmask.all())
return result
def xs(self, key, axis=1, copy=True):
assert(axis >= 1)
loc = self.axes[axis].get_loc(key)
slicer = [slice(None, None) for _ in range(self.ndim)]
slicer[axis] = loc
slicer = tuple(slicer)
new_axes = list(self.axes)
# could be an array indexer!
if isinstance(loc, (slice, np.ndarray)):
new_axes[axis] = new_axes[axis][loc]
else:
new_axes.pop(axis)
new_blocks = []
if len(self.blocks) > 1:
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
for blk in self.blocks:
newb = make_block(blk.values[slicer], blk.items, blk.ref_items)
new_blocks.append(newb)
elif len(self.blocks) == 1:
vals = self.blocks[0].values[slicer]
if copy:
vals = vals.copy()
new_blocks = [make_block(vals, self.items, self.items)]
return BlockManager(new_blocks, new_axes)
def fast_2d_xs(self, loc, copy=False):
"""
"""
if len(self.blocks) == 1:
result = self.blocks[0].values[:, loc]
if copy:
result = result.copy()
return result
if not copy:
raise Exception('cannot get view of mixed-type or '
'non-consolidated DataFrame')
dtype = _interleaved_dtype(self.blocks)
items = self.items
n = len(items)
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
values = blk.values
for j, item in enumerate(blk.items):
i = items.get_loc(item)
result[i] = values[j, loc]
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
new_blocks = _consolidate(self.blocks, self.items)
return BlockManager(new_blocks, self.axes)
def get(self, item):
_, block = self._find_block(item)
return block.get(item)
def get_scalar(self, tup):
"""
Retrieve single item
"""
item = tup[0]
_, blk = self._find_block(item)
# this could obviously be seriously sped up in cython
item_loc = blk.items.get_loc(item),
full_loc = item_loc + tuple(ax.get_loc(x)
for ax, x in zip(self.axes[1:], tup[1:]))
return blk.values[full_loc]
def delete(self, item):
i, _ = self._find_block(item)
loc = self.items.get_loc(item)
new_items = Index(np.delete(np.asarray(self.items), loc))
self._delete_from_block(i, item)
self.set_items_norename(new_items)
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
if value.ndim == self.ndim - 1:
value = value.reshape((1,) + value.shape)
assert(value.shape[1:] == self.shape[1:])
if item in self.items:
i, block = self._find_block(item)
if not block.should_store(value):
# delete from block, create and append new block
self._delete_from_block(i, item)
self._add_new_block(item, value)
else:
block.set(item, value)
else:
# insert at end
self.insert(len(self.items), item, value)
def insert(self, loc, item, value):
if item in self.items:
raise Exception('cannot insert %s, already exists' % item)
new_items = self.items.insert(loc, item)
self.set_items_norename(new_items)
# new block
self._add_new_block(item, value)
def _delete_from_block(self, i, item):
"""
Delete and maybe remove the whole block
"""
block = self.blocks.pop(i)
new_left, new_right = block.split_block_at(item)
if new_left is not None:
self.blocks.append(new_left)
if new_right is not None:
self.blocks.append(new_right)
def _add_new_block(self, item, value):
# Do we care about dtype at the moment?
# hm, elaborate hack?
loc = self.items.get_loc(item)
new_block = make_block(value, self.items[loc:loc+1].copy(),
self.items)
self.blocks.append(new_block)
def _find_block(self, item):
self._check_have(item)
for i, block in enumerate(self.blocks):
if item in block:
return i, block
def _check_have(self, item):
if item not in self.items:
raise KeyError('no item named %s' % str(item))
def reindex_axis(self, new_axis, method=None, axis=0, copy=True):
new_axis = _ensure_index(new_axis)
cur_axis = self.axes[axis]
if new_axis.equals(cur_axis):
if copy:
result = self.copy(deep=True)
result.axes[axis] = new_axis
return result
else:
return self
if axis == 0:
assert(method is None)
return self.reindex_items(new_axis)
new_axis, indexer = cur_axis.reindex(new_axis, method)
return self.reindex_indexer(new_axis, indexer, axis=axis)
def reindex_indexer(self, new_axis, indexer, axis=1):
"""
pandas-indexer with -1's only.
"""
if axis == 0:
return self._reindex_indexer_items(new_axis, indexer)
mask = indexer == -1
# TODO: deal with length-0 case? or does it fall out?
needs_masking = len(new_axis) > 0 and mask.any()
new_blocks = []
for block in self.blocks:
newb = block.reindex_axis(indexer, mask, needs_masking,
axis=axis)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(new_blocks, new_axes)
def _reindex_indexer_items(self, new_items, indexer):
# TODO: less efficient than I'd like
item_order = com.take_1d(self.items.values, indexer)
# keep track of what items aren't found anywhere
mask = np.zeros(len(item_order), dtype=bool)
new_blocks = []
for blk in self.blocks:
blk_indexer = blk.items.get_indexer(item_order)
selector = blk_indexer != -1
# update with observed items
mask |= selector
if not selector.any():
continue
new_block_items = new_items.take(selector.nonzero()[0])
new_values = com.take_fast(blk.values, blk_indexer[selector],
None, False, axis=0)
new_blocks.append(make_block(new_values, new_block_items,
new_items))
if not mask.all():
na_items = new_items[-mask]
na_block = self._make_na_block(na_items, new_items)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def reindex_items(self, new_items, copy=True):
"""
"""
new_items = _ensure_index(new_items)
data = self
if not data.is_consolidated():
data = data.consolidate()
return data.reindex_items(new_items)
# TODO: this part could be faster (!)
new_items, indexer = self.items.reindex(new_items)
# could have some pathological (MultiIndex) issues here
new_blocks = []
if indexer is None:
for blk in self.blocks:
if copy:
new_blocks.append(blk.reindex_items_from(new_items))
else:
new_blocks.append(blk)
else:
for block in self.blocks:
newb = block.reindex_items_from(new_items, copy=copy)
if len(newb.items) > 0:
new_blocks.append(newb)
mask = indexer == -1
if mask.any():
extra_items = new_items[mask]
na_block = self._make_na_block(extra_items, new_items)
new_blocks.append(na_block)
new_blocks = _consolidate(new_blocks, new_items)
return BlockManager(new_blocks, [new_items] + self.axes[1:])
def _make_na_block(self, items, ref_items):
block_shape = list(self.shape)
block_shape[0] = len(items)
block_values = np.empty(block_shape, dtype=np.float64)
block_values.fill(nan)
na_block = make_block(block_values, items, ref_items,
do_integrity_check=True)
return na_block
def take(self, indexer, axis=1):
if axis == 0:
raise NotImplementedError
indexer = np.asarray(indexer, dtype='i4')
n = len(self.axes[axis])
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_axes = list(self.axes)
new_axes[axis] = self.axes[axis].take(indexer)
new_blocks = []
for blk in self.blocks:
new_values = com.take_fast(blk.values, indexer,
None, False, axis=axis)
newb = make_block(new_values, blk.items, self.items)
new_blocks.append(newb)
return BlockManager(new_blocks, new_axes)
def merge(self, other, lsuffix=None, rsuffix=None):
assert(self._is_indexed_like(other))
this, other = self._maybe_rename_join(other, lsuffix, rsuffix)
cons_items = this.items + other.items
consolidated = _consolidate(this.blocks + other.blocks, cons_items)
new_axes = list(this.axes)
new_axes[0] = cons_items
return BlockManager(consolidated, new_axes)
def _maybe_rename_join(self, other, lsuffix, rsuffix, exclude=None,
copydata=True):
to_rename = self.items.intersection(other.items)
if exclude is not None and len(exclude) > 0:
to_rename = to_rename - exclude
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise Exception('columns overlap: %s' % to_rename)
def lrenamer(x):
if x in to_rename:
return '%s%s' % (x, lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '%s%s' % (x, rsuffix)
return x
# XXX: COPIES DATA!
this = self.rename_items(lrenamer, copydata=copydata)
other = other.rename_items(rrenamer, copydata=copydata)
else:
this = self
return this, other
def _is_indexed_like(self, other):
"""
Check all axes except items
"""
assert(self.ndim == other.ndim)
for ax, oax in zip(self.axes[1:], other.axes[1:]):
if not ax.equals(oax):
return False
return True
def rename_axis(self, mapper, axis=1):
new_axis = Index([mapper(x) for x in self.axes[axis]])
new_axis._verify_integrity()
new_axes = list(self.axes)
new_axes[axis] = new_axis
return BlockManager(self.blocks, new_axes)
def rename_items(self, mapper, copydata=True):
new_items = Index([mapper(x) for x in self.items])
new_items._verify_integrity()
new_blocks = []
for block in self.blocks:
newb = block.copy(deep=copydata)
newb.set_ref_items(new_items, maybe_rename=True)
new_blocks.append(newb)
new_axes = list(self.axes)
new_axes[0] = new_items
return BlockManager(new_blocks, new_axes)
def add_prefix(self, prefix):
f = (('%s' % prefix) + '%s').__mod__
return self.rename_items(f)
def add_suffix(self, suffix):
f = ('%s' + ('%s' % suffix)).__mod__
return self.rename_items(f)
def fillna(self, value):
"""
"""
new_blocks = [b.fillna(value) for b in self.blocks]
return BlockManager(new_blocks, self.axes)
@property
def block_id_vector(self):
# TODO
result = np.empty(len(self.items), dtype=int)
result.fill(-1)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
assert((indexer != -1).all())
result.put(indexer, i)
assert((result >= 0).all())
return result
@property
def item_dtypes(self):
result = np.empty(len(self.items), dtype='O')
mask = np.zeros(len(self.items), dtype=bool)
for i, blk in enumerate(self.blocks):
indexer = self.items.get_indexer(blk.items)
result.put(indexer, blk.values.dtype.name)
mask.put(indexer, 1)
assert(mask.all())
return result
def form_blocks(data, axes):
# pre-filter out items if we passed it
items = axes[0]
if len(data) < len(items):
extra_items = items - Index(data.keys())
else:
extra_items = []
# put "leftover" items in float bucket, where else?
# generalize?
float_dict = {}
int_dict = {}
bool_dict = {}
object_dict = {}
for k, v in data.iteritems():
if issubclass(v.dtype.type, np.floating):
float_dict[k] = v
elif issubclass(v.dtype.type, np.integer):
int_dict[k] = v
elif v.dtype == np.bool_:
bool_dict[k] = v
else:
object_dict[k] = v
blocks = []
if len(float_dict):
float_block = _simple_blockify(float_dict, items, np.float64)
blocks.append(float_block)
if len(int_dict):
int_block = _simple_blockify(int_dict, items, np.int64)
blocks.append(int_block)
if len(bool_dict):
bool_block = _simple_blockify(bool_dict, items, np.bool_)
blocks.append(bool_block)
if len(object_dict) > 0:
object_block = _simple_blockify(object_dict, items, np.object_)
blocks.append(object_block)
if len(extra_items):
shape = (len(extra_items),) + tuple(len(x) for x in axes[1:])
block_values = np.empty(shape, dtype=float)
block_values.fill(nan)
na_block = make_block(block_values, extra_items, items,
do_integrity_check=True)
blocks.append(na_block)
blocks = _consolidate(blocks, items)
return blocks
def _simple_blockify(dct, ref_items, dtype):
block_items, values = _stack_dict(dct, ref_items, dtype)
# CHECK DTYPE?
if values.dtype != dtype: # pragma: no cover
values = values.astype(dtype)
return make_block(values, block_items, ref_items, do_integrity_check=True)
def _stack_dict(dct, ref_items, dtype):
from pandas.core.series import Series
# fml
def _asarray_compat(x):
# asarray shouldn't be called on SparseSeries
if isinstance(x, Series):
return x.values
else:
return np.asarray(x)
def _shape_compat(x):
# sparseseries
if isinstance(x, Series):
return len(x),
else:
return x.shape
items = [x for x in ref_items if x in dct]
first = dct[items[0]]
shape = (len(dct),) + _shape_compat(first)
stacked = np.empty(shape, dtype=dtype)
for i, item in enumerate(items):
stacked[i] = _asarray_compat(dct[item])
# stacked = np.vstack([_asarray_compat(dct[k]) for k in items])
return items, stacked
def _blocks_to_series_dict(blocks, index=None):
from pandas.core.series import Series
series_dict = {}
for block in blocks:
for item, vec in zip(block.items, block.values):
series_dict[item] = Series(vec, index=index, name=item)
return series_dict
def _interleaved_dtype(blocks):
from collections import defaultdict
counts = defaultdict(lambda: 0)
for x in blocks:
counts[type(x)] += 1
have_int = counts[IntBlock] > 0
have_bool = counts[BoolBlock] > 0
have_object = counts[ObjectBlock] > 0
have_float = counts[FloatBlock] > 0
have_numeric = have_float or have_int
if have_object:
return np.object_
elif have_bool and have_numeric:
return np.object_
elif have_bool:
return np.bool_
elif have_int and not have_float:
return np.int64
else:
return np.float64
def _consolidate(blocks, items):
"""
Merge blocks having same dtype
"""
get_dtype = lambda x: x.dtype
# sort by dtype
grouper = itertools.groupby(sorted(blocks, key=get_dtype),
lambda x: x.dtype)
new_blocks = []
for dtype, group_blocks in grouper:
new_block = _merge_blocks(list(group_blocks), items)
new_blocks.append(new_block)
return new_blocks
# TODO: this could be much optimized
def _merge_blocks(blocks, items):
if len(blocks) == 1:
return blocks[0]
new_values = np.vstack([b.values for b in blocks])
new_items = blocks[0].items.append([b.items for b in blocks[1:]])
new_block = make_block(new_values, new_items, items,
do_integrity_check=True)
return new_block.reindex_items_from(items)
def _union_block_items(blocks):
tot_len = 0
all_items = []
slow = False
for b in blocks:
tot_len += len(b.items)
if type(b.items) != Index:
slow = True
all_items.append(b.items)
if slow:
the_union = _union_items_slow(all_items)
else:
the_union = Index(lib.fast_unique_multiple(all_items))
if tot_len > len(the_union):
raise Exception('item names overlap')
return the_union
def _union_items_slow(all_items):
seen = None
for items in all_items:
if seen is None:
seen = items
else:
seen = seen.union(items)
return seen
| [
"wesmckinn@gmail.com"
] | wesmckinn@gmail.com |
4559f5f956f5f1d1aca521001d1a56aa006e342c | c2e969a4a54d54426675639a1dc8e0cb86e7a272 | /mbed_devices/_internal/mbed_tools/list_connected_devices.py | 924000ba8a97dddb551a8d8bf57ce56ae2f90fbe | [
"Apache-2.0"
] | permissive | ARMmbed/mbed-devices | e773caf78b29c5f1eb2e59485c6e4a2847630eef | d9f459cbe47a341734c0813ebcdd25633237e1d9 | refs/heads/master | 2023-03-16T15:58:40.202451 | 2020-04-28T14:26:43 | 2020-04-28T14:26:43 | 215,789,280 | 3 | 0 | Apache-2.0 | 2020-07-09T21:34:01 | 2019-10-17T12:40:04 | Python | UTF-8 | Python | false | false | 3,148 | py | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""List all devices cli command."""
import click
import json
from operator import attrgetter
from typing import Iterable
from tabulate import tabulate
from mbed_devices import get_connected_devices, Device
from mbed_targets import Board
@click.command()
@click.option(
    "--format", type=click.Choice(["table", "json"]), default="table", show_default=True, help="Set output format."
)
@click.option(
    "--show-all",
    "-a",
    is_flag=True,
    default=False,
    help="Show all connected devices, even those which are not Mbed Boards.",
)
def list_connected_devices(format: str, show_all: bool) -> None:
    """Prints connected devices."""
    connected = get_connected_devices()
    # Unidentified devices are appended only when the user asked for them.
    candidates = list(connected.identified_devices)
    if show_all:
        candidates += connected.unidentified_devices
    devices = _sort_devices(candidates)
    if not devices:
        click.echo("No connected Mbed devices found.")
        return
    # Dispatch to the renderer matching the requested output format.
    render = _build_json_output if format == "json" else _build_tabular_output
    click.echo(render(devices))
def _sort_devices(devices: Iterable[Device]) -> Iterable[Device]:
    """Sort devices by board name and then serial number (in case there are multiple boards with the same name)."""
    sort_key = attrgetter("mbed_board.board_name", "serial_number")
    return sorted(devices, key=sort_key)
def _build_tabular_output(devices: Iterable[Device]) -> str:
    """Render the devices as a human-readable table (one row per device)."""
    headers = ["Board name", "Serial number", "Serial port", "Mount point(s)", "Build target(s)"]
    rows = [
        [
            device.mbed_board.board_name or "<unknown>",
            device.serial_number,
            device.serial_port or "<unknown>",
            "\n".join(str(mount_point) for mount_point in device.mount_points),
            "\n".join(_get_build_targets(device.mbed_board)),
        ]
        for device in devices
    ]
    return tabulate(rows, headers=headers)
def _build_json_output(devices: Iterable[Device]) -> str:
    """Render the devices as a pretty-printed JSON document."""

    def _entry(device: Device) -> dict:
        # One JSON object per device; board details are nested under "mbed_board".
        board = device.mbed_board
        return {
            "serial_number": device.serial_number,
            "serial_port": device.serial_port,
            "mount_points": [str(m) for m in device.mount_points],
            "mbed_board": {
                "product_code": board.product_code,
                "board_type": board.board_type,
                "board_name": board.board_name,
                "mbed_os_support": board.mbed_os_support,
                "mbed_enabled": board.mbed_enabled,
                "build_targets": _get_build_targets(board),
            },
        }

    return json.dumps([_entry(device) for device in devices], indent=4)
def _get_build_targets(board: Board) -> Iterable[str]:
    """List build target names: one per build variant, then the bare board type."""
    variant_targets = [f"{board.board_type}_{variant}" for variant in board.build_variant]
    return variant_targets + [board.board_type]
| [
"noreply@github.com"
] | ARMmbed.noreply@github.com |
60cc3428b450d6e43e6a31d6e789ce5f20e0f0f1 | 011416f366b8ff7da7e267cabcacb2279f328447 | /detector.py | e8686abcd2dfc72cadbfa58d80bc1c8997c14671 | [] | no_license | victorgrubio/Yolo-detection-NRG5 | ceed23cc7d2d7f97064bc9232e888e8c1df3df7a | 48c746d6cb1f1862f94bcfb5d90378d009fd73b6 | refs/heads/main | 2023-01-10T16:12:40.487364 | 2020-10-20T17:58:39 | 2020-10-20T17:58:39 | 306,098,308 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | """
Created on Mon Jan 29 17:25:59 2018
@author: victor
"""
import pyximport; pyximport.install() # allow .pyx files import
class Detector:
    """Skeleton image detector.

    Bug fix: this was declared with ``def`` instead of ``class``, so
    ``__init__`` and ``process_img`` were unreachable nested functions and
    ``Detector()`` silently returned ``None``.  It is now a proper class;
    ``img`` defaults to ``None`` so the old zero-argument call still works.
    """

    def __init__(self, img=None):
        # Keep a reference to the image to be processed later.
        self.img = img

    def process_img(self, img):
        """Run detection on *img*.  Placeholder — no model is wired up yet."""
        pass
| [
"victorgrubiodl@gmail.com"
] | victorgrubiodl@gmail.com |
6465301a497bfcd82a2d6d1b4edea5e3e8ea5605 | 1137db33db4a1ebe66ede596021c691f856b2979 | /funcmeasure/models/__init__.py | 46ae28b12fb8f86a8ef5b0bfa21d539b8112c3af | [
"MIT"
] | permissive | kkristof200/py_funcmeasure | d1aa6f0d86f4cd854d863772c2ed663641ae91f8 | 0cf910e7759466df60bcd6fa411051d36088f97d | refs/heads/master | 2023-04-15T09:31:57.546583 | 2021-04-15T15:14:29 | 2021-04-15T15:14:29 | 270,449,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | from .function_stats import FunctionStats
from .enums import * | [
"kovacskristof200@gmail.com"
] | kovacskristof200@gmail.com |
8e7c8e939b745936b9c56fdcad18bbc94247f2dc | c1bd12405d244c5924a4b069286cd9baf2c63895 | /azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/mab_error_info.py | d57e90b3fdeaad0ccaa8fce630564c8ecc36c04b | [
"MIT"
] | permissive | lmazuel/azure-sdk-for-python | 972708ad5902778004680b142874582a284a8a7c | b40e0e36cc00a82b7f8ca2fa599b1928240c98b5 | refs/heads/master | 2022-08-16T02:32:14.070707 | 2018-03-29T17:16:15 | 2018-03-29T17:16:15 | 21,287,134 | 1 | 3 | MIT | 2019-10-25T15:56:00 | 2014-06-27T19:40:56 | Python | UTF-8 | Python | false | false | 1,097 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class MabErrorInfo(Model):
    """MAB workload-specific error information.

    :param error_string: Localized error string.
    :type error_string: str
    :param recommendations: List of localized recommendations.
    :type recommendations: list of str
    """

    # msrest serialization map: Python attribute -> wire-format key and type.
    # 'recommendations' is serialized as a JSON array of strings ('[str]').
    _attribute_map = {
        'error_string': {'key': 'errorString', 'type': 'str'},
        'recommendations': {'key': 'recommendations', 'type': '[str]'},
    }

    # NOTE(review): base Model.__init__ is not invoked here — presumably fine
    # for this generated msrest model; confirm against other models in the SDK.
    def __init__(self, error_string=None, recommendations=None):
        self.error_string = error_string
        self.recommendations = recommendations
| [
"dheeru.rathor14@gmail.com"
] | dheeru.rathor14@gmail.com |
967bc0c6daed181a230ed0df131092a91d1585c7 | 9b3f578e63a7e17e2b1bab5f38aa8625b8a80251 | /descarteslabs/workflows/types/primitives/primitive.py | 2d49e5b56195eb800ecbd67e14ed0bf44934e74c | [
"Apache-2.0"
] | permissive | carderne/descarteslabs-python | e6f7000f08cd1569e0ddd0f7fb8e53abb6765183 | 757b480efb8d58474a3bf07f1dbd90652b46ed64 | refs/heads/master | 2022-12-09T23:19:02.361226 | 2020-08-13T11:52:30 | 2020-08-13T11:52:30 | 287,264,851 | 0 | 0 | NOASSERTION | 2020-08-13T11:46:58 | 2020-08-13T11:46:57 | null | UTF-8 | Python | false | false | 1,324 | py | from descarteslabs.common.graft import client
from ..core import Proxytype, ProxyTypeError
class Primitive(Proxytype):
"""
Proxy wrapper around a Python primitive type.
Do not use Primitive directly; instead, use one of the built-in subtypes (Int, Str, etc.)
"""
_pytype = None
def __init__(self, obj):
if self._is_generic():
raise ProxyTypeError(
"Cannot instantiate a generic {}; use a concrete subclass".format(
type(self).__name__
)
)
from .any_ import Any # TODO circular import
if isinstance(obj, (type(self), Any)):
self.graft = obj.graft
else:
if not isinstance(obj, self._pytype):
raise ProxyTypeError(
"Cannot promote {} to {}".format(type(obj), type(self))
)
self.graft = client.value_graft(obj)
self._literal_value = obj
@classmethod
def _promote(cls, obj):
return cls(obj)
@property
def literal_value(self):
"Python literal value this proxy object was constructed with, or None if not constructed from a literal value."
return getattr(self, "_literal_value", None)
def _is_generic(self):
return self._pytype is None
| [
"support@descarteslabs.com"
] | support@descarteslabs.com |
bd6fbef0bcbf14bea60261fe548c8aa68a9ac909 | 302442c32bacca6cde69184d3f2d7529361e4f3c | /cidtrsend-all/stage2-model/pytz/zoneinfo/America/Argentina/Mendoza.py | d3b0b6b1d1cd786afa0f915837aa14c8768788d6 | [] | no_license | fucknoob/WebSemantic | 580b85563072b1c9cc1fc8755f4b09dda5a14b03 | f2b4584a994e00e76caccce167eb04ea61afa3e0 | refs/heads/master | 2021-01-19T09:41:59.135927 | 2015-02-07T02:11:23 | 2015-02-07T02:11:23 | 30,441,659 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,974 | py | '''tzinfo timezone information for America/Argentina/Mendoza.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Mendoza(DstTzInfo):
    '''America/Argentina/Mendoza timezone definition. See datetime.tzinfo for details'''

    # IANA zone name served by this tzinfo.
    zone = 'America/Argentina/Mendoza'

    # Generated data: UTC instants at which this zone's offset/abbreviation
    # changed, in chronological order.  The first entry is a sentinel that
    # covers all earlier dates.
    _utc_transition_times = [
d(1,1,1,0,0,0),
d(1920,5,1,4,16,48),
d(1930,12,1,4,0,0),
d(1931,4,1,3,0,0),
d(1931,10,15,4,0,0),
d(1932,3,1,3,0,0),
d(1932,11,1,4,0,0),
d(1933,3,1,3,0,0),
d(1933,11,1,4,0,0),
d(1934,3,1,3,0,0),
d(1934,11,1,4,0,0),
d(1935,3,1,3,0,0),
d(1935,11,1,4,0,0),
d(1936,3,1,3,0,0),
d(1936,11,1,4,0,0),
d(1937,3,1,3,0,0),
d(1937,11,1,4,0,0),
d(1938,3,1,3,0,0),
d(1938,11,1,4,0,0),
d(1939,3,1,3,0,0),
d(1939,11,1,4,0,0),
d(1940,3,1,3,0,0),
d(1940,7,1,4,0,0),
d(1941,6,15,3,0,0),
d(1941,10,15,4,0,0),
d(1943,8,1,3,0,0),
d(1943,10,15,4,0,0),
d(1946,3,1,3,0,0),
d(1946,10,1,4,0,0),
d(1963,10,1,3,0,0),
d(1963,12,15,4,0,0),
d(1964,3,1,3,0,0),
d(1964,10,15,4,0,0),
d(1965,3,1,3,0,0),
d(1965,10,15,4,0,0),
d(1966,3,1,3,0,0),
d(1966,10,15,4,0,0),
d(1967,4,2,3,0,0),
d(1967,10,1,4,0,0),
d(1968,4,7,3,0,0),
d(1968,10,6,4,0,0),
d(1969,4,6,3,0,0),
d(1969,10,5,4,0,0),
d(1974,1,23,3,0,0),
d(1974,5,1,2,0,0),
d(1988,12,1,3,0,0),
d(1989,3,5,2,0,0),
d(1989,10,15,3,0,0),
d(1990,3,4,2,0,0),
d(1990,10,15,4,0,0),
d(1991,3,1,3,0,0),
d(1991,10,15,4,0,0),
d(1992,3,1,3,0,0),
d(1992,10,18,4,0,0),
d(1993,3,7,2,0,0),
d(1999,10,3,3,0,0),
d(2000,3,3,3,0,0),
d(2004,5,23,3,0,0),
d(2004,9,26,4,0,0),
        ]

    # Generated data: (utcoffset seconds, DST offset seconds, abbreviation)
    # for the interval starting at the corresponding transition time above.
    _transition_info = [
i(-15420,0,'CMT'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,3600,'ARST'),
i(-14400,0,'ART'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-10800,0,'ART'),
i(-7200,3600,'ARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-10800,3600,'WARST'),
i(-14400,0,'WART'),
i(-7200,7200,'ARST'),
i(-10800,0,'ART'),
i(-10800,0,'ARST'),
i(-10800,0,'ART'),
i(-14400,0,'WART'),
i(-10800,0,'ART'),
        ]

# Module-level singleton instance, as pytz expects (shadows the class name).
Mendoza = Mendoza()
| [
"learnfuzzy@gmail.com"
] | learnfuzzy@gmail.com |
731ddffa3a3330ee11c7a4b1f6c437a7196dcce7 | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/lianxi/函数参数.py | 604939b96265d356561c29ce3cf5a71702d1a3db | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | def sum_2_num():
num1 = 10
num2 = 20
result = num1 + num2
print("%d + %d = %d"% (num1,num2,result))
sum_2_num()
| [
"1083027306@qq.com"
] | 1083027306@qq.com |
bc4cfdc288816b00de2839a560736efa2542f302 | 07151cc20993dff5e3e22a8fc2fe4fe7fb3e2551 | /parse_drugbank.py | 3142e785a49b34248686366bc30b75f9c1d3bc04 | [] | no_license | jmuhlich/lincs-drug-targets | 4a2b122185caf587a3b4eda47da125c4a3c8e439 | bf627c4760c52fa0a15645c4b49c077a4ed478d5 | refs/heads/master | 2021-01-19T08:26:02.903067 | 2013-07-03T19:48:01 | 2013-07-03T19:48:01 | 10,800,024 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,668 | py | import os
import sys
import lxml.etree
import csv
import sqlalchemy as sa
def xpath(obj, path, single=True):
    """Evaluate XPath *path* against *obj* in the DrugBank namespace.

    Results are coerced to unicode (Python 2 ``map`` returns a list).  With
    ``single=True`` returns None for no match, the value for one match, and
    raises ValueError for multiple matches; otherwise returns the full list.
    """
    # 'ns' is the module-level DrugBank XML namespace, bound to prefix 'd'.
    matches = map(unicode, obj.xpath(path, namespaces={'d': ns}))
    if not single:
        return matches
    if len(matches) == 0:
        return None
    if len(matches) == 1:
        return matches[0]
    raise ValueError("XPath expression matches more than one value")
def record_match(hmsl_id, drugbank_id, description):
    """Insert one HMS LINCS <-> DrugBank match row via the module connection."""
    conn.execute(hmsl_drugbank.insert().values(
        hmsl_id=hmsl_id, drugbank_id=drugbank_id, description=description))
# --- Database setup -------------------------------------------------------
# Persistent SQLite file so the slow DrugBank XML parse can be skipped on
# subsequent runs; uncomment ':memory:' for a throwaway in-memory database.
db_file = 'drugbank.sqlite'
#db_file = ':memory:'
engine = sa.create_engine('sqlite:///' + db_file)
conn = engine.connect()
metadata = sa.MetaData(bind=conn)
# One row per DrugBank drug; list-valued columns are pickled since SQLite
# has no native list type.
drugbank_drug = sa.Table(
    'drugbank_drug', metadata,
    sa.Column('drug_id', sa.String(), primary_key=True),
    sa.Column('name', sa.String()),
    sa.Column('synonyms', sa.PickleType()), # list of strings
    sa.Column('kegg_id', sa.String()),
    sa.Column('pubchem_cid', sa.String()),
    sa.Column('molecular_formula', sa.String()),
    sa.Column('partners', sa.PickleType()), # list of strings
    )
# Flat (drug_id, lower-cased name/synonym/brand) pairs for fast name lookup.
drugbank_name = sa.Table(
    'drugbank_name', metadata,
    sa.Column('drug_id', sa.String()),
    sa.Column('name', sa.String(), index=True),
    )
# Output table: HMS LINCS small molecule matched to a DrugBank drug, with a
# human-readable description of which field produced the match.
hmsl_drugbank = sa.Table(
    'hmsl_drugbank', metadata,
    sa.Column('hmsl_id', sa.String(), primary_key=True),
    sa.Column('drugbank_id', sa.String()),
    sa.Column('description', sa.String()),
    )
metadata.create_all()
# --- DrugBank XML input ---------------------------------------------------
datafile_name = 'drugbank.xml'
datafile = open(datafile_name)
# DrugBank XML namespace; pre-build fully qualified tag names for iterparse.
ns = 'http://drugbank.ca'
qnames = dict((tag, lxml.etree.QName(ns, tag).text)
              for tag in ('drug', 'drug-interaction', 'partner'))
# Parse drugbank xml into sqlite, only if the table is empty.
if not conn.execute(drugbank_drug.select()).first():
    # Single transaction for the whole load; iterparse streams the (large)
    # XML file so memory stays bounded (see element.clear() below).
    with conn.begin() as trans:
        for event, element in lxml.etree.iterparse(datafile, tag=qnames['drug']):
            # We need to skip 'drug' elements in drug-interaction sub-elements.
            # It's unfortunate they re-used this tag name.
            if element.getparent().tag == qnames['drug-interaction']:
                continue
            # Pull the fields of interest out of the <drug> element.
            drug_id = xpath(element, 'd:drugbank-id/text()')
            name = xpath(element, 'd:name/text()')
            synonyms = xpath(
                element, 'd:synonyms/d:synonym/text()', single=False)
            synonyms += xpath(
                element, 'd:brands/d:brand/text()', single=False)
            molecular_formula = xpath(
                element, './/d:property[d:kind="Molecular Formula"]/'
                'd:value/text()')
            kegg_id = xpath(
                element, './/d:external-identifier[d:resource="KEGG Drug"]/'
                'd:identifier/text()')
            pubchem_cid = xpath(
                element, './/d:external-identifier[d:resource="PubChem Compound"]/'
                'd:identifier/text()')
            # Target partner ids; resolved to UniProt ids in a later pass.
            partner_ids = xpath(
                element, 'd:targets/d:target/@partner', single=False)
            conn.execute(
                drugbank_drug.insert().
                values(drug_id=drug_id, name=name, synonyms=synonyms,
                       kegg_id=kegg_id, pubchem_cid=pubchem_cid,
                       molecular_formula=molecular_formula,
                       partners=partner_ids))
            # Index the primary name and every synonym/brand, lower-cased.
            conn.execute(
                drugbank_name.insert().
                values(drug_id=drug_id, name=name.lower()))
            for s in synonyms:
                conn.execute(
                    drugbank_name.insert().
                    values(drug_id=drug_id, name=s.lower()))
            # Free the element's subtree to keep iterparse memory usage flat.
            element.clear()
# Turns out it's much faster to do a second iterparse loop with a different
# tag argument than to do just one iterparse loop with a conditional on the
# tag name. The lxml internals are much more efficient at filtering tags
# than we are, and the disk I/O and buffer cache impact are negligible. It
# would be nice if the tag argument could accept a list of tag names...
datafile.seek(0)
# Map DrugBank partner id -> UniProt accession (None when absent).
partner_to_uniprot = {}
for event, element in lxml.etree.iterparse(datafile, tag=qnames['partner']):
    partner_id = element.get('id')
    uniprot_id = xpath(element, './/d:external-identifier'
                       '[d:resource="UniProtKB"]/d:identifier/text()')
    partner_to_uniprot[partner_id] = uniprot_id
    element.clear()
# Rewrite each drug's partner list from partner ids to UniProt ids,
# dropping partners with no UniProt mapping.  (Python 2: map/filter return
# lists, so the pickled column value stays a plain list.)
with conn.begin() as trans:
    for rec in conn.execute(drugbank_drug.select()):
        new_values = dict(rec)
        new_values['partners'] = map(partner_to_uniprot.__getitem__, rec.partners)
        new_values['partners'] = filter(None, new_values['partners'])
        conn.execute(drugbank_drug.update().
                     where(drugbank_drug.c.drug_id == rec.drug_id).
                     values(**new_values))
# All known DrugBank names, used below for name-based matching.
drugbank_names = [
    rec[0] for rec in conn.execute(sa.select([drugbank_name.c.name]))]
# --- HMS LINCS small-molecule TSV input ----------------------------------
# The TSV lives next to this script; header row supplies the column names.
sm_filename = os.path.join(os.path.dirname(sys.argv[0]),
                           'small_molecule.130624M134120.tsv')
sm_file = open(sm_filename, 'rb')
sm_reader = csv.reader(sm_file, dialect='excel-tab')
# Normalize header names to snake_case column identifiers.
sm_fields = [f.lower().replace(' ', '_') for f in sm_reader.next()]
sm_fields[0] = 'sm_id'
# Build the table dynamically from the TSV header, all string columns...
hmsl_sm = sa.Table(
    'hmsl_sm', metadata,
    *[sa.Column(f, sa.String()) for f in sm_fields]
    )
hmsl_sm.append_constraint(sa.PrimaryKeyConstraint(hmsl_sm.c.sm_id))
# ...except alternative_names, which holds a pickled list (see split below).
hmsl_sm.c.alternative_names.type = sa.PickleType()
metadata.create_all(tables=[hmsl_sm])
# Clear out hmsl_sm table unconditionally (it's fast to reload).
conn.execute(hmsl_sm.delete())
# Load the TSV into hmsl_sm, merging rows that share an sm_id.
with conn.begin() as trans:
    for row in sm_reader:
        # Trim the trailing 4 characters from the HMSL ID — presumably a
        # batch/salt suffix so duplicates collapse onto one compound;
        # TODO confirm the suffix format against the LINCS ID scheme.
        row[0] = row[0][:-4]
        # alternative_names arrives ';'-separated; store it as a list.
        row[2] = row[2].split(';')
        try:
            conn.execute(hmsl_sm.insert().values(row))
        except sa.exc.IntegrityError as e:
            # Merge tsv row with existing record.
            rec = conn.execute(hmsl_sm.select().
                               where(hmsl_sm.c.sm_id == row[0])).first()
            if rec:
                new_rec = dict(rec)
                # Append new name and synonyms to synonyms.
                new_rec['alternative_names'] = list(set(
                    rec.alternative_names +
                    [row[sm_fields.index('sm_name')]] +
                    row[sm_fields.index('alternative_names')]))
                # If no existing CID, use the new one.
                if not rec.pubchem_cid:
                    new_rec['pubchem_cid'] = row[sm_fields.index('pubchem_cid')]
                conn.execute(hmsl_sm.update().
                             where(hmsl_sm.c.sm_id == new_rec['sm_id']).
                             values(new_rec))
# --- Matching pass: HMSL small molecules vs. DrugBank ---------------------
# Rebuild the match table from scratch on every run.
conn.execute(hmsl_drugbank.delete())
with conn.begin() as trans:
    for sm in conn.execute(hmsl_sm.select()):
        # First try an exact (case-insensitive) name match on the primary
        # name or any alternative name.
        hmsl_names = [s.lower() for s in [sm.sm_name] + sm.alternative_names]
        for name in hmsl_names:
            match = conn.execute(sa.select([drugbank_name.c.drug_id]).
                                 where(drugbank_name.c.name == name)
                                 ).scalar()
            if match:
                break
        if match:
            record_match(sm.sm_id, match, 'Name: %s' % name)
            continue
        # Fall back to matching on the PubChem compound id.
        match = conn.execute(sa.select([drugbank_drug.c.drug_id]).
                             where(drugbank_drug.c.pubchem_cid ==
                                   sm.pubchem_cid)
                             ).scalar()
        if match:
            record_match(sm.sm_id, match, 'PubChem CID: %s' % sm.pubchem_cid)
            continue
# Dump all matches as tab-separated output (Python 2 print statement).
for rec in conn.execute(hmsl_drugbank.select()):
    print '\t'.join(rec)
| [
"jmuhlich@bitflood.org"
] | jmuhlich@bitflood.org |
bc0ab3ba1d66e12d5151b4ece16b2e5d76d35cfa | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/h5py/version.py | d07fd5c286ba42d9633ba01d61c2280a7fd43eff | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 1,652 | py | # This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Versioning module for h5py.
"""
from __future__ import absolute_import
from collections import namedtuple
from . import h5 as _h5
import sys
import numpy
# All should be integers, except pre, as validating versions is more than is
# needed for our use case
# Version tuple fields: X.Y.Z release numbers plus optional pre/post/dev tags.
_H5PY_VERSION_CLS = namedtuple("_H5PY_VERSION_CLS",
                               "major minor bugfix pre post dev")
# HDF5 version h5py was compiled against (recorded at build time by _h5).
hdf5_built_version_tuple = _h5.HDF5_VERSION_COMPILED_AGAINST
# This h5py release: 2.9.0, no pre/post/dev components.
version_tuple = _H5PY_VERSION_CLS(2, 9, 0, None, None, None)
# Assemble the version string, appending pre/post/dev segments when present.
version = "{0.major:d}.{0.minor:d}.{0.bugfix:d}".format(version_tuple)
if version_tuple.pre is not None:
    version += version_tuple.pre
if version_tuple.post is not None:
    version += ".post{0.post:d}".format(version_tuple)
if version_tuple.dev is not None:
    version += ".dev{0.dev:d}".format(version_tuple)
# HDF5 library version actually loaded at runtime (may differ from built).
hdf5_version_tuple = _h5.get_libversion()
hdf5_version = "%d.%d.%d" % hdf5_version_tuple
# HDF5 API compatibility level targeted by this build.
api_version_tuple = (1,8)
api_version = "%d.%d" % api_version_tuple
# Human-readable configuration summary used by h5py diagnostics.
info = """\
Summary of the h5py configuration
---------------------------------
h5py %(h5py)s
HDF5 %(hdf5)s
Python %(python)s
sys.platform %(platform)s
sys.maxsize %(maxsize)s
numpy %(numpy)s
""" % { 'h5py': version,
        'hdf5': hdf5_version,
        'python': sys.version,
        'platform': sys.platform,
        'maxsize': sys.maxsize,
        'numpy': numpy.__version__ }
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
8ecf3e72e374f924b88bc99a155fc33cd9c050a1 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/ospf/lsastatshist5min.py | 10f90f8bccaf46bf0a16f11a7a67d8878606062d | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 27,691 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class LsaStatsHist5min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.ospf.LsaStatsHist5min", "Ospf Lsa Packets")
counter = CounterMeta("droppedLsaPktsWhileGR", CounterCategory.COUNTER, "packets", "LSA Packets Dropped During GR")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "droppedLsaPktsWhileGRCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "droppedLsaPktsWhileGRPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "droppedLsaPktsWhileGRMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "droppedLsaPktsWhileGRMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "droppedLsaPktsWhileGRAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "droppedLsaPktsWhileGRSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "droppedLsaPktsWhileGRThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "droppedLsaPktsWhileGRTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "droppedLsaPktsWhileGRRate"
meta._counters.append(counter)
counter = CounterMeta("droppedLsaPktsWhileSPF", CounterCategory.COUNTER, "packets", "LSA Packets Dropped During SPF")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "droppedLsaPktsWhileSPFCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "droppedLsaPktsWhileSPFPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "droppedLsaPktsWhileSPFMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "droppedLsaPktsWhileSPFMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "droppedLsaPktsWhileSPFAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "droppedLsaPktsWhileSPFSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "droppedLsaPktsWhileSPFThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "droppedLsaPktsWhileSPFTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "droppedLsaPktsWhileSPFRate"
meta._counters.append(counter)
counter = CounterMeta("rcvdLsaPktsIgnored", CounterCategory.COUNTER, "packets", "Received LSA Packets Ignored")
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "rcvdLsaPktsIgnoredCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "rcvdLsaPktsIgnoredPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "rcvdLsaPktsIgnoredMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "rcvdLsaPktsIgnoredMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "rcvdLsaPktsIgnoredAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "rcvdLsaPktsIgnoredSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "rcvdLsaPktsIgnoredThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "rcvdLsaPktsIgnoredTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "rcvdLsaPktsIgnoredRate"
meta._counters.append(counter)
meta.moClassName = "ospfLsaStatsHist5min"
meta.rnFormat = "HDospfLsaStats5min-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Ospf Lsa Packets stats in 5 minute"
meta.writeAccessMask = 0x8008020040001
meta.readAccessMask = 0x8008020040001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.ospf.IfStats")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.ospf.LsaStatsHist")
meta.rnPrefixes = [
('HDospfLsaStats5min-', True),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRAvg", "droppedLsaPktsWhileGRAvg", 48837, PropCategory.IMPLICIT_AVG)
prop.label = "LSA Packets Dropped During GR average value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRAvg", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRCum", "droppedLsaPktsWhileGRCum", 48833, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSA Packets Dropped During GR cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRCum", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRMax", "droppedLsaPktsWhileGRMax", 48836, PropCategory.IMPLICIT_MAX)
prop.label = "LSA Packets Dropped During GR maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRMax", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRMin", "droppedLsaPktsWhileGRMin", 48835, PropCategory.IMPLICIT_MIN)
prop.label = "LSA Packets Dropped During GR minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRMin", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRPer", "droppedLsaPktsWhileGRPer", 48834, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSA Packets Dropped During GR periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRPer", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRRate", "droppedLsaPktsWhileGRRate", 48841, PropCategory.IMPLICIT_RATE)
prop.label = "LSA Packets Dropped During GR rate"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRRate", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRSpct", "droppedLsaPktsWhileGRSpct", 48838, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSA Packets Dropped During GR suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRSpct", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRThr", "droppedLsaPktsWhileGRThr", 48839, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSA Packets Dropped During GR thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("droppedLsaPktsWhileGRThr", prop)
prop = PropMeta("str", "droppedLsaPktsWhileGRTr", "droppedLsaPktsWhileGRTr", 48840, PropCategory.IMPLICIT_TREND)
prop.label = "LSA Packets Dropped During GR trend"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileGRTr", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFAvg", "droppedLsaPktsWhileSPFAvg", 48858, PropCategory.IMPLICIT_AVG)
prop.label = "LSA Packets Dropped During SPF average value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFAvg", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFCum", "droppedLsaPktsWhileSPFCum", 48854, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "LSA Packets Dropped During SPF cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFCum", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFMax", "droppedLsaPktsWhileSPFMax", 48857, PropCategory.IMPLICIT_MAX)
prop.label = "LSA Packets Dropped During SPF maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFMax", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFMin", "droppedLsaPktsWhileSPFMin", 48856, PropCategory.IMPLICIT_MIN)
prop.label = "LSA Packets Dropped During SPF minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFMin", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFPer", "droppedLsaPktsWhileSPFPer", 48855, PropCategory.IMPLICIT_PERIODIC)
prop.label = "LSA Packets Dropped During SPF periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFPer", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFRate", "droppedLsaPktsWhileSPFRate", 48862, PropCategory.IMPLICIT_RATE)
prop.label = "LSA Packets Dropped During SPF rate"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFRate", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFSpct", "droppedLsaPktsWhileSPFSpct", 48859, PropCategory.IMPLICIT_SUSPECT)
prop.label = "LSA Packets Dropped During SPF suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFSpct", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFThr", "droppedLsaPktsWhileSPFThr", 48860, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "LSA Packets Dropped During SPF thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("droppedLsaPktsWhileSPFThr", prop)
prop = PropMeta("str", "droppedLsaPktsWhileSPFTr", "droppedLsaPktsWhileSPFTr", 48861, PropCategory.IMPLICIT_TREND)
prop.label = "LSA Packets Dropped During SPF trend"
prop.isOper = True
prop.isStats = True
meta.props.add("droppedLsaPktsWhileSPFTr", prop)
prop = PropMeta("str", "index", "index", 47832, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredAvg", "rcvdLsaPktsIgnoredAvg", 48879, PropCategory.IMPLICIT_AVG)
prop.label = "Received LSA Packets Ignored average value"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredAvg", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredCum", "rcvdLsaPktsIgnoredCum", 48875, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "Received LSA Packets Ignored cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredCum", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredMax", "rcvdLsaPktsIgnoredMax", 48878, PropCategory.IMPLICIT_MAX)
prop.label = "Received LSA Packets Ignored maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredMax", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredMin", "rcvdLsaPktsIgnoredMin", 48877, PropCategory.IMPLICIT_MIN)
prop.label = "Received LSA Packets Ignored minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredMin", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredPer", "rcvdLsaPktsIgnoredPer", 48876, PropCategory.IMPLICIT_PERIODIC)
prop.label = "Received LSA Packets Ignored periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredPer", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredRate", "rcvdLsaPktsIgnoredRate", 48883, PropCategory.IMPLICIT_RATE)
prop.label = "Received LSA Packets Ignored rate"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredRate", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredSpct", "rcvdLsaPktsIgnoredSpct", 48880, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Received LSA Packets Ignored suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredSpct", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredThr", "rcvdLsaPktsIgnoredThr", 48881, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Received LSA Packets Ignored thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("rcvdLsaPktsIgnoredThr", prop)
prop = PropMeta("str", "rcvdLsaPktsIgnoredTr", "rcvdLsaPktsIgnoredTr", 48882, PropCategory.IMPLICIT_TREND)
prop.label = "Received LSA Packets Ignored trend"
prop.isOper = True
prop.isStats = True
meta.props.add("rcvdLsaPktsIgnoredTr", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
    """Construct the managed object under *parentMoOrDn*.

    :param parentMoOrDn: parent managed object or its distinguished name
    :param index: value of the ``index`` naming property (the sole naming
        property registered for this class)
    :param markDirty: when True, mark the new MO as locally modified
    :param creationProps: additional property values applied at creation
    """
    # ``index`` is the only naming value, so it can be forwarded
    # positionally to the base Mo constructor without an intermediate list.
    Mo.__init__(self, parentMoOrDn, markDirty, index, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
8dcce500bccb4d7e0fe014e6d850a544ff23c742 | 822027ec57f113f80a51f100c520eb76a6f302f6 | /test/z_component_tests/test__encoding.py | c16288fc2367ecd1ff65d2070ad6a1e0a27f5ece | [
"MIT"
] | permissive | KIC/pandas_ml_utils | 131de11f4914f0993570687b581452e2e81b256b | 76b764e2f87c2e9bcee9a62cfe0b54e7fb046034 | refs/heads/master | 2023-04-04T00:08:23.175385 | 2020-02-24T14:44:42 | 2020-02-24T14:44:42 | 205,210,206 | 3 | 0 | MIT | 2023-03-24T23:20:47 | 2019-08-29T16:54:12 | Python | UTF-8 | Python | false | false | 1,683 | py | import logging
import unittest
from typing import List
import pandas as pd
import numpy as np
from sklearn.neural_network import MLPClassifier
import pandas_ml_utils as pdu
from pandas_ml_utils.constants import *
from test.config import TEST_FILE
from pandas_ml_utils.model.features_and_labels.target_encoder import TargetLabelEncoder
from test.mocks.mock_model import MockModel
from pandas_ml_utils.utils.functions import integrate_nested_arrays
# Configure the root logger at DEBUG so library log output from
# pandas_ml_utils is visible while these component tests run.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
class EncoderTest(unittest.TestCase):
    """Component test: a label encoder that emits 2D (array-valued) labels
    must flow through the fit pipeline and integrate back into a 3D array."""

    def test__2d_encoding(self):
        """given"""
        # Test fixture: daily SPY prices; binary label = close above open.
        df = pd.read_csv(TEST_FILE, index_col='Date')
        df["label"] = df["spy_Close"] > df["spy_Open"]

        class ArrayEncoder(TargetLabelEncoder):
            # Minimal encoder that duplicates each close price into a
            # length-2 numpy array, yielding a single 2D label column "2D".
            def __init__(self):
                super().__init__()

            @property
            def labels_source_columns(self) -> List[str]:
                return ["spy_Close"]

            @property
            def encoded_labels_columns(self) -> List[str]:
                return ["2D"]

            def encode(self, df: pd.DataFrame, **kwargs) -> pd.DataFrame:
                res = pd.DataFrame({}, index=df.index)
                # NOTE(review): the chained assignment also overwrites the
                # incoming df["spy_Close"] with arrays — presumably so the
                # lagged feature column becomes array-valued too; confirm
                # that mutating the caller's frame is intended.
                res["2D"] = df["spy_Close"] = df["spy_Close"].apply(lambda r: np.array([r, r]))
                return res

        """when"""
        # Fit a mock model using the array encoder with three feature lags.
        model = MockModel(pdu.FeaturesAndLabels(["spy_Close"], ArrayEncoder(), feature_lags=[0, 1, 2]))
        fit = df.fit(model)

        """then"""
        print(fit.test_summary.df)
        # Summary frame holds one nested-array column pair per row; after
        # integrating nested arrays it must expand to (rows, 2, 2).
        self.assertEqual(fit.test_summary.df.shape, (2682, 2))
        self.assertEqual(integrate_nested_arrays(fit.test_summary.df.values).shape, (2682, 2, 2))
| [
"ch9.ki7@gmail.com"
] | ch9.ki7@gmail.com |
fe993ecafc7ef8012d6a4063011c843657ce6c70 | f0681b8c129e8afce21e340697502230f45ce930 | /venv/Lib/site-packages/com/vmware/nsx_policy/infra/services_client.py | 82757de614aaf94ee6efc90a2e3fe00d79d670b9 | [] | no_license | dungla2011/python_pyvmomi_working_sample_vmware_easy | 8852b6fdcd0f7d0f648f6f7b6c6e4f70c7213746 | a3b6d86a802f28c7ee249fc03523d5e5f0a2e3bd | refs/heads/main | 2023-07-05T14:56:46.551091 | 2021-08-20T12:19:39 | 2021-08-20T12:19:39 | 395,496,219 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,849 | py | # -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2021 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx_policy.infra.services.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class ServiceEntries(VapiInterface):
    """
    CRUD client for the service entries nested under an NSX policy infra
    service (REST collection
    ``/policy/api/v1/infra/services/{service-id}/service-entries``).

    Auto-generated vAPI binding — see the file header; do not hand-edit
    beyond comments.
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx_policy.infra.services.service_entries'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _ServiceEntriesStub)
        self._VAPI_OPERATION_IDS = {}

    def delete(self,
               service_id,
               service_entry_id,
               ):
        """
        Delete Service entry

        :type service_id: :class:`str`
        :param service_id: Service ID (required)
        :type service_entry_id: :class:`str`
        :param service_entry_id: Service entry ID (required)
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('delete',
                            {
                            'service_id': service_id,
                            'service_entry_id': service_entry_id,
                            })

    def get(self,
            service_id,
            service_entry_id,
            ):
        """
        Service entry

        :type service_id: :class:`str`
        :param service_id: Service ID (required)
        :type service_entry_id: :class:`str`
        :param service_entry_id: Service entry ID (required)
        :rtype: :class:`vmware.vapi.struct.VapiStruct`
        :return: com.vmware.nsx_policy.model.ServiceEntry
            The return value will contain all the attributes defined in
            :class:`com.vmware.nsx_policy.model_client.ServiceEntry`.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get',
                            {
                            'service_id': service_id,
                            'service_entry_id': service_entry_id,
                            })

    def list(self,
             service_id,
             cursor=None,
             include_mark_for_delete_objects=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Paginated list of Service entries for the given service

        :type service_id: :class:`str`
        :param service_id: Service ID (required)
        :type cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type include_mark_for_delete_objects: :class:`bool` or ``None``
        :param include_mark_for_delete_objects: Include objects that are marked for deletion in results (optional,
            default to false)
        :type included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx_policy.model_client.ServiceEntryListResult`
        :return: com.vmware.nsx_policy.model.ServiceEntryListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('list',
                            {
                            'service_id': service_id,
                            'cursor': cursor,
                            'include_mark_for_delete_objects': include_mark_for_delete_objects,
                            'included_fields': included_fields,
                            'page_size': page_size,
                            'sort_ascending': sort_ascending,
                            'sort_by': sort_by,
                            })

    def patch(self,
              service_id,
              service_entry_id,
              service_entry,
              ):
        """
        If a service entry with the service-entry-id is not already present,
        create a new service entry. If it already exists, patch the service
        entry.

        :type service_id: :class:`str`
        :param service_id: Service ID (required)
        :type service_entry_id: :class:`str`
        :param service_entry_id: Service entry ID (required)
        :type service_entry: :class:`vmware.vapi.struct.VapiStruct`
        :param service_entry: (required)
            The parameter must contain all the attributes defined in
            :class:`com.vmware.nsx_policy.model_client.ServiceEntry`.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('patch',
                            {
                            'service_id': service_id,
                            'service_entry_id': service_entry_id,
                            'service_entry': service_entry,
                            })

    def update(self,
               service_id,
               service_entry_id,
               service_entry,
               ):
        """
        If a service entry with the service-entry-id is not already present,
        create a new service entry. If it already exists, update the service
        entry.

        :type service_id: :class:`str`
        :param service_id: Service ID (required)
        :type service_entry_id: :class:`str`
        :param service_entry_id: Service entry ID (required)
        :type service_entry: :class:`vmware.vapi.struct.VapiStruct`
        :param service_entry: (required)
            The parameter must contain all the attributes defined in
            :class:`com.vmware.nsx_policy.model_client.ServiceEntry`.
        :rtype: :class:`vmware.vapi.struct.VapiStruct`
        :return: com.vmware.nsx_policy.model.ServiceEntry
            The return value will contain all the attributes defined in
            :class:`com.vmware.nsx_policy.model_client.ServiceEntry`.
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('update',
                            {
                            'service_id': service_id,
                            'service_entry_id': service_entry_id,
                            'service_entry': service_entry,
                            })
class _ServiceEntriesStub(ApiInterfaceStub):
    """Generated wire-level stub for the ``service_entries`` interface.

    Declares, per operation, the input/output vAPI types, the error map,
    the input/output validators, and the REST routing metadata (HTTP verb,
    URL template, path/query parameter mapping). Auto-generated — do not
    hand-edit beyond comments.
    """
    def __init__(self, config):
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'service_id': type.StringType(),
            'service_entry_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/policy/api/v1/infra/services/{service-id}/service-entries/{service-entry-id}',
            path_variables={
                'service_id': 'service-id',
                'service_entry_id': 'service-entry-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'service_id': type.StringType(),
            'service_entry_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
            HasFieldsOfValidator()
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/services/{service-id}/service-entries/{service-entry-id}',
            path_variables={
                'service_id': 'service-id',
                'service_entry_id': 'service-entry-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'service_id': type.StringType(),
            'cursor': type.OptionalType(type.StringType()),
            'include_mark_for_delete_objects': type.OptionalType(type.BooleanType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
            HasFieldsOfValidator()
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/policy/api/v1/infra/services/{service-id}/service-entries',
            path_variables={
                'service_id': 'service-id',
            },
            query_parameters={
                'cursor': 'cursor',
                'include_mark_for_delete_objects': 'include_mark_for_delete_objects',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )

        # properties for patch operation
        patch_input_type = type.StructType('operation-input', {
            'service_id': type.StringType(),
            'service_entry_id': type.StringType(),
            'service_entry': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceEntry')]),
        })
        patch_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        patch_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        patch_output_validator_list = [
        ]
        patch_rest_metadata = OperationRestMetadata(
            http_method='PATCH',
            url_template='/policy/api/v1/infra/services/{service-id}/service-entries/{service-entry-id}',
            request_body_parameter='service_entry',
            path_variables={
                'service_id': 'service-id',
                'service_entry_id': 'service-entry-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'service_id': type.StringType(),
            'service_entry_id': type.StringType(),
            'service_entry': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceEntry')]),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
            HasFieldsOfValidator()
        ]
        update_output_validator_list = [
            HasFieldsOfValidator()
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/policy/api/v1/infra/services/{service-id}/service-entries/{service-entry-id}',
            request_body_parameter='service_entry',
            path_variables={
                'service_id': 'service-id',
                'service_entry_id': 'service-entry-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )

        # Operation table: maps operation name -> type/validator metadata
        # consumed by the ApiInterfaceStub machinery.
        operations = {
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceEntry')]),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceEntryListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'patch': {
                'input_type': patch_input_type,
                'output_type': type.VoidType(),
                'errors': patch_error_dict,
                'input_value_validator_list': patch_input_value_validator_list,
                'output_validator_list': patch_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.DynamicStructType('vmware.vapi.dynamic_struct', {}, VapiStruct, [type.ReferenceType('com.vmware.nsx_policy.model_client', 'ServiceEntry')]),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        rest_metadata = {
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'patch': patch_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx_policy.infra.services.service_entries',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class StubFactory(StubFactoryBase):
    """Factory that exposes the ServiceEntries API stub by attribute name."""
    # Maps public attribute names to the stub classes this factory provides.
    _attrs = {
        'ServiceEntries': ServiceEntries,
    }
| [
"dungla2011@gmail.com"
] | dungla2011@gmail.com |
c1d2ad1b4ef08b921ee81f80d41045d6c1deef7a | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_211/ch27_2020_03_11_19_25_38_657892.py | 6bfeffb25e1b70d6b961b54597fe66634b50247f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | '''Faça um programa que pergunta ao aluno se ele tem dúvidas na disciplina. Se o aluno responder qualquer coisa diferente de "não", escreva "Pratique mais" e pergunte novamente se ele tem dúvidas. Continue perguntando até que o aluno responda que não tem dúvidas. Finalmente, escreva "Até a próxima".
Seu programa deve imprimir as strings exatamente como descritas acima e nada mais.'''
x=input("você tem alguma dúvida?")
while x!="não":
x=input("você tem alguma dúvida?")
| [
"you@example.com"
] | you@example.com |
5326aadeaf20fb0e6e60b6f2a9f3f75699f6c732 | d623b8fe1b7e5d49d1c2623fc6ff0356bda50d5d | /tests/components/bluetooth/test_init.py | 9b958e2fadeb2c4bd0469a829652668f93675d45 | [
"Apache-2.0"
] | permissive | piotr-kubiak/home-assistant | 02f1ab8195d9111c6d4c96a55715e67de6b103d9 | d32f3e359f1fabe2d79b0e07e375b3723b7cb07c | refs/heads/dev | 2023-03-03T11:08:25.871531 | 2022-08-26T19:41:41 | 2022-08-26T19:41:41 | 198,906,482 | 1 | 0 | Apache-2.0 | 2023-02-22T06:23:51 | 2019-07-25T22:00:44 | Python | UTF-8 | Python | false | false | 69,951 | py | """Tests for the Bluetooth integration."""
import asyncio
from datetime import timedelta
import time
from unittest.mock import MagicMock, Mock, patch
from bleak import BleakError
from bleak.backends.scanner import AdvertisementData, BLEDevice
import pytest
from homeassistant.components import bluetooth
from homeassistant.components.bluetooth import (
BluetoothChange,
BluetoothScanningMode,
BluetoothServiceInfo,
async_process_advertisements,
async_rediscover_address,
async_track_unavailable,
models,
scanner,
)
from homeassistant.components.bluetooth.const import (
CONF_PASSIVE,
DEFAULT_ADDRESS,
DOMAIN,
SOURCE_LOCAL,
UNAVAILABLE_TRACK_SECONDS,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant, callback
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from . import (
_get_manager,
async_setup_with_default_adapter,
inject_advertisement,
inject_advertisement_with_time_and_source_connectable,
patch_discovered_devices,
)
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_setup_and_stop(hass, mock_bleak_scanner_start, enable_bluetooth):
    """Test that we can set up and stop the scanner."""
    # Provide one integration matcher so discovery has something to match.
    mock_bt = [
        {"domain": "switchbot", "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b"}
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init"):
        assert await async_setup_component(
            hass, bluetooth.DOMAIN, {bluetooth.DOMAIN: {}}
        )
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    # The scanner should have been started exactly once across the lifecycle.
    assert len(mock_bleak_scanner_start.mock_calls) == 1
async def test_setup_and_stop_passive(hass, mock_bleak_scanner_start, one_adapter):
    """Test that we can set up and stop the passive scanner."""
    entry = MockConfigEntry(
        domain=bluetooth.DOMAIN,
        data={},
        options={CONF_PASSIVE: True},
        unique_id="00:00:00:00:00:01",
    )
    entry.add_to_hass(hass)
    # Captured constructor kwargs of the (mocked) BleakScanner so we can
    # verify the passive option was translated into scanner arguments.
    init_kwargs = None

    class MockPassiveBleakScanner:
        def __init__(self, *args, **kwargs):
            """Init the scanner."""
            nonlocal init_kwargs
            init_kwargs = kwargs

        async def start(self, *args, **kwargs):
            """Start the scanner."""

        async def stop(self, *args, **kwargs):
            """Stop the scanner."""

        def register_detection_callback(self, *args, **kwargs):
            """Register a callback."""

    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner",
        MockPassiveBleakScanner,
    ):
        assert await async_setup_component(
            hass, bluetooth.DOMAIN, {bluetooth.DOMAIN: {}}
        )
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    # The CONF_PASSIVE option must produce a passive-mode BlueZ scanner.
    assert init_kwargs == {
        "adapter": "hci0",
        "bluez": scanner.PASSIVE_SCANNER_ARGS,
        "scanning_mode": "passive",
    }
async def test_setup_and_stop_no_bluetooth(hass, caplog, macos_adapter):
    """Test we fail gracefully when bluetooth is not available."""
    mock_bt = [
        {"domain": "switchbot", "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b"}
    ]
    # The scanner constructor itself raises, simulating bluetooth being
    # entirely unavailable on the host.
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner",
        side_effect=BleakError,
    ) as mock_ha_bleak_scanner, patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    # Only the single failed construction attempt should have happened.
    assert len(mock_ha_bleak_scanner.mock_calls) == 1
    assert "Failed to initialize Bluetooth" in caplog.text
async def test_setup_and_stop_broken_bluetooth(hass, caplog, macos_adapter):
    """Test we fail gracefully when bluetooth/dbus is broken."""
    mock_bt = []
    # Construction succeeds but starting the scanner raises, simulating a
    # broken dbus/bluez stack.
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
        side_effect=BleakError,
    ), patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    assert "Failed to start Bluetooth" in caplog.text
    # No devices can have been discovered since the scanner never started.
    assert len(bluetooth.async_discovered_service_info(hass)) == 0
async def test_setup_and_stop_broken_bluetooth_hanging(hass, caplog, macos_adapter):
    """Test we fail gracefully when bluetooth/dbus is hanging."""
    mock_bt = []

    async def _mock_hang():
        # Sleeps longer than the (patched) start timeout below.
        await asyncio.sleep(1)

    # START_TIMEOUT is forced to 0 so the hanging start trips the timeout
    # immediately instead of slowing the test down.
    with patch.object(scanner, "START_TIMEOUT", 0), patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
        side_effect=_mock_hang,
    ), patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    assert "Timed out starting Bluetooth" in caplog.text
async def test_setup_and_retry_adapter_not_yet_available(hass, caplog, macos_adapter):
    """Test we retry if the adapter is not yet available."""
    mock_bt = []
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
        side_effect=BleakError,
    ), patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

    entry = hass.config_entries.async_entries(bluetooth.DOMAIN)[0]

    assert "Failed to start Bluetooth" in caplog.text
    assert len(bluetooth.async_discovered_service_info(hass)) == 0
    # Failed start must leave the entry in the retry state, not failed.
    assert entry.state == ConfigEntryState.SETUP_RETRY

    # Once start succeeds, advancing time past the retry interval should
    # bring the entry up.
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
    ):
        async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=10))
        await hass.async_block_till_done()
    assert entry.state == ConfigEntryState.LOADED

    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.stop",
    ):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
async def test_no_race_during_manual_reload_in_retry_state(hass, caplog, macos_adapter):
    """Test we can successfully reload when the entry is in a retry state."""
    mock_bt = []
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
        side_effect=BleakError,
    ), patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

    entry = hass.config_entries.async_entries(bluetooth.DOMAIN)[0]

    assert "Failed to start Bluetooth" in caplog.text
    assert len(bluetooth.async_discovered_service_info(hass)) == 0
    assert entry.state == ConfigEntryState.SETUP_RETRY

    # A manual reload while in SETUP_RETRY must not race with the pending
    # automatic retry; it should load cleanly once start succeeds.
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.start",
    ):
        await hass.config_entries.async_reload(entry.entry_id)
        await hass.async_block_till_done()
    assert entry.state == ConfigEntryState.LOADED

    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner.stop",
    ):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()
async def test_calling_async_discovered_devices_no_bluetooth(
    hass, caplog, macos_adapter
):
    """Test we fail gracefully when asking for discovered devices and there is no bluetooth."""
    mock_bt = []
    # FileNotFoundError simulates the adapter device node being absent.
    with patch(
        "homeassistant.components.bluetooth.scanner.OriginalBleakScanner",
        side_effect=FileNotFoundError,
    ), patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
        await hass.async_block_till_done()

    assert "Failed to initialize Bluetooth" in caplog.text
    # The public query APIs must still be callable and return empty/falsey
    # results rather than raising.
    assert not bluetooth.async_discovered_service_info(hass)
    assert not bluetooth.async_address_present(hass, "aa:bb:bb:dd:ee:ff")
async def test_discovery_match_by_service_uuid(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test bluetooth discovery match by service_uuid."""
    mock_bt = [
        {"domain": "switchbot", "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b"}
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        await async_setup_with_default_adapter(hass)

        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        # An advertisement without the matching service UUID must not start
        # a config flow.
        wrong_device = BLEDevice("44:44:33:11:23:45", "wrong_name")
        wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])

        inject_advertisement(hass, wrong_device, wrong_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # The matching service UUID should trigger the switchbot flow.
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand", service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )

        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "switchbot"
def _domains_from_mock_config_flow(mock_config_flow: Mock) -> list[str]:
    """Get all the domains that were passed to async_init except bluetooth."""
    domains: list[str] = []
    for mocked_call in mock_config_flow.mock_calls:
        domain = mocked_call[1][0]
        if domain != DOMAIN:
            domains.append(domain)
    return domains
async def test_discovery_match_by_service_uuid_connectable(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by service_uuid and the ble device is connectable."""
    # Matcher explicitly requires a connectable device.
    mock_bt = [
        {
            "domain": "switchbot",
            "connectable": True,
            "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b",
        }
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        await async_setup_with_default_adapter(hass)

        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        wrong_device = BLEDevice("44:44:33:11:23:45", "wrong_name")
        wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])

        inject_advertisement_with_time_and_source_connectable(
            hass, wrong_device, wrong_adv, time.monotonic(), "any", True
        )
        await hass.async_block_till_done()

        assert len(_domains_from_mock_config_flow(mock_config_flow)) == 0

        # A connectable advertisement with the matching UUID should start
        # exactly one switchbot flow.
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand", service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )

        inject_advertisement_with_time_and_source_connectable(
            hass, switchbot_device, switchbot_adv, time.monotonic(), "any", True
        )
        await hass.async_block_till_done()

        called_domains = _domains_from_mock_config_flow(mock_config_flow)
        assert len(called_domains) == 1
        assert called_domains == ["switchbot"]
async def test_discovery_match_by_service_uuid_not_connectable(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by service_uuid and the ble device is not connectable."""
    # Matcher requires connectable=True, so non-connectable advertisements
    # must never start a flow even when the UUID matches.
    mock_bt = [
        {
            "domain": "switchbot",
            "connectable": True,
            "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b",
        }
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        await async_setup_with_default_adapter(hass)

        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        wrong_device = BLEDevice("44:44:33:11:23:45", "wrong_name")
        wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])

        inject_advertisement_with_time_and_source_connectable(
            hass, wrong_device, wrong_adv, time.monotonic(), "any", False
        )
        await hass.async_block_till_done()

        assert len(_domains_from_mock_config_flow(mock_config_flow)) == 0

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand", service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )

        # UUID matches but the advertisement is not connectable -> no flow.
        inject_advertisement_with_time_and_source_connectable(
            hass, switchbot_device, switchbot_adv, time.monotonic(), "any", False
        )
        await hass.async_block_till_done()

        assert len(_domains_from_mock_config_flow(mock_config_flow)) == 0
async def test_discovery_match_by_name_connectable_false(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by name and the integration will take non-connectable devices."""
    # connectable=False means the integration accepts advertisements from
    # any source, connectable or not.
    mock_bt = [
        {
            "domain": "qingping",
            "connectable": False,
            "local_name": "Qingping*",
        }
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        await async_setup_with_default_adapter(hass)

        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        wrong_device = BLEDevice("44:44:33:11:23:45", "wrong_name")
        wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])

        inject_advertisement_with_time_and_source_connectable(
            hass, wrong_device, wrong_adv, time.monotonic(), "any", False
        )
        await hass.async_block_till_done()

        assert len(_domains_from_mock_config_flow(mock_config_flow)) == 0

        # Local name matches the "Qingping*" glob -> flow starts even though
        # the advertisement is not connectable.
        qingping_device = BLEDevice("44:44:33:11:23:45", "Qingping Motion & Light")
        qingping_adv = AdvertisementData(
            local_name="Qingping Motion & Light",
            service_data={
                "0000fdcd-0000-1000-8000-00805f9b34fb": b"H\x12\xcd\xd5`4-X\x08\x04\x01\xe8\x00\x00\x0f\x01{"
            },
        )

        inject_advertisement_with_time_and_source_connectable(
            hass, qingping_device, qingping_adv, time.monotonic(), "any", False
        )
        await hass.async_block_till_done()

        assert _domains_from_mock_config_flow(mock_config_flow) == ["qingping"]

        mock_config_flow.reset_mock()
        # Make sure it will also take a connectable device
        inject_advertisement_with_time_and_source_connectable(
            hass, qingping_device, qingping_adv, time.monotonic(), "any", True
        )
        await hass.async_block_till_done()

        assert _domains_from_mock_config_flow(mock_config_flow) == ["qingping"]
async def test_discovery_match_by_local_name(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by local_name."""
    mock_bt = [{"domain": "switchbot", "local_name": "wohand"}]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        # Non-matching local name -> no flow.
        wrong_device = BLEDevice("44:44:33:11:23:45", "wrong_name")
        wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])

        inject_advertisement(hass, wrong_device, wrong_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # Matching local name "wohand" -> one switchbot flow.
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(local_name="wohand", service_uuids=[])

        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "switchbot"
async def test_discovery_match_by_manufacturer_id_and_manufacturer_data_start(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by manufacturer_id and manufacturer_data_start."""
    # Matches Apple's manufacturer id (76) with a specific data prefix.
    mock_bt = [
        {
            "domain": "homekit_controller",
            "manufacturer_id": 76,
            "manufacturer_data_start": [0x06, 0x02, 0x03],
        }
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        hkc_device = BLEDevice("44:44:33:11:23:45", "lock")
        hkc_adv_no_mfr_data = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={},
        )
        hkc_adv = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={76: b"\x06\x02\x03\x99"},
        )

        # 1st discovery with no manufacturer data
        # should not trigger config flow
        inject_advertisement(hass, hkc_device, hkc_adv_no_mfr_data)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 2nd discovery with manufacturer data
        # should trigger a config flow
        inject_advertisement(hass, hkc_device, hkc_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "homekit_controller"
        mock_config_flow.reset_mock()

        # 3rd discovery should not generate another flow
        inject_advertisement(hass, hkc_device, hkc_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        mock_config_flow.reset_mock()
        # Right manufacturer id but the data prefix does not match.
        not_hkc_device = BLEDevice("44:44:33:11:23:21", "lock")
        not_hkc_adv = AdvertisementData(
            local_name="lock", service_uuids=[], manufacturer_data={76: b"\x02"}
        )

        inject_advertisement(hass, not_hkc_device, not_hkc_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # Different manufacturer id entirely.
        not_apple_device = BLEDevice("44:44:33:11:23:23", "lock")
        not_apple_adv = AdvertisementData(
            local_name="lock", service_uuids=[], manufacturer_data={21: b"\x02"}
        )

        inject_advertisement(hass, not_apple_device, not_apple_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
async def test_discovery_match_by_service_data_uuid_then_others(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by service_data_uuid and then other fields.

    Each matcher field (service_data_uuid, service_uuid, manufacturer_id) is
    only allowed to trigger a flow the first time it is newly seen for a
    given address.
    """
    mock_bt = [
        {
            "domain": "my_domain",
            "service_data_uuid": "0000fd3d-0000-1000-8000-00805f9b34fb",
        },
        {
            "domain": "my_domain",
            "service_uuid": "0000fd3d-0000-1000-8000-00805f9b34fc",
        },
        {
            "domain": "other_domain",
            "manufacturer_id": 323,
        },
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        device = BLEDevice("44:44:33:11:23:45", "lock")
        adv_without_service_data_uuid = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={},
        )
        adv_with_mfr_data = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={323: b"\x01\x02\x03"},
            service_data={},
        )
        adv_with_service_data_uuid = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={},
            service_data={"0000fd3d-0000-1000-8000-00805f9b34fb": b"\x01\x02\x03"},
        )
        adv_with_service_data_uuid_and_mfr_data = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={323: b"\x01\x02\x03"},
            service_data={"0000fd3d-0000-1000-8000-00805f9b34fb": b"\x01\x02\x03"},
        )
        adv_with_service_data_uuid_and_mfr_data_and_service_uuid = AdvertisementData(
            local_name="lock",
            manufacturer_data={323: b"\x01\x02\x03"},
            service_data={"0000fd3d-0000-1000-8000-00805f9b34fb": b"\x01\x02\x03"},
            service_uuids=["0000fd3d-0000-1000-8000-00805f9b34fd"],
        )
        adv_with_service_uuid = AdvertisementData(
            local_name="lock",
            manufacturer_data={},
            service_data={},
            service_uuids=["0000fd3d-0000-1000-8000-00805f9b34fd"],
        )

        # 1st discovery should not generate a flow because the
        # service_data_uuid is not in the advertisement
        inject_advertisement(hass, device, adv_without_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 2nd discovery should not generate a flow because the
        # service_data_uuid is not in the advertisement
        inject_advertisement(hass, device, adv_without_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 3rd discovery should generate a flow because the
        # manufacturer_data is in the advertisement
        inject_advertisement(hass, device, adv_with_mfr_data)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "other_domain"

        mock_config_flow.reset_mock()

        # 4th discovery should generate a flow because the
        # service_data_uuid is in the advertisement and
        # we never saw a service_data_uuid before
        inject_advertisement(hass, device, adv_with_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "my_domain"

        mock_config_flow.reset_mock()

        # 5th discovery should not generate a flow because the
        # we already saw an advertisement with the service_data_uuid
        inject_advertisement(hass, device, adv_with_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # 6th discovery should not generate a flow because the
        # manufacturer_data is in the advertisement
        # and we saw manufacturer_data before
        inject_advertisement(hass, device, adv_with_service_data_uuid_and_mfr_data)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 7th discovery should generate a flow because the
        # service_uuids is in the advertisement
        # and we never saw service_uuids before
        inject_advertisement(
            hass, device, adv_with_service_data_uuid_and_mfr_data_and_service_uuid
        )
        await hass.async_block_till_done()

        # Both domains fire: my_domain (new service_uuid) and other_domain.
        assert len(mock_config_flow.mock_calls) == 2
        assert {
            mock_config_flow.mock_calls[0][1][0],
            mock_config_flow.mock_calls[1][1][0],
        } == {"my_domain", "other_domain"}
        mock_config_flow.reset_mock()

        # 8th discovery should not generate a flow
        # since all fields have been seen at this point
        inject_advertisement(
            hass, device, adv_with_service_data_uuid_and_mfr_data_and_service_uuid
        )
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 9th discovery should not generate a flow
        # since all fields have been seen at this point
        inject_advertisement(hass, device, adv_with_service_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # 10th discovery should not generate a flow
        # since all fields have been seen at this point
        inject_advertisement(hass, device, adv_with_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # 11th discovery should not generate a flow
        # since all fields have been seen at this point
        inject_advertisement(hass, device, adv_without_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
async def test_discovery_match_by_service_data_uuid_when_format_changes(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery match by service_data_uuid when format changes.

    Some devices (e.g. Qingping) can switch advertisement formats; each new
    service_data_uuid must trigger its own domain's flow exactly once.
    """
    mock_bt = [
        {
            "domain": "xiaomi_ble",
            "service_data_uuid": "0000fe95-0000-1000-8000-00805f9b34fb",
        },
        {
            "domain": "qingping",
            "service_data_uuid": "0000fdcd-0000-1000-8000-00805f9b34fb",
        },
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        device = BLEDevice("44:44:33:11:23:45", "lock")
        adv_without_service_data_uuid = AdvertisementData(
            local_name="Qingping Temp RH M",
            service_uuids=[],
            manufacturer_data={},
        )
        xiaomi_format_adv = AdvertisementData(
            local_name="Qingping Temp RH M",
            service_data={
                "0000fe95-0000-1000-8000-00805f9b34fb": b"0XH\x0b\x06\xa7%\x144-X\x08"
            },
        )
        qingping_format_adv = AdvertisementData(
            local_name="Qingping Temp RH M",
            service_data={
                "0000fdcd-0000-1000-8000-00805f9b34fb": b"\x08\x16\xa7%\x144-X\x01\x04\xdb\x00\xa6\x01\x02\x01d"
            },
        )

        # 1st discovery should not generate a flow because the
        # service_data_uuid is not in the advertisement
        inject_advertisement(hass, device, adv_without_service_data_uuid)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 2nd discovery should generate a flow because the
        # service_data_uuid matches xiaomi format
        inject_advertisement(hass, device, xiaomi_format_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "xiaomi_ble"

        mock_config_flow.reset_mock()

        # 3rd discovery should generate a flow because the
        # service_data_uuid matches qingping format
        inject_advertisement(hass, device, qingping_format_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "qingping"

        mock_config_flow.reset_mock()

        # 4th discovery should not generate a flow because
        # we already saw an advertisement with the service_data_uuid
        inject_advertisement(hass, device, qingping_format_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()

        # 5th discovery should not generate a flow because
        # we already saw an advertisement with the service_data_uuid
        inject_advertisement(hass, device, xiaomi_format_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
        mock_config_flow.reset_mock()
async def test_discovery_match_first_by_service_uuid_and_then_manufacturer_id(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test bluetooth discovery matches twice for service_uuid and then manufacturer_id."""
    # Same domain registered with two different matcher fields; each field
    # may trigger its own flow once.
    mock_bt = [
        {
            "domain": "my_domain",
            "manufacturer_id": 76,
        },
        {
            "domain": "my_domain",
            "service_uuid": "0000fd3d-0000-1000-8000-00805f9b34fc",
        },
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        device = BLEDevice("44:44:33:11:23:45", "lock")
        adv_service_uuids = AdvertisementData(
            local_name="lock",
            service_uuids=["0000fd3d-0000-1000-8000-00805f9b34fc"],
            manufacturer_data={},
        )
        adv_manufacturer_data = AdvertisementData(
            local_name="lock",
            service_uuids=[],
            manufacturer_data={76: b"\x06\x02\x03\x99"},
        )

        # 1st discovery with matches service_uuid
        # should trigger config flow
        inject_advertisement(hass, device, adv_service_uuids)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "my_domain"
        mock_config_flow.reset_mock()

        # 2nd discovery with manufacturer data
        # should trigger a config flow
        inject_advertisement(hass, device, adv_manufacturer_data)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "my_domain"
        mock_config_flow.reset_mock()

        # 3rd discovery should not generate another flow
        inject_advertisement(hass, device, adv_service_uuids)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0

        # 4th discovery should not generate another flow
        inject_advertisement(hass, device, adv_manufacturer_data)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 0
async def test_rediscovery(hass, mock_bleak_scanner_start, enable_bluetooth):
    """Test bluetooth discovery can be re-enabled for a given domain."""
    mock_bt = [
        {"domain": "switchbot", "service_uuid": "cba20d00-224d-11e6-9fb8-0002a5d5c51b"}
    ]
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init") as mock_config_flow:
        await async_setup_with_default_adapter(hass)

        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        assert len(mock_bleak_scanner_start.mock_calls) == 1

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand", service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )

        # Repeated advertisements for the same address only start one flow.
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()

        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 1
        assert mock_config_flow.mock_calls[0][1][0] == "switchbot"

        # After re-enabling discovery for the address, the next
        # advertisement should start a second flow.
        async_rediscover_address(hass, "44:44:33:11:23:45")

        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()

        assert len(mock_config_flow.mock_calls) == 2
        assert mock_config_flow.mock_calls[1][1][0] == "switchbot"
async def test_async_discovered_device_api(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test the async_discovered_device API."""
    mock_bt = []
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch(
        "bleak.BleakScanner.discovered_devices",  # Must patch before we setup
        [MagicMock(address="44:44:33:11:23:45")],
    ):
        # Before setup the query APIs must return empty results.
        assert not bluetooth.async_discovered_service_info(hass)
        assert not bluetooth.async_address_present(hass, "44:44:22:22:11:22")
        await async_setup_with_default_adapter(hass)

        with patch.object(hass.config_entries.flow, "async_init"):
            hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
            await hass.async_block_till_done()

            assert len(mock_bleak_scanner_start.mock_calls) == 1

            assert not bluetooth.async_discovered_service_info(hass)

            wrong_device = BLEDevice("44:44:33:11:23:42", "wrong_name")
            wrong_adv = AdvertisementData(local_name="wrong_name", service_uuids=[])
            inject_advertisement(hass, wrong_device, wrong_adv)
            switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
            switchbot_adv = AdvertisementData(local_name="wohand", service_uuids=[])
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            wrong_device_went_unavailable = False
            switchbot_device_went_unavailable = False

            @callback
            def _wrong_device_unavailable_callback(_address: str) -> None:
                """Wrong device unavailable callback."""
                nonlocal wrong_device_went_unavailable
                wrong_device_went_unavailable = True
                # A raising callback must not break the other callbacks.
                raise ValueError("blow up")

            @callback
            def _switchbot_device_unavailable_callback(_address: str) -> None:
                """Switchbot device unavailable callback."""
                nonlocal switchbot_device_went_unavailable
                switchbot_device_went_unavailable = True

            wrong_device_unavailable_cancel = async_track_unavailable(
                hass, _wrong_device_unavailable_callback, wrong_device.address
            )
            switchbot_device_unavailable_cancel = async_track_unavailable(
                hass, _switchbot_device_unavailable_callback, switchbot_device.address
            )

            async_fire_time_changed(
                hass, dt_util.utcnow() + timedelta(seconds=UNAVAILABLE_TRACK_SECONDS)
            )
            await hass.async_block_till_done()

            service_infos = bluetooth.async_discovered_service_info(hass)
            # Only the wrong device went unavailable: the mocked bleak
            # scanner still reports 44:44:33:11:23:45 as discovered.
            assert switchbot_device_went_unavailable is False
            assert wrong_device_went_unavailable is True

            # See the devices again
            inject_advertisement(hass, wrong_device, wrong_adv)
            inject_advertisement(hass, switchbot_device, switchbot_adv)

            # Cancel the callbacks
            wrong_device_unavailable_cancel()
            switchbot_device_unavailable_cancel()
            wrong_device_went_unavailable = False
            switchbot_device_went_unavailable = False

            # Verify the cancel is effective
            async_fire_time_changed(
                hass, dt_util.utcnow() + timedelta(seconds=UNAVAILABLE_TRACK_SECONDS)
            )
            await hass.async_block_till_done()
            assert switchbot_device_went_unavailable is False
            assert wrong_device_went_unavailable is False

            assert len(service_infos) == 1
            # wrong_name should not appear because bleak no longer sees it
            infos = list(service_infos)
            assert infos[0].name == "wohand"
            assert infos[0].source == SOURCE_LOCAL
            assert isinstance(infos[0].device, BLEDevice)
            assert isinstance(infos[0].advertisement, AdvertisementData)

            assert bluetooth.async_address_present(hass, "44:44:33:11:23:42") is False
            assert bluetooth.async_address_present(hass, "44:44:33:11:23:45") is True
async def test_register_callbacks(hass, mock_bleak_scanner_start, enable_bluetooth):
    """Test registering a callback."""
    mock_bt = []
    callbacks = []

    def _fake_subscriber(
        service_info: BluetoothServiceInfo,
        change: BluetoothChange,
    ) -> None:
        """Fake subscriber for the BleakScanner."""
        callbacks.append((service_info, change))
        if len(callbacks) >= 3:
            # Third invocation blows up; the dispatcher must keep working.
            raise ValueError

    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch.object(hass.config_entries.flow, "async_init"):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        cancel = bluetooth.async_register_callback(
            hass,
            _fake_subscriber,
            {"service_uuids": {"cba20d00-224d-11e6-9fb8-0002a5d5c51b"}},
            BluetoothScanningMode.ACTIVE,
        )
        assert len(mock_bleak_scanner_start.mock_calls) == 1
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        empty_device = BLEDevice("11:22:33:44:55:66", "empty")
        empty_adv = AdvertisementData(local_name="empty")
        inject_advertisement(hass, empty_device, empty_adv)
        await hass.async_block_till_done()
        empty_device = BLEDevice("11:22:33:44:55:66", "empty")
        empty_adv = AdvertisementData(local_name="empty")
        # 3rd callback raises ValueError but is still tracked
        inject_advertisement(hass, empty_device, empty_adv)
        await hass.async_block_till_done()
        cancel()
        # 4th callback should not be tracked since we canceled
        inject_advertisement(hass, empty_device, empty_adv)
        await hass.async_block_till_done()
        assert len(callbacks) == 3
        service_info: BluetoothServiceInfo = callbacks[0][0]
        assert service_info.name == "wohand"
        assert service_info.source == SOURCE_LOCAL
        assert service_info.manufacturer == "Nordic Semiconductor ASA"
        assert service_info.manufacturer_id == 89
        service_info: BluetoothServiceInfo = callbacks[1][0]
        assert service_info.name == "empty"
        assert service_info.source == SOURCE_LOCAL
        # "empty" advertises no manufacturer data, so both fields resolve to None.
        assert service_info.manufacturer is None
        assert service_info.manufacturer_id is None
        service_info: BluetoothServiceInfo = callbacks[2][0]
        assert service_info.name == "empty"
        assert service_info.source == SOURCE_LOCAL
        assert service_info.manufacturer is None
        assert service_info.manufacturer_id is None
async def test_register_callback_by_address(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test registering a callback by address."""
    mock_bt = []
    callbacks = []

    def _fake_subscriber(
        service_info: BluetoothServiceInfo, change: BluetoothChange
    ) -> None:
        """Fake subscriber for the BleakScanner."""
        callbacks.append((service_info, change))
        if len(callbacks) >= 3:
            # The third delivery raises; registration must still succeed.
            raise ValueError

    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        with patch.object(hass.config_entries.flow, "async_init"):
            hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
            await hass.async_block_till_done()
            cancel = bluetooth.async_register_callback(
                hass,
                _fake_subscriber,
                {"address": "44:44:33:11:23:45"},
                BluetoothScanningMode.ACTIVE,
            )
            assert len(mock_bleak_scanner_start.mock_calls) == 1
            switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
            switchbot_adv = AdvertisementData(
                local_name="wohand",
                service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
                manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
                service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
            )
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            # The empty device does not match the address filter above.
            empty_device = BLEDevice("11:22:33:44:55:66", "empty")
            empty_adv = AdvertisementData(local_name="empty")
            inject_advertisement(hass, empty_device, empty_adv)
            await hass.async_block_till_done()
            empty_device = BLEDevice("11:22:33:44:55:66", "empty")
            empty_adv = AdvertisementData(local_name="empty")
            # 3rd callback raises ValueError but is still tracked
            inject_advertisement(hass, empty_device, empty_adv)
            await hass.async_block_till_done()
            cancel()
            # 4th callback should not be tracked since we canceled
            inject_advertisement(hass, empty_device, empty_adv)
            await hass.async_block_till_done()
            # Now register again with a callback that fails to
            # make sure we do not perm fail
            cancel = bluetooth.async_register_callback(
                hass,
                _fake_subscriber,
                {"address": "44:44:33:11:23:45"},
                BluetoothScanningMode.ACTIVE,
            )
            cancel()
            # Now register again, since the 3rd callback
            # should fail but we should still record it
            cancel = bluetooth.async_register_callback(
                hass,
                _fake_subscriber,
                {"address": "44:44:33:11:23:45"},
                BluetoothScanningMode.ACTIVE,
            )
            cancel()
    # Every recorded callback delivered the matching (wohand) advertisement.
    assert len(callbacks) == 3
    for idx in range(3):
        service_info: BluetoothServiceInfo = callbacks[idx][0]
        assert service_info.name == "wohand"
        assert service_info.manufacturer == "Nordic Semiconductor ASA"
        assert service_info.manufacturer_id == 89
async def test_register_callback_survives_reload(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test registering a callback by address survives bluetooth being reloaded."""
    mock_bt = []
    callbacks = []

    def _fake_subscriber(
        service_info: BluetoothServiceInfo, change: BluetoothChange
    ) -> None:
        """Fake subscriber for the BleakScanner."""
        callbacks.append((service_info, change))

    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ):
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        # Intentionally never cancelled: the callback must outlive a reload.
        bluetooth.async_register_callback(
            hass,
            _fake_subscriber,
            {"address": "44:44:33:11:23:45"},
            BluetoothScanningMode.ACTIVE,
        )
        assert len(mock_bleak_scanner_start.mock_calls) == 1
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            # NOTE(review): UUID starts with "zba" (not "cba"); matching here is
            # by address so the UUID value is irrelevant — confirm intent.
            service_uuids=["zba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        assert len(callbacks) == 1
        service_info: BluetoothServiceInfo = callbacks[0][0]
        assert service_info.name == "wohand"
        assert service_info.manufacturer == "Nordic Semiconductor ASA"
        assert service_info.manufacturer_id == 89
        # Reload the config entry, then verify the callback still fires.
        entry = hass.config_entries.async_entries(bluetooth.DOMAIN)[0]
        await hass.config_entries.async_reload(entry.entry_id)
        await hass.async_block_till_done()
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        assert len(callbacks) == 2
        service_info: BluetoothServiceInfo = callbacks[1][0]
        assert service_info.name == "wohand"
        assert service_info.manufacturer == "Nordic Semiconductor ASA"
        assert service_info.manufacturer_id == 89
async def test_process_advertisements_bail_on_good_advertisement(
    hass: HomeAssistant, mock_bleak_scanner_start, enable_bluetooth
):
    """Test as soon as we see a 'good' advertisement we return it."""
    done = asyncio.Future()

    def _callback(service_info: BluetoothServiceInfo) -> bool:
        # Signal the injection loop that the processor has seen at least one
        # advertisement; accept it because it carries service_data.
        done.set_result(None)
        return len(service_info.service_data) > 0

    handle = hass.async_create_task(
        async_process_advertisements(
            hass,
            _callback,
            {"address": "aa:44:33:11:23:45"},
            BluetoothScanningMode.ACTIVE,
            5,
        )
    )
    # Keep injecting until the processor's callback has actually run.
    while not done.done():
        device = BLEDevice("aa:44:33:11:23:45", "wohand")
        adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51a"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fa": b"H\x10c"},
        )
        inject_advertisement(hass, device, adv)
        inject_advertisement(hass, device, adv)
        inject_advertisement(hass, device, adv)
        await asyncio.sleep(0)
    result = await handle
    assert result.name == "wohand"
async def test_process_advertisements_ignore_bad_advertisement(
    hass: HomeAssistant, mock_bleak_scanner_start, enable_bluetooth
):
    """Check that we ignore bad advertisements."""
    done = asyncio.Event()
    return_value = asyncio.Event()
    device = BLEDevice("aa:44:33:11:23:45", "wohand")
    # Initially "bad": the service_data payload is empty.
    adv = AdvertisementData(
        local_name="wohand",
        service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51a"],
        manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
        service_data={"00000d00-0000-1000-8000-00805f9b34fa": b""},
    )

    def _callback(service_info: BluetoothServiceInfo) -> bool:
        done.set()
        # Reject until the test flips return_value below.
        return return_value.is_set()

    handle = hass.async_create_task(
        async_process_advertisements(
            hass,
            _callback,
            {"address": "aa:44:33:11:23:45"},
            BluetoothScanningMode.ACTIVE,
            5,
        )
    )
    # The goal of this loop is to make sure that async_process_advertisements sees at least one
    # callback that returns False
    while not done.is_set():
        inject_advertisement(hass, device, adv)
        await asyncio.sleep(0)
    # Set the return value and mutate the advertisement
    # Check that scan ends and correct advertisement data is returned
    return_value.set()
    adv.service_data["00000d00-0000-1000-8000-00805f9b34fa"] = b"H\x10c"
    inject_advertisement(hass, device, adv)
    await asyncio.sleep(0)
    result = await handle
    assert result.service_data["00000d00-0000-1000-8000-00805f9b34fa"] == b"H\x10c"
async def test_process_advertisements_timeout(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test we timeout if no advertisements at all."""

    def _callback(service_info: BluetoothServiceInfo) -> bool:
        # Never accept anything so the timeout path is exercised.
        return False

    # A zero-second timeout with no accepted advertisement must raise.
    with pytest.raises(asyncio.TimeoutError):
        await async_process_advertisements(
            hass, _callback, {}, BluetoothScanningMode.ACTIVE, 0
        )
async def test_wrapped_instance_with_filter(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test consumers can use the wrapped instance with a filter as if it was normal BleakScanner."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        detected = []

        def _device_detected(
            device: BLEDevice, advertisement_data: AdvertisementData
        ) -> None:
            """Handle a detected device."""
            detected.append((device, advertisement_data))

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        empty_device = BLEDevice("11:22:33:44:55:66", "empty")
        empty_adv = AdvertisementData(local_name="empty")

        assert _get_manager() is not None
        # BlueZ-style "UUIDs" filter passed through the wrapper constructor.
        scanner = models.HaBleakScannerWrapper(
            filters={"UUIDs": ["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]}
        )
        scanner.register_detection_callback(_device_detected)
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()
        discovered = await scanner.discover(timeout=0)
        assert len(discovered) == 1
        assert discovered == [switchbot_device]
        assert len(detected) == 1
        scanner.register_detection_callback(_device_detected)
        # We should get a reply from the history when we register again
        assert len(detected) == 2
        scanner.register_detection_callback(_device_detected)
        # We should get a reply from the history when we register again
        assert len(detected) == 3
        with patch_discovered_devices([]):
            discovered = await scanner.discover(timeout=0)
            assert len(discovered) == 0
            assert discovered == []
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            assert len(detected) == 4
            # The filter we created in the wrapped scanner with should be respected
            # and we should not get another callback
            inject_advertisement(hass, empty_device, empty_adv)
            assert len(detected) == 4
async def test_wrapped_instance_with_service_uuids(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test consumers can use the wrapped instance with a service_uuids list as if it was normal BleakScanner."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        detected = []

        def _device_detected(
            device: BLEDevice, advertisement_data: AdvertisementData
        ) -> None:
            """Handle a detected device."""
            detected.append((device, advertisement_data))

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        empty_device = BLEDevice("11:22:33:44:55:66", "empty")
        empty_adv = AdvertisementData(local_name="empty")

        assert _get_manager() is not None
        scanner = models.HaBleakScannerWrapper(
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )
        scanner.register_detection_callback(_device_detected)
        # Two matching advertisements -> two detections.
        for _ in range(2):
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            await hass.async_block_till_done()
        assert len(detected) == 2
        # The UUIDs list we created in the wrapped scanner with should be respected
        # and we should not get another callback
        inject_advertisement(hass, empty_device, empty_adv)
        assert len(detected) == 2
async def test_wrapped_instance_with_broken_callbacks(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test broken callbacks do not cause the scanner to fail."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ), patch.object(hass.config_entries.flow, "async_init"):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        detected = []

        def _device_detected(
            device: BLEDevice, advertisement_data: AdvertisementData
        ) -> None:
            """Handle a detected device."""
            # Blow up on every call after the first one; the scanner must
            # swallow the error and keep dispatching.
            if detected:
                raise ValueError
            detected.append((device, advertisement_data))

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        assert _get_manager() is not None
        scanner = models.HaBleakScannerWrapper(
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )
        scanner.register_detection_callback(_device_detected)
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()
        # The second delivery raises inside the callback but must not propagate.
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()
        assert len(detected) == 1
async def test_wrapped_instance_changes_uuids(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test consumers can use the wrapped instance can change the uuids later."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        detected = []

        def _device_detected(
            device: BLEDevice, advertisement_data: AdvertisementData
        ) -> None:
            """Handle a detected device."""
            detected.append((device, advertisement_data))

        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        empty_device = BLEDevice("11:22:33:44:55:66", "empty")
        empty_adv = AdvertisementData(local_name="empty")

        assert _get_manager() is not None
        # UUIDs are applied AFTER construction via set_scanning_filter.
        scanner = models.HaBleakScannerWrapper()
        scanner.set_scanning_filter(
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]
        )
        scanner.register_detection_callback(_device_detected)
        for _ in range(2):
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            await hass.async_block_till_done()
        assert len(detected) == 2
        # The UUIDs list we created in the wrapped scanner with should be respected
        # and we should not get another callback
        inject_advertisement(hass, empty_device, empty_adv)
        assert len(detected) == 2
async def test_wrapped_instance_changes_filters(
    hass, mock_bleak_scanner_start, enable_bluetooth
):
    """Test consumers can use the wrapped instance can change the filter later."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()

        detected = []

        def _device_detected(
            device: BLEDevice, advertisement_data: AdvertisementData
        ) -> None:
            """Handle a detected device."""
            detected.append((device, advertisement_data))

        switchbot_device = BLEDevice("44:44:33:11:23:42", "wohand")
        switchbot_adv = AdvertisementData(
            local_name="wohand",
            service_uuids=["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
            manufacturer_data={89: b"\xd8.\xad\xcd\r\x85"},
            service_data={"00000d00-0000-1000-8000-00805f9b34fb": b"H\x10c"},
        )
        empty_device = BLEDevice("11:22:33:44:55:62", "empty")
        empty_adv = AdvertisementData(local_name="empty")

        assert _get_manager() is not None
        # A BlueZ-style filters dict is applied AFTER construction.
        scanner = models.HaBleakScannerWrapper()
        scanner.set_scanning_filter(
            filters={"UUIDs": ["cba20d00-224d-11e6-9fb8-0002a5d5c51b"]}
        )
        scanner.register_detection_callback(_device_detected)
        for _ in range(2):
            inject_advertisement(hass, switchbot_device, switchbot_adv)
            await hass.async_block_till_done()
        assert len(detected) == 2
        # The UUIDs list we created in the wrapped scanner with should be respected
        # and we should not get another callback
        inject_advertisement(hass, empty_device, empty_adv)
        assert len(detected) == 2
async def test_wrapped_instance_unsupported_filter(
    hass, mock_bleak_scanner_start, caplog, enable_bluetooth
):
    """Test we warn when the filter is ineffective."""
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=[]
    ):
        await async_setup_with_default_adapter(hass)

    with patch.object(hass.config_entries.flow, "async_init"):
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        assert _get_manager() is not None
        scanner = models.HaBleakScannerWrapper()
        # Only the "UUIDs" filter key is honoured; the rest must be logged.
        scanner.set_scanning_filter(
            filters={
                "unsupported": ["cba20d00-224d-11e6-9fb8-0002a5d5c51b"],
                "DuplicateData": True,
            }
        )
        assert "Only UUIDs filters are supported" in caplog.text
async def test_async_ble_device_from_address(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test the async_ble_device_from_address api."""
    mock_bt = []
    with patch(
        "homeassistant.components.bluetooth.async_get_bluetooth", return_value=mock_bt
    ), patch(
        "bleak.BleakScanner.discovered_devices",  # Must patch before we setup
        [MagicMock(address="44:44:33:11:23:45")],
    ):
        # Nothing resolvable before the integration is set up.
        assert not bluetooth.async_discovered_service_info(hass)
        assert not bluetooth.async_address_present(hass, "44:44:22:22:11:22")
        assert (
            bluetooth.async_ble_device_from_address(hass, "44:44:33:11:23:45") is None
        )
        await async_setup_with_default_adapter(hass)
        hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
        await hass.async_block_till_done()
        assert len(mock_bleak_scanner_start.mock_calls) == 1
        assert not bluetooth.async_discovered_service_info(hass)
        switchbot_device = BLEDevice("44:44:33:11:23:45", "wohand")
        switchbot_adv = AdvertisementData(local_name="wohand", service_uuids=[])
        inject_advertisement(hass, switchbot_device, switchbot_adv)
        await hass.async_block_till_done()
        # After an advertisement the exact BLEDevice object is returned by address.
        assert (
            bluetooth.async_ble_device_from_address(hass, "44:44:33:11:23:45")
            is switchbot_device
        )
        assert (
            bluetooth.async_ble_device_from_address(hass, "00:66:33:22:11:22") is None
        )
async def test_can_unsetup_bluetooth_single_adapter_macos(
    hass, mock_bleak_scanner_start, enable_bluetooth, macos_adapter
):
    """Test we can setup and unsetup bluetooth."""
    entry = MockConfigEntry(domain=bluetooth.DOMAIN, data={}, unique_id=DEFAULT_ADDRESS)
    entry.add_to_hass(hass)
    # Two full setup/unload cycles prove unload leaves no residual state.
    for _ in range(2):
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
async def test_can_unsetup_bluetooth_single_adapter_linux(
    hass, mock_bleak_scanner_start, enable_bluetooth, one_adapter
):
    """Test we can setup and unsetup bluetooth."""
    entry = MockConfigEntry(
        domain=bluetooth.DOMAIN, data={}, unique_id="00:00:00:00:00:01"
    )
    entry.add_to_hass(hass)
    # Two full setup/unload cycles prove unload leaves no residual state.
    for _ in range(2):
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
        assert await hass.config_entries.async_unload(entry.entry_id)
        await hass.async_block_till_done()
async def test_can_unsetup_bluetooth_multiple_adapters(
    hass, mock_bleak_scanner_start, enable_bluetooth, two_adapters
):
    """Test we can setup and unsetup bluetooth with multiple adapters."""
    entry1 = MockConfigEntry(
        domain=bluetooth.DOMAIN, data={}, unique_id="00:00:00:00:00:01"
    )
    entry1.add_to_hass(hass)
    entry2 = MockConfigEntry(
        domain=bluetooth.DOMAIN, data={}, unique_id="00:00:00:00:00:02"
    )
    entry2.add_to_hass(hass)
    # Cycle each adapter's entry through setup/unload twice.
    for _ in range(2):
        for entry in (entry1, entry2):
            assert await hass.config_entries.async_setup(entry.entry_id)
            await hass.async_block_till_done()
            assert await hass.config_entries.async_unload(entry.entry_id)
            await hass.async_block_till_done()
async def test_three_adapters_one_missing(
    hass, mock_bleak_scanner_start, enable_bluetooth, two_adapters
):
    """Test three adapters but one is missing results in a retry on setup."""
    # unique_id ...:03 has no matching adapter in the two_adapters fixture.
    entry = MockConfigEntry(
        domain=bluetooth.DOMAIN, data={}, unique_id="00:00:00:00:00:03"
    )
    entry.add_to_hass(hass)
    assert not await hass.config_entries.async_setup(entry.entry_id)
    await hass.async_block_till_done()
    assert entry.state == ConfigEntryState.SETUP_RETRY
async def test_auto_detect_bluetooth_adapters_linux(hass, one_adapter):
    """Test we auto detect bluetooth adapters on linux."""
    assert await async_setup_component(hass, bluetooth.DOMAIN, {})
    await hass.async_block_till_done()
    # No entry is created directly; one discovery flow is started per adapter.
    assert not hass.config_entries.async_entries(bluetooth.DOMAIN)
    assert len(hass.config_entries.flow.async_progress(bluetooth.DOMAIN)) == 1
async def test_auto_detect_bluetooth_adapters_linux_multiple(hass, two_adapters):
    """Test we auto detect bluetooth adapters on linux with multiple adapters."""
    assert await async_setup_component(hass, bluetooth.DOMAIN, {})
    await hass.async_block_till_done()
    # One discovery flow per detected adapter: two adapters -> two flows.
    assert not hass.config_entries.async_entries(bluetooth.DOMAIN)
    assert len(hass.config_entries.flow.async_progress(bluetooth.DOMAIN)) == 2
async def test_auto_detect_bluetooth_adapters_linux_none_found(hass):
    """Test we auto detect bluetooth adapters on linux with no adapters found."""
    with patch(
        "bluetooth_adapters.get_bluetooth_adapter_details", return_value={}
    ), patch(
        "homeassistant.components.bluetooth.util.platform.system", return_value="Linux"
    ):
        assert await async_setup_component(hass, bluetooth.DOMAIN, {})
        await hass.async_block_till_done()
    # No adapters reported -> no entries and no discovery flows.
    assert not hass.config_entries.async_entries(bluetooth.DOMAIN)
    assert len(hass.config_entries.flow.async_progress(bluetooth.DOMAIN)) == 0
async def test_auto_detect_bluetooth_adapters_macos(hass):
    """Test we auto detect bluetooth adapters on macos."""
    with patch(
        "homeassistant.components.bluetooth.util.platform.system", return_value="Darwin"
    ):
        assert await async_setup_component(hass, bluetooth.DOMAIN, {})
        await hass.async_block_till_done()
    # On Darwin a single discovery flow is started for the built-in adapter.
    assert not hass.config_entries.async_entries(bluetooth.DOMAIN)
    assert len(hass.config_entries.flow.async_progress(bluetooth.DOMAIN)) == 1
async def test_no_auto_detect_bluetooth_adapters_windows(hass):
    """Test we do not auto detect bluetooth adapters on Windows."""
    with patch(
        "homeassistant.components.bluetooth.util.platform.system",
        return_value="Windows",
    ):
        assert await async_setup_component(hass, bluetooth.DOMAIN, {})
        await hass.async_block_till_done()
    # Windows gets no auto-detection: no entries and no discovery flows.
    assert not hass.config_entries.async_entries(bluetooth.DOMAIN)
    assert len(hass.config_entries.flow.async_progress(bluetooth.DOMAIN)) == 0
async def test_getting_the_scanner_returns_the_wrapped_instance(hass, enable_bluetooth):
    """Test getting the scanner returns the wrapped instance."""
    scanner = bluetooth.async_get_scanner(hass)
    # The public API hands out the HaBleakScannerWrapper, never the raw scanner.
    assert isinstance(scanner, models.HaBleakScannerWrapper)
async def test_migrate_single_entry_macos(
    hass, mock_bleak_scanner_start, macos_adapter
):
    """Test we can migrate a single entry on MacOS."""
    # Legacy entry without a unique_id.
    entry = MockConfigEntry(domain=bluetooth.DOMAIN, data={})
    entry.add_to_hass(hass)
    assert await async_setup_component(hass, bluetooth.DOMAIN, {})
    await hass.async_block_till_done()
    # Setup backfills the adapter address as the entry's unique_id.
    assert entry.unique_id == DEFAULT_ADDRESS
async def test_migrate_single_entry_linux(hass, mock_bleak_scanner_start, one_adapter):
    """Test we can migrate a single entry on Linux."""
    # Legacy entry without a unique_id.
    entry = MockConfigEntry(domain=bluetooth.DOMAIN, data={})
    entry.add_to_hass(hass)
    assert await async_setup_component(hass, bluetooth.DOMAIN, {})
    await hass.async_block_till_done()
    # Setup backfills the adapter address as the entry's unique_id.
    assert entry.unique_id == "00:00:00:00:00:01"
async def test_discover_new_usb_adapters(hass, mock_bleak_scanner_start, one_adapter):
    """Test we can discover new usb adapters."""
    entry = MockConfigEntry(
        domain=bluetooth.DOMAIN, data={}, unique_id="00:00:00:00:00:01"
    )
    entry.add_to_hass(hass)
    saved_callback = None

    def _async_register_scan_request_callback(_hass, _callback):
        # Capture the USB scan-request callback so the test can fire it manually.
        nonlocal saved_callback
        saved_callback = _callback
        return lambda: None

    with patch(
        "homeassistant.components.bluetooth.usb.async_register_scan_request_callback",
        _async_register_scan_request_callback,
    ):
        assert await async_setup_component(hass, bluetooth.DOMAIN, {})
        await hass.async_block_till_done()

    assert not hass.config_entries.flow.async_progress(DOMAIN)
    # Requesting a scan alone starts no flow while the adapter list is unchanged.
    saved_callback()
    assert not hass.config_entries.flow.async_progress(DOMAIN)
    with patch(
        "homeassistant.components.bluetooth.util.platform.system", return_value="Linux"
    ), patch(
        "bluetooth_adapters.get_bluetooth_adapter_details",
        return_value={
            "hci0": {
                "org.bluez.Adapter1": {
                    "Address": "00:00:00:00:00:01",
                    "Name": "BlueZ 4.63",
                    "Modalias": "usbid:1234",
                }
            },
            "hci1": {
                "org.bluez.Adapter1": {
                    "Address": "00:00:00:00:00:02",
                    "Name": "BlueZ 4.63",
                    "Modalias": "usbid:1234",
                }
            },
        },
    ):
        # Advance time repeatedly so the deferred adapter rescan runs.
        for wait_sec in range(10, 20):
            async_fire_time_changed(
                hass, dt_util.utcnow() + timedelta(seconds=wait_sec)
            )
            await hass.async_block_till_done()

        # The newly appeared hci1 adapter triggers exactly one discovery flow.
        assert len(hass.config_entries.flow.async_progress(DOMAIN)) == 1
| [
"noreply@github.com"
] | piotr-kubiak.noreply@github.com |
287eb5948fdfa0b92d31d92331777526e4b0d8c2 | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_5492.py | 8908eaf023624b909fb44cceba923652e2fb1cb3 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # 5492
# ^((\D*[a-z]\D*[A-Z]\D*)|(\D*[A-Z]\D*[a-z]\D*)|(\D*\W\D*[a-z])|(\D*\W\D*[A-Z])|(\D*[a-z]\D*\W)|(\D*[A-Z]\D*\W))$
# EXPONENT
# nums:5
# EXPONENT AttackString:""+"aA"*512+"@1 _SLQ_2"
import re2 as re
from time import perf_counter

# ReDoS benchmark: regexlib pattern #5492, driven with attack strings of the
# form "aA" * i + "@1 _SLQ_2" and timed with perf_counter per iteration.
PATTERN = """^((\D*[a-z]\D*[A-Z]\D*)|(\D*[A-Z]\D*[a-z]\D*)|(\D*\W\D*[a-z])|(\D*\W\D*[A-Z])|(\D*[a-z]\D*\W)|(\D*[A-Z]\D*\W))$"""
MATCHER = re.compile(PATTERN)

for i in range(150000):
    attack = "aA" * i + "@1 _SLQ_2"
    attack_length = len(attack)
    started = perf_counter()
    m = MATCHER.search(attack)
    # m = MATCHER.match(attack)
    elapsed = perf_counter() - started
    print(f"{i}: took {elapsed} seconds!")
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
d4e4c2c0bc5b59146ff0bc3021c814b5a8821c8a | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_undulated.py | 64b34a6850b8ecaf7e0aabe42b76a28fce49e7b8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py |
from xai.brain.wordbase.verbs._undulate import _UNDULATE
# class header
class _UNDULATED(_UNDULATE):
    """Word entry for "undulated": stores its display name, word class, and base form."""

    def __init__(self):
        # Initialise the shared verb state, then record this word's own fields.
        _UNDULATE.__init__(self)
        self.name = "UNDULATED"
        self.specie = 'verbs'
        self.basic = "undulate"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
4c8ea9cb63d8f0c08dd8f3b0cec6698d4ed9ea3d | 0cc2a3a4b5948d8a30a4ab6e6a81209b28fa4dc2 | /Introduction.py | b75a74b8451203f59734e9fa4a6de2b3aa61bb28 | [] | no_license | zoshs2/Statiscal_Learning_Beginner | ece80effaae28875ed023803f2c738baf21fb6af | dc48640b00b04c1357ea205340f81b3e6bdbff5b | refs/heads/main | 2023-01-13T09:12:29.204403 | 2020-10-28T06:46:30 | 2020-10-28T06:46:30 | 306,056,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 529 | py | ### Introduction
# Basic statistic for beginners
# https://www.kaggle.com/kanncaa1/statistical-learning-tutorial-for-beginners
## Chapter 1.py
# 1. Histogram
# 2. Outliers
# 3. Box Plot
# 4. Summary Statistics
## Chapter 2.py
# 5. CDF (Cumulative Distribution Function?)
# 6. Effect Size
# 7. Relationship Between Variables
# 8. Correlation
# 9. Covariance
## Chapter 3.py
# 10. Pearson Correlation
# 11. Spearman's Rank Correlation
# 12. Mean VS. Median
# 13. Hypothesis Testing
# 14. Normal(Gaussian) Distribution & z-score | [
"zoshs27@gmail.com"
] | zoshs27@gmail.com |
bae18e6d6c368cd7d692ce5ddccda12121b1bcd3 | f6217c228984107f1fdde63fc544c92ad32efd13 | /common/hash/sha256.py | 36c2288f256afde86f48bd1cd2dc3a4118fb44cb | [
"MIT"
] | permissive | lukius/mts | 8be64972fd700ec9110789a7e15307e3fc3dfecb | 96d3d8b28742a474aca67bfcb079577c878bbb4c | refs/heads/master | 2021-06-06T03:22:21.991908 | 2017-11-28T23:52:50 | 2017-11-28T23:52:50 | 22,904,866 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from Crypto.Hash import SHA256 as _SHA256
from common.hash import HashFunction
class SHA256(HashFunction):
    """SHA-256 hash function backed by PyCrypto, exposing its PKCS OID."""

    @classmethod
    def get_OID(cls):
        # DER-encoded object identifier for id-sha256 (2.16.840.1.101.3.4.2.1).
        return '\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01'

    def hash(self, message):
        # TODO: implement custom SHA256.
        hasher = _SHA256.new(message)
        return hasher.digest()
"lukius@gmail.com"
] | lukius@gmail.com |
2d0ddf5bf1de02234b97f6a5df7b3d69b8d470a4 | 22b3822af1a3c525cfbc85efabcb80f7198dba8d | /Functions/Brantley_U5_04/Brantley_U5_04.py | 39c8efb038eaf3f8b4602369e39fffdb88cef6ec | [] | no_license | ccbrantley/Python_3.30 | 90b05a0b985819e95333e490006544332bb5e462 | 681bfd542505754abe36224f5b773d889f20ae38 | refs/heads/master | 2021-12-25T02:04:44.501778 | 2018-04-05T17:40:46 | 2018-04-05T17:40:46 | 80,469,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | expenseDict = {'loan payment' : 0, 'insurance' : 0, 'gas' : 0, 'oil' : 0, 'tires' : 0, 'maintenace' : 0}
def main():
expense()
totalMonthly, totalYearly = total()
print('Total monthly cost: $', format(totalMonthly, ',.2f'), sep='')
print('Total annual cost: $', format(totalYearly, ',.2f'), sep='')
def expense():
for x in expenseDict:
y = int(input('Enter cost amount of ' + x +': '))
expenseDict[x] = y
totalMonthly = sum(expenseDict.values())
def total():
x = sum(expenseDict.values())
return x, x * 12
main()
| [
"noreply@github.com"
] | ccbrantley.noreply@github.com |
d6ca72f18a592b1ecc313eea503875930f5d835c | 167face5e34f69ba36b8a8d93306387dcaa50d24 | /testes.py | 9502eb97df4ebeb820bacf59a85b1d29e3ef13b5 | [] | no_license | william-cirico/python-study | 4fbe20936c46af6115f0d88ad861c71e6273db71 | 5923268fea4c78707fe82f1f609535a69859d0df | refs/heads/main | 2023-04-19T03:49:23.237829 | 2021-05-03T01:24:56 | 2021-05-03T01:24:56 | 309,492,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | import unittest
from atividades import comer, dormir, eh_engracado
class AtividadesTestes(unittest.TestCase):
    """Unit tests for the helpers in the ``atividades`` module."""

    def test_comer_saudavel(self):
        """Check the message returned for healthy food."""
        self.assertEqual(
            comer('quiabo', True),
            "Estou comendo quiabo porque quero manter a forma"
        )

    def test_comer_gostosa(self):
        """Check the message returned for tasty (unhealthy) food."""
        self.assertEqual(
            comer(comida="pizza", eh_saudavel=False),
            "Estou comendo pizza porque a gente só vive uma vez"
        )

    def test_dormindo_pouco(self):
        """Check the message returned when sleeping too little."""
        self.assertEqual(
            dormir(4),
            "Continuo cansado após dormir por 4 horas. :("
        )

    def test_domindo_muito(self):
        """Check the message returned when sleeping too much."""
        self.assertEqual(
            dormir(10),
            "Ptz! Dormi muito! Estou atrasado para o trabalho!"
        )

    def test_eh_engracado(self):
        # self.assertEqual(eh_engracado("Sérgio Malandro"), False)
        self.assertFalse(eh_engracado("Sérgio Malandro"))
if __name__ == '__main__':
unittest.main() | [
"contato.williamc@gmail.com"
] | contato.williamc@gmail.com |
e966f209cb98135cc7d991a4ed9fb9f6176e8b2b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_artworks.py | d3cffdc1e4d13959720c0a0811e03088e3d625c5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._artwork import _ARTWORK
# class header
class _ARTWORKS(_ARTWORK, ):
    # Generated word-entry class for the plural form "artworks" of the noun "artwork".
    def __init__(self,):
        _ARTWORK.__init__(self)
        self.name = "ARTWORKS"  # canonical upper-case word name
        self.specie = 'nouns'   # part-of-speech bucket this entry belongs to
        self.basic = "artwork"  # base/lemma form of the word
        self.jsondata = {}      # extra metadata payload, empty by default
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
24842142cd636353539a3e7b63e7cef1c4626bb1 | 7a1b88d06ea18772b065b43d775cec6dd2acdf80 | /4153.py | 39386beff06afc1b47bb3d042dc3cabb7a745654 | [] | no_license | skaurl/baekjoon-online-judge | 28144cca45168e79b1ae0baa9a351f498f8d19ab | 1620d298c2f429e03c5f9387d8aca13763f5c731 | refs/heads/master | 2023-07-26T10:07:29.724066 | 2021-09-07T09:21:02 | 2021-09-07T09:21:02 | 299,019,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 269 | py | while True:
A = input().split()
A[0] = int(A[0])
A[1] = int(A[1])
A[2] = int(A[2])
A = sorted(A)
if A[0] == 0 and A[1] == 0 and A[2] == 0:
break
if A[0]**2 + A[1]**2 == A[2]**2:
print('right')
else:
print('wrong') | [
"dr_lunars@naver.com"
] | dr_lunars@naver.com |
94d7fe23b39627e9dafd26e70c17d851bdc74ebc | bedadeffd76899b4255871eaa79a03e8c8c5d7a9 | /screenshot/urls.py | 48013d0dce79f803043d4b0400d96b8fd8e14906 | [] | no_license | aakriti1435/Django-HTML-to-PDF | 5b48c5b0300227bc37439c4ea3d515c9ca3644a1 | 1f9a261ef1b17267514a951b8155c54ad74a281a | refs/heads/master | 2022-12-02T02:12:20.659027 | 2020-08-13T13:32:01 | 2020-08-13T13:32:01 | 287,287,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | from django.urls import path
from . import views
# from .views import take_screenshot
urlpatterns = [
    # Root URL renders the drawing canvas page.
    path('', views.canvas),
    # NOTE(review): Django route patterns conventionally have no leading
    # slash; '/take' only matches an unusual URL — confirm this is intended.
    path('/take', views.take_screenshot, name='canvas'),
]
"65544777+aakriti1435@users.noreply.github.com"
] | 65544777+aakriti1435@users.noreply.github.com |
902f3b00f8b02d6588611e6f3ec9c27f5ca52daa | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /b36bBpsnzyDbd4mzF_7.py | 569c718eecf06dc047827688656c8d981d55a694 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py |
def imposter_formula(i, p):
    """Return the impostor chance as a percent string, e.g. (1, 4) -> "25%".

    The ratio i/p is scaled to 100 and rounded to the nearest integer using
    Python's built-in round() (banker's rounding), matching the original
    behavior; the stray trailing comma in the old round(...) call is removed.
    """
    return str(round(100 * i / p)) + "%"
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
6fad1f124a44e93e1651c2f3ac8832a29a8777dd | 4be467ebc691f31b94dc72de88c10e1ab14d9c53 | /data.py | b7c56b80f628bfbb8bf81d5948588ea589af7f90 | [] | no_license | oziTeam/mockup-warp-test | 546d96a028155b2d605f72fbd1b0513d23b63ada | 242e838d31c57603f04060b5e8c196ac8ba9f306 | refs/heads/master | 2022-12-04T10:09:18.159312 | 2020-08-19T04:52:32 | 2020-08-19T04:52:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,833 | py | # ARTWORK_DATA = {
# "Front": "./sample_data/artworks/3-front.jpg",
# "Hood": "./sample_data/artworks/3-hood.jpg",
# "Back": "./sample_data/artworks/3-back.jpg"
# }
#
# MOCKUP_DATA = [
# {
# "side_name": "Back",
# "parts": [
# {
# "name": "back.left_sleeve",
# "model_path": "./sample_data//models/tshirt.back.left_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.left_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.left_sleeve",
# "model_path": "./sample_data//models/tshirt.back.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.front_back-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.right_sleeve",
# "model_path": "./sample_data//models/tshirt.back.right_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.right_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Back"
# },
# {
# "name": "back.top_hood",
# "model_path": "./sample_data//models/tshirt.back.top_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/back.top_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
# },
# ]
# },
# {
# "side_name": "Front",
# "parts": [
# {
# "name": "front.left_sleeve",
# "model_path": "./sample_data//models/tshirt.front.left_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.left_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front",
# "model_path": "./sample_data//models/tshirt.front.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.front-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front.right_sleeve",
# "model_path": "./sample_data//models/tshirt.front.right_sleeve.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.right_sleeve-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Front"
# },
# {
# "name": "front.bottom_hood",
# "model_path": "./sample_data//models/tshirt.front.bottom_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.bottom_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
#
# },
# {
# "name": "front.top_hood",
# "model_path": "./sample_data//models/tshirt.front.top_hood.model.npy",
# "cut_image_path": "./sample_data/cut_images/front.top_hood-cut.png",
# "shadow_image": "", # can be None
# "artwork_side": "Hood"
# }
# ]
# }
# ]
# Maps an artwork side name to the image file used for that side.
ARTWORK_DATA = {
    "Front": "./sample_data/artworks/fushion-mask.jpeg",
    "Adult": "./sample_data/artworks/mask-4.jpeg",
}

# One entry per mockup side. Each part pairs a warp model (.npy) with a
# cut-out overlay image and names which ARTWORK_DATA side is rendered onto it.
# NOTE(review): the "White" side reuses artwork side "Front" and there is no
# "White" key in ARTWORK_DATA — confirm this mapping is intentional.
MOCKUP_DATA = [
    {
        "side_name": "Adult",
        "parts": [
            {
                "name": "Adult",
                "model_path": "./sample_data/models/aop_cc_mask.adult.model.npy",
                "cut_image_path": "./sample_data/cut_images/cc_mask.adult.cut.png",
                "shadow_image": "",  # can be None
                "artwork_side": "Adult"
            }
        ]
    },
    {
        "side_name": "Front",
        "parts": [
            {
                "name": "Front",
                "model_path": "./sample_data/models/aop_cc_mask.front.model.npy",
                "cut_image_path": "./sample_data/cut_images/cc_mask.front.cut.png",
                "shadow_image": "",  # can be None
                "artwork_side": "Front"
            }
        ]
    },
    {
        "side_name": "White",
        "parts": [
            {
                "name": "White",
                "model_path": "./sample_data/models/aop_cc_mask.white.front.model.npy",
                "cut_image_path": "./sample_data/cut_images/cc_mask.white.front.cut.png",
                "shadow_image": "",  # can be None
                "artwork_side": "Front"
            }
        ]
    }
]
| [
"vantrong291@gmail.com"
] | vantrong291@gmail.com |
0495487a69cc62832cd6afee4efb15ddda3a9969 | 10e94d77e56d9cbb979174795c465b679d03d6b3 | /tensorflow/contrib/learn/python/learn/dataframe/transforms/difference.py | f9cb0c9485516abedbb3847530755d5cb328287f | [
"Apache-2.0"
] | permissive | pint1022/tf-coriander | 68939732c1ec0f052929c13ef6d8f49e44d423e4 | 197a685accca4a3f38285d6ac3ccf3998a200090 | refs/heads/master | 2020-04-14T18:56:40.334257 | 2019-01-11T00:40:11 | 2019-01-11T00:40:11 | 164,038,861 | 1 | 0 | Apache-2.0 | 2019-01-04T00:53:40 | 2019-01-04T00:53:40 | null | UTF-8 | Python | false | false | 2,361 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `Transform` that performs subtraction on two `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import ops
from tensorflow.python.ops import sparse_ops
def _negate_sparse(sparse_tensor):
  """Return a `SparseTensor` like `sparse_tensor` but with negated values."""
  negated_values = -sparse_tensor.values
  return ops.SparseTensor(indices=sparse_tensor.indices,
                          values=negated_values,
                          shape=sparse_tensor.shape)
@series.Series.register_binary_op("__sub__")
class Difference(transform.TensorFlowTransform):
  """Subtracts one 'Series` from another."""

  def __init__(self):
    super(Difference, self).__init__()

  @property
  def name(self):
    # Transform name used for identification/display.
    return "difference"

  @property
  def input_valency(self):
    # Binary operation: exactly two input Series.
    return 2

  @property
  def _output_names(self):
    # Single output named "output".
    return "output",

  def _apply_transform(self, input_tensors, **kwargs):
    """Subtract input_tensors[1] from input_tensors[0].

    Handles all four dense/sparse combinations of the two inputs.
    """
    pair_sparsity = (isinstance(input_tensors[0], ops.SparseTensor),
                     isinstance(input_tensors[1], ops.SparseTensor))
    if pair_sparsity == (False, False):
      result = input_tensors[0] - input_tensors[1]
    # note tf.sparse_add accepts the mixed cases,
    # so long as at least one input is sparse.
    elif not pair_sparsity[1]:
      result = sparse_ops.sparse_add(input_tensors[0], - input_tensors[1])
    else:
      result = sparse_ops.sparse_add(input_tensors[0],
                                     _negate_sparse(input_tensors[1]))
    # pylint: disable=not-callable
    return self.return_type(result)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
d908b2fae3acda4b5f7c3d8687dd1444f93be70c | c6389f9b11fd40ee9295f4e88a14a8057e294e4f | /components/nvs_flash/nvs_partition_generator/nvs_partition_gen.py | 3c755efed3e57a74ab399fd34544cd6f210af845 | [
"MIT"
] | permissive | ghsecuritylab/N14 | 987ebb27cfbd7ebf84deadeb09a480aa51be34c7 | 76bc595e3face0903436e48165f31724e4d4532a | refs/heads/master | 2021-02-28T19:46:09.834253 | 2019-11-19T14:36:58 | 2019-11-19T14:36:58 | 245,728,464 | 0 | 0 | MIT | 2020-03-08T00:40:31 | 2020-03-08T00:40:30 | null | UTF-8 | Python | false | false | 34,844 | py | #!/usr/bin/env python
#
# esp-idf NVS partition generation tool. Tool helps in generating NVS-compatible
# partition binary, with key-value pair entries provided via a CSV file.
#
# Copyright 2018 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division, print_function
from builtins import int, range, bytes
from io import open
import sys
import argparse
import binascii
import random
import struct
import os
import array
import csv
import zlib
import codecs
import datetime
import distutils.dir_util
try:
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
except ImportError:
print('The cryptography package is not installed.'
'Please refer to the Get Started section of the ESP-IDF Programming Guide for '
'setting up the required packages.')
raise
VERSION1_PRINT = "v1 - Multipage Blob Support Disabled"
VERSION2_PRINT = "v2 - Multipage Blob Support Enabled"
""" Class for standard NVS page structure """
class Page(object):
    """One 4096-byte NVS page: header, entry-state bitmap, and 126 32-byte
    entries. Builds the page image in ``page_buf`` and optionally encrypts
    entry data with AES-XTS.

    NOTE(review): methods read the module-level globals ``version``,
    ``page_header`` and ``key_len_needed`` that are set elsewhere in the
    file — this class is not usable standalone.
    """

    PAGE_PARAMS = {
        "max_size": 4096,
        "max_old_blob_size": 1984,
        "max_new_blob_size": 4000,
        "max_entries": 126
    }

    # Item type codes
    U8 = 0x01
    I8 = 0x11
    U16 = 0x02
    I16 = 0x12
    U32 = 0x04
    I32 = 0x14
    SZ = 0x21
    BLOB = 0x41
    BLOB_DATA = 0x42
    BLOB_IDX = 0x48

    # Few Page constants
    HEADER_SIZE = 32
    BITMAPARRAY_OFFSET = 32
    BITMAPARRAY_SIZE_IN_BYTES = 32
    FIRST_ENTRY_OFFSET = 64
    SINGLE_ENTRY_SIZE = 32
    CHUNK_ANY = 0xFF
    ACTIVE = 0xFFFFFFFE
    FULL = 0xFFFFFFFC
    VERSION1 = 0xFF
    VERSION2 = 0xFE

    def __init__(self, page_num, is_rsrv_page=False):
        # A reserved page keeps its buffer all-0xFF (erased flash); only
        # regular pages get a header and an entry-state bitmap.
        self.entry_num = 0
        self.is_encrypt = False
        self.encr_key = None
        self.bitmap_array = array.array('B')
        self.version = Page.VERSION2
        self.page_buf = bytearray(b'\xff') * Page.PAGE_PARAMS["max_size"]
        if not is_rsrv_page:
            self.bitmap_array = self.create_bitmap_array()
            self.set_header(page_num)

    def set_header(self, page_num):
        """Write the 32-byte page header (state, sequence number, version,
        CRC32) into ``page_buf``. Also publishes the header via the module
        global ``page_header`` so multi-page blob writes can mark it FULL.
        """
        global page_header
        # set page state to active
        page_header = bytearray(b'\xff') * 32
        page_state_active_seq = Page.ACTIVE
        struct.pack_into('<I', page_header, 0, page_state_active_seq)
        # set page sequence number
        struct.pack_into('<I', page_header, 4, page_num)
        # set version (``version`` is a module global chosen from CLI args)
        if version == Page.VERSION2:
            page_header[8] = Page.VERSION2
        elif version == Page.VERSION1:
            page_header[8] = Page.VERSION1
        # set header's CRC over bytes 4..27 (state word excluded)
        crc_data = bytes(page_header[4:28])
        crc = zlib.crc32(crc_data, 0xFFFFFFFF)
        struct.pack_into('<I', page_header, 28, crc & 0xFFFFFFFF)
        self.page_buf[0:len(page_header)] = page_header

    def create_bitmap_array(self):
        """Return the 32-byte entry-state bitmap, all bits set (erased)."""
        bitarray = array.array('B')
        charsize = 32  # bitmaparray has 256 bits, hence 32 bytes
        fill = 255  # Fill all 8 bits with 1's
        bitarray.extend((fill,) * charsize)
        return bitarray

    def write_bitmaparray(self):
        """Clear the state bit for the current entry (2 bits per entry) and
        refresh the bitmap region of ``page_buf``."""
        bitnum = self.entry_num * 2
        byte_idx = bitnum // 8  # Find byte index in the array
        bit_offset = bitnum & 7  # Find bit offset in given byte index
        mask = ~(1 << bit_offset)
        self.bitmap_array[byte_idx] &= mask
        start_idx = Page.BITMAPARRAY_OFFSET
        end_idx = Page.BITMAPARRAY_OFFSET + Page.BITMAPARRAY_SIZE_IN_BYTES
        self.page_buf[start_idx:end_idx] = self.bitmap_array

    def encrypt_entry(self, data_arr, tweak_arr, encr_key):
        # Encrypt 32 bytes of data using AES-XTS encryption.
        # data_arr/tweak_arr are hex strings; encr_key is raw key bytes.
        backend = default_backend()
        plain_text = codecs.decode(data_arr, 'hex')
        tweak = codecs.decode(tweak_arr, 'hex')
        cipher = Cipher(algorithms.AES(encr_key), modes.XTS(tweak), backend=backend)
        encryptor = cipher.encryptor()
        encrypted_data = encryptor.update(plain_text)
        return encrypted_data

    def reverse_hexbytes(self, addr_tmp):
        """Reverse a hex string byte-wise (pairs of hex digits), producing
        the little-endian byte order needed for the XTS tweak."""
        addr = []
        reversed_bytes = ""
        for i in range(0, len(addr_tmp), 2):
            addr.append(addr_tmp[i:i + 2])
        reversed_bytes = "".join(reversed(addr))
        return reversed_bytes

    def encrypt_data(self, data_input, no_of_entries, nvs_obj):
        """Encrypt ``no_of_entries`` 32-byte entries of ``data_input`` with
        AES-XTS, using each entry's absolute flash offset as the tweak.
        Returns the concatenated ciphertext as a bytearray.
        """
        # Set values needed for encryption and encrypt data byte wise
        encr_data_to_write = bytearray()
        data_len_needed = 64  # in hex
        tweak_len_needed = 32  # in hex
        init_tweak_val = '0'
        init_data_val = 'f'
        tweak_tmp = ''
        encr_key_input = None
        # Extract encryption key and tweak key from given key input.
        # NOTE(review): ``key_len_needed`` is a module global defined in code
        # not visible here — confirm it is set before any encrypted write.
        if len(self.encr_key) == key_len_needed:
            encr_key_input = self.encr_key
        else:
            encr_key_input = codecs.decode(self.encr_key, 'hex')
        # Absolute flash offset of this page's first entry.
        rel_addr = nvs_obj.page_num * Page.PAGE_PARAMS["max_size"] + Page.FIRST_ENTRY_OFFSET
        if not isinstance(data_input, bytearray):
            # Pad short input to one full 32-byte entry with 0xFF.
            byte_arr = bytearray(b'\xff') * 32
            byte_arr[0:len(data_input)] = data_input
            data_input = byte_arr
        data_input = binascii.hexlify(data_input)
        entry_no = self.entry_num
        start_idx = 0
        end_idx = start_idx + 64
        for _ in range(0, no_of_entries):
            # Set tweak value: the entry's flash address, byte-reversed and
            # zero-padded to 16 bytes (32 hex chars).
            offset = entry_no * Page.SINGLE_ENTRY_SIZE
            addr = hex(rel_addr + offset)[2:]
            addr_len = len(addr)
            if addr_len > 2:
                if not addr_len % 2:
                    addr_tmp = addr
                    tweak_tmp = self.reverse_hexbytes(addr_tmp)
                    tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp))))
                else:
                    # Odd number of hex digits: prepend '0' before reversing.
                    addr_tmp = init_tweak_val + addr
                    tweak_tmp = self.reverse_hexbytes(addr_tmp)
                    tweak_val = tweak_tmp + (init_tweak_val * (tweak_len_needed - (len(tweak_tmp))))
            else:
                tweak_val = addr + (init_tweak_val * (tweak_len_needed - len(addr)))
            # Encrypt one 32-byte entry (64 hex chars), padded with 'f'.
            data_bytes = data_input[start_idx:end_idx]
            if type(data_bytes) == bytes:
                data_bytes = data_bytes.decode()
            data_val = data_bytes + (init_data_val * (data_len_needed - len(data_bytes)))
            encr_data_ret = self.encrypt_entry(data_val, tweak_val, encr_key_input)
            encr_data_to_write = encr_data_to_write + encr_data_ret
            # Update values for encrypting next set of data bytes
            start_idx = end_idx
            end_idx = start_idx + 64
            entry_no += 1
        return encr_data_to_write

    def write_entry_to_buf(self, data, entrycount, nvs_obj):
        """Copy ``data`` (encrypting first if enabled) into the next
        ``entrycount`` entry slots and mark them used in the bitmap."""
        encr_data = bytearray()
        if self.is_encrypt:
            encr_data_ret = self.encrypt_data(data, entrycount, nvs_obj)
            encr_data[0:len(encr_data_ret)] = encr_data_ret
            data = encr_data
        data_offset = Page.FIRST_ENTRY_OFFSET + (Page.SINGLE_ENTRY_SIZE * self.entry_num)
        start_idx = data_offset
        end_idx = data_offset + len(data)
        self.page_buf[start_idx:end_idx] = data
        # Set bitmap array for entries in current page
        for i in range(0, entrycount):
            self.write_bitmaparray()
            self.entry_num += 1

    def set_crc_header(self, entry_struct):
        """Compute the entry-header CRC32 over bytes 0..3 + 8..31 and store
        it at offset 4. Returns the updated header."""
        crc_data = bytearray(b'28')
        crc_data[0:4] = entry_struct[0:4]
        crc_data[4:28] = entry_struct[8:32]
        crc_data = bytes(crc_data)
        crc = zlib.crc32(crc_data, 0xFFFFFFFF)
        struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
        return entry_struct

    def write_varlen_binary_data(self, entry_struct, ns_index, key, data, data_size, total_entry_count, encoding, nvs_obj):
        """Write a (possibly multi-page) blob as BLOB_DATA chunks followed by
        one BLOB_IDX entry (version 2 layout). Creates new pages via
        ``nvs_obj`` as each fills up; returns the final entry header.
        """
        chunk_start = 0
        chunk_count = 0
        chunk_index = Page.CHUNK_ANY
        offset = 0
        remaining_size = data_size
        tailroom = None
        while True:
            chunk_size = 0
            # Get the size available in current page
            tailroom = (Page.PAGE_PARAMS["max_entries"] - self.entry_num - 1) * Page.SINGLE_ENTRY_SIZE
            assert tailroom >= 0, "Page overflow!!"
            # Split the binary data into two and store a chunk of available size onto curr page
            if tailroom < remaining_size:
                chunk_size = tailroom
            else:
                chunk_size = remaining_size
            remaining_size = remaining_size - chunk_size
            # Change type of data to BLOB_DATA
            entry_struct[1] = Page.BLOB_DATA
            # Calculate no. of entries data chunk will require
            datachunk_rounded_size = (chunk_size + 31) & ~31
            datachunk_entry_count = datachunk_rounded_size // 32
            datachunk_total_entry_count = datachunk_entry_count + 1  # +1 for the entry header
            # Set Span
            entry_struct[2] = datachunk_total_entry_count
            # Update the chunkIndex
            chunk_index = chunk_start + chunk_count
            entry_struct[3] = chunk_index
            # Set data chunk
            data_chunk = data[offset:offset + chunk_size]
            # Compute CRC of data chunk
            struct.pack_into('<H', entry_struct, 24, chunk_size)
            if type(data) != bytes:
                data_chunk = bytes(data_chunk, encoding='utf8')
            crc = zlib.crc32(data_chunk, 0xFFFFFFFF)
            struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
            # compute crc of entry header
            entry_struct = self.set_crc_header(entry_struct)
            # write entry header
            self.write_entry_to_buf(entry_struct, 1, nvs_obj)
            # write actual data
            self.write_entry_to_buf(data_chunk, datachunk_entry_count, nvs_obj)
            chunk_count = chunk_count + 1
            if remaining_size or (tailroom - chunk_size) < Page.SINGLE_ENTRY_SIZE:
                # NOTE(review): page_header[0:4] is a 4-byte slice compared to
                # the int Page.FULL, so this condition is always True — the
                # packed state word was probably meant; confirm intent.
                if page_header[0:4] != Page.FULL:
                    page_state_full_seq = Page.FULL
                    struct.pack_into('<I', page_header, 0, page_state_full_seq)
                nvs_obj.create_new_page()
                self = nvs_obj.cur_page
            offset = offset + chunk_size
            # All chunks are stored, now store the index
            if not remaining_size:
                # Initialise data field to 0xff
                data_array = bytearray(b'\xff') * 8
                entry_struct[24:32] = data_array
                # change type of data to BLOB_IDX
                entry_struct[1] = Page.BLOB_IDX
                # Set Span
                entry_struct[2] = 1
                # Update the chunkIndex
                chunk_index = Page.CHUNK_ANY
                entry_struct[3] = chunk_index
                struct.pack_into('<I', entry_struct, 24, data_size)
                entry_struct[28] = chunk_count
                entry_struct[29] = chunk_start
                # compute crc of entry header
                entry_struct = self.set_crc_header(entry_struct)
                # write last entry
                self.write_entry_to_buf(entry_struct, 1, nvs_obj)
                break
        return entry_struct

    def write_single_page_entry(self, entry_struct, data, datalen, data_entry_count, nvs_obj):
        """Write a header + data that fits entirely within the current page."""
        # compute CRC of data
        struct.pack_into('<H', entry_struct, 24, datalen)
        if type(data) != bytes:
            data = bytes(data, encoding='utf8')
        crc = zlib.crc32(data, 0xFFFFFFFF)
        struct.pack_into('<I', entry_struct, 28, crc & 0xFFFFFFFF)
        # compute crc of entry header
        entry_struct = self.set_crc_header(entry_struct)
        # write entry header
        self.write_entry_to_buf(entry_struct, 1, nvs_obj)
        # write actual data
        self.write_entry_to_buf(data, data_entry_count, nvs_obj)

    """
    Low-level function to write variable length data into page buffer. Data should be formatted
    according to encoding specified.
    """
    def write_varlen_data(self, key, data, encoding, ns_index, nvs_obj):
        # Set size of data
        datalen = len(data)
        # v1 caps all var-length data at one page; v2 caps only strings.
        if datalen > Page.PAGE_PARAMS["max_old_blob_size"]:
            if version == Page.VERSION1:
                raise InputError("Version %s\n%s: Size exceeds max allowed length." % (VERSION1_PRINT, key))
            else:
                if encoding == "string":
                    raise InputError("Version %s\n%s: Size exceeds max allowed length." % (VERSION2_PRINT, key))
        # Calculate no. of entries data will require
        rounded_size = (datalen + 31) & ~31
        data_entry_count = rounded_size // 32
        total_entry_count = data_entry_count + 1  # +1 for the entry header
        # Check if page is already full and new page is needed to be created right away
        if self.entry_num >= Page.PAGE_PARAMS["max_entries"]:
            raise PageFullError()
        elif (self.entry_num + total_entry_count) >= Page.PAGE_PARAMS["max_entries"]:
            # v2 binary blobs may span pages, so only non-splittable data
            # forces a new page here.
            if not (version == Page.VERSION2 and encoding in ["hex2bin", "binary", "base64"]):
                raise PageFullError()
        # Entry header
        entry_struct = bytearray(b'\xff') * 32
        # Set Namespace Index
        entry_struct[0] = ns_index
        # Set Span
        if version == Page.VERSION2:
            if encoding == "string":
                entry_struct[2] = data_entry_count + 1
            # Set Chunk Index
            chunk_index = Page.CHUNK_ANY
            entry_struct[3] = chunk_index
        else:
            entry_struct[2] = data_entry_count + 1
        # set key (16-byte field, zero padded)
        key_array = b'\x00' * 16
        entry_struct[8:24] = key_array
        entry_struct[8:8 + len(key)] = key.encode()
        # set Type
        if encoding == "string":
            entry_struct[1] = Page.SZ
        elif encoding in ["hex2bin", "binary", "base64"]:
            entry_struct[1] = Page.BLOB
        if version == Page.VERSION2 and (encoding in ["hex2bin", "binary", "base64"]):
            entry_struct = self.write_varlen_binary_data(entry_struct, ns_index, key, data,
                                                         datalen, total_entry_count, encoding, nvs_obj)
        else:
            self.write_single_page_entry(entry_struct, data, datalen, data_entry_count, nvs_obj)

    """ Low-level function to write data of primitive type into page buffer. """
    def write_primitive_data(self, key, data, encoding, ns_index, nvs_obj):
        # Check if entry exceeds max number of entries allowed per page
        if self.entry_num >= Page.PAGE_PARAMS["max_entries"]:
            raise PageFullError()
        entry_struct = bytearray(b'\xff') * 32
        entry_struct[0] = ns_index  # namespace index
        entry_struct[2] = 0x01  # Span
        chunk_index = Page.CHUNK_ANY
        entry_struct[3] = chunk_index
        # write key (16-byte field, zero padded)
        key_array = b'\x00' * 16
        entry_struct[8:24] = key_array
        entry_struct[8:8 + len(key)] = key.encode()
        # Pack the value at offset 24 in the format matching its encoding.
        if encoding == "u8":
            entry_struct[1] = Page.U8
            struct.pack_into('<B', entry_struct, 24, data)
        elif encoding == "i8":
            entry_struct[1] = Page.I8
            struct.pack_into('<b', entry_struct, 24, data)
        elif encoding == "u16":
            entry_struct[1] = Page.U16
            struct.pack_into('<H', entry_struct, 24, data)
        elif encoding == "u32":
            entry_struct[1] = Page.U32
            struct.pack_into('<I', entry_struct, 24, data)
        elif encoding == "i32":
            entry_struct[1] = Page.I32
            struct.pack_into('<i', entry_struct, 24, data)
        # Compute CRC
        crc_data = bytearray(b'28')
        crc_data[0:4] = entry_struct[0:4]
        crc_data[4:28] = entry_struct[8:32]
        crc_data = bytes(crc_data)
        crc = zlib.crc32(crc_data, 0xFFFFFFFF)
        struct.pack_into('<I', entry_struct, 4, crc & 0xFFFFFFFF)
        # write to file
        self.write_entry_to_buf(entry_struct, 1, nvs_obj)

    """ Get page buffer data of a given page """
    def get_data(self):
        return self.page_buf
"""
NVS class encapsulates all NVS specific operations to create a binary with given key-value pairs.
Binary can later be flashed onto device via a flashing utility.
"""
class NVS(object):
    """Builds a complete NVS partition image out of :class:`Page` objects.

    Used as a context manager: on a clean exit it pads the partition with
    empty pages, appends the reserved page, and writes the result to
    ``fout``.

    NOTE(review): relies on module globals ``version``, ``is_encrypt_data``
    and ``key_input`` being set (by argument-parsing code outside this
    class) before instantiation.
    """

    def __init__(self, fout, input_size):
        self.size = input_size        # remaining partition bytes not yet claimed by a page
        self.namespace_idx = 0        # current namespace index (0 = none written yet)
        self.page_num = -1            # incremented to 0 by the first create_new_page()
        self.pages = []
        self.cur_page = self.create_new_page()
        self.fout = fout

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Only finalize and write output when no exception occurred.
        if exc_type is None and exc_value is None:
            # Create pages for remaining available size
            while True:
                try:
                    self.create_new_page()
                except InsufficientSizeError:
                    self.size = None
                    # Creating the last reserved page
                    self.create_new_page(is_rsrv_page=True)
                    break
            result = self.get_binary_data()
            if version == Page.VERSION1:
                print("Version: ", VERSION1_PRINT)
            else:
                print("Version: ", VERSION2_PRINT)
            self.fout.write(result)

    def create_new_page(self, is_rsrv_page=False):
        """Append a fresh page, make it current, and return it.

        Raises InsufficientSizeError once the partition budget is exhausted.
        NOTE(review): the budget check is ``size == 0``; a size that is not a
        multiple of the 4096-byte page never reaches 0 exactly — presumably
        validated upstream; confirm.
        """
        # Update available size as each page is created
        if self.size == 0:
            raise InsufficientSizeError("Size parameter is less than the size of data in csv.Please increase size.")
        if not is_rsrv_page:
            self.size = self.size - Page.PAGE_PARAMS["max_size"]
        self.page_num += 1
        new_page = Page(self.page_num, is_rsrv_page)
        new_page.version = version
        new_page.is_encrypt = is_encrypt_data
        if new_page.is_encrypt:
            new_page.encr_key = key_input
        self.pages.append(new_page)
        self.cur_page = new_page
        return new_page

    """
    Write namespace entry and subsequently increase namespace count so that all upcoming entries
    will be mapped to a new namespace.
    """
    def write_namespace(self, key):
        self.namespace_idx += 1
        try:
            self.cur_page.write_primitive_data(key, self.namespace_idx, "u8", 0, self)
        except PageFullError:
            # Current page is full: start a new page and retry once.
            new_page = self.create_new_page()
            new_page.write_primitive_data(key, self.namespace_idx, "u8", 0, self)

    """
    Write key-value pair. Function accepts value in the form of ascii character and converts
    it into appropriate format before calling Page class's functions to write entry into NVS format.
    Function handles PageFullError and creates a new page and re-invokes the function on a new page.
    We don't have to guard re-invocation with try-except since no entry can span multiple pages.
    """
    def write_entry(self, key, value, encoding):
        # Normalize the value for the declared encoding before writing.
        if encoding == "hex2bin":
            if len(value) % 2 != 0:
                raise InputError("%s: Invalid data length. Should be multiple of 2." % key)
            value = binascii.a2b_hex(value)
        if encoding == "base64":
            value = binascii.a2b_base64(value)
        if encoding == "string":
            if type(value) == bytes:
                value = value.decode()
            # NVS strings are stored NUL-terminated.
            value += '\0'
        encoding = encoding.lower()
        varlen_encodings = ["string", "binary", "hex2bin", "base64"]
        primitive_encodings = ["u8", "i8", "u16", "u32", "i32"]
        if encoding in varlen_encodings:
            try:
                self.cur_page.write_varlen_data(key, value, encoding, self.namespace_idx, self)
            except PageFullError:
                new_page = self.create_new_page()
                new_page.write_varlen_data(key, value, encoding, self.namespace_idx, self)
        elif encoding in primitive_encodings:
            try:
                self.cur_page.write_primitive_data(key, int(value), encoding, self.namespace_idx, self)
            except PageFullError:
                new_page = self.create_new_page()
                new_page.write_primitive_data(key, int(value), encoding, self.namespace_idx, self)
        else:
            raise InputError("%s: Unsupported encoding" % encoding)

    """ Return accumulated data of all pages """
    def get_binary_data(self):
        data = bytearray()
        for page in self.pages:
            data += page.get_data()
        return data
class PageFullError(RuntimeError):
    """Raised when the current page has no room left for the requested entry."""

    def __init__(self):
        # Explicit base-class call; carries no message or extra state.
        RuntimeError.__init__(self)
class InputError(RuntimeError):
    """Signals malformed or unsupported input (CSV value, encoding, size)."""

    def __init__(self, e):
        # Forward the message to the base class unchanged.
        RuntimeError.__init__(self, e)
class InsufficientSizeError(RuntimeError):
    """Raised when the given NVS partition size cannot hold the CSV data."""

    def __init__(self, e):
        # Forward the message to the base class unchanged.
        RuntimeError.__init__(self, e)
def nvs_open(result_obj, input_size):
    """Create an NVS writer; use it to set key-value pairs, then nvs_close it.

    :param result_obj: File/stream object that receives the final binary
        (e.g. a ``BytesIO`` for in-memory output).
    :param input_size: Size of the partition in bytes.
    :return: NVS class instance
    """
    nvs_instance = NVS(result_obj, input_size)
    return nvs_instance
def write_entry(nvs_instance, key, datatype, encoding, value):
    """Write one key-value pair (or a namespace marker) into the NVS image.

    :param nvs_instance: Instance of an NVS class returned by nvs_open()
    :param key: Key of the data
    :param datatype: "file", "data" or "namespace"
    :param encoding: "u8", "i8", "u16", "u32", "i32", "string", "binary",
        "hex2bin" or "base64"
    :param value: ASCII value string for "data"; a file path for "file"
    :return: None
    """
    if datatype == "file":
        # For file input, replace the path with the file's raw contents,
        # resolving relative paths against the current working directory.
        path = value if os.path.isabs(value) else os.path.join(os.getcwd(), value)
        with open(path, 'rb') as data_file:
            value = data_file.read()

    if datatype == "namespace":
        nvs_instance.write_namespace(key)
    else:
        nvs_instance.write_entry(key, value, encoding)
def nvs_close(nvs_instance):
    """Finalise the NVS image and flush it to the stream given to nvs_open().

    :param nvs_instance: Instance of NVS class returned by nvs_open()
    :return: None
    """
    # A clean (no-exception) exit triggers padding and output writing.
    no_exception = (None, None, None)
    nvs_instance.__exit__(*no_exception)
def check_input_args(input_filename=None, output_filename=None, input_part_size=None, is_key_gen=None,
                     encrypt_mode=None, key_file=None, version_no=None, print_arg_str=None, print_encrypt_arg_str=None,
                     output_dir=None):
    """Validate the CLI argument combination and initialise the module-level
    configuration globals (`version`, `is_encrypt_data`, `input_size`,
    `key_gen`). Calls ``sys.exit`` with a message on any invalid combination.
    """
    global version, is_encrypt_data, input_size, key_gen

    version = version_no
    is_encrypt_data = encrypt_mode
    key_gen = is_key_gen
    input_size = input_part_size

    # An absolute --keyfile already fixes the key location, so a custom
    # --outdir would be contradictory.
    if not output_dir == os.getcwd() and (key_file and os.path.isabs(key_file)):
        sys.exit("Error. Cannot provide --outdir argument as --keyfile is absolute path.")

    if not os.path.isdir(output_dir):
        distutils.dir_util.mkpath(output_dir)

    # The CLI passes 'true'/'false' strings; normalise them to booleans.
    if is_encrypt_data.lower() == 'true':
        is_encrypt_data = True
    elif is_encrypt_data.lower() == 'false':
        is_encrypt_data = False

    # Map the version string onto the Page format constants.
    if version == 'v1':
        version = Page.VERSION1
    elif version == 'v2':
        version = Page.VERSION2

    if key_gen.lower() == 'true':
        key_gen = True
    elif key_gen.lower() == 'false':
        key_gen = False

    if key_gen:
        # Generating keys together with a partition requires encryption on;
        # a partial set of partition arguments is always invalid.
        if all(arg is not None for arg in [input_filename, output_filename, input_size]):
            if not is_encrypt_data:
                sys.exit("--encrypt argument is missing or set to false.")
        elif any(arg is not None for arg in [input_filename, output_filename, input_size]):
            sys.exit(print_arg_str)
    else:
        # Without key generation all three partition arguments are mandatory.
        if not (input_filename and output_filename and input_size):
            sys.exit(print_arg_str)

    if is_encrypt_data and not key_gen and not key_file:
        sys.exit(print_encrypt_arg_str)

    if not is_encrypt_data and key_file:
        sys.exit("Invalid. Cannot give --keyfile as --encrypt is set to false.")

    if key_file:
        key_file_name, key_file_ext = os.path.splitext(key_file)
        if key_file_ext:
            if not key_file_ext == '.bin':
                sys.exit("--keyfile argument can be a filename with no extension or .bin extension only")

    # If only one of the arguments - input_filename, output_filename, input_size is given
    if ((any(arg is None for arg in [input_filename, output_filename, input_size])) is True) and \
            ((all(arg is None for arg in [input_filename, output_filename, input_size])) is False):
        sys.exit(print_arg_str)

    if input_size:
        # Set size (base 0 accepts decimal, hex 0x..., octal 0o... input)
        input_size = int(input_size, 0)

        if input_size % 4096 != 0:
            sys.exit("Size of partition must be multiple of 4096")

        # Update size as a page needs to be reserved of size 4KB
        input_size = input_size - Page.PAGE_PARAMS["max_size"]

        if input_size < (2 * Page.PAGE_PARAMS["max_size"]):
            sys.exit("Minimum NVS partition size needed is 0x3000 bytes.")
def nvs_part_gen(input_filename=None, output_filename=None, input_part_size=None, is_key_gen=None, encrypt_mode=None,
                 key_file=None, encr_key_prefix=None, version_no=None, output_dir=None):
    """ Wrapper to generate nvs partition binary

    :param input_filename: Name of input file containing data
    :param output_filename: Name of output file to store generated binary
    :param input_part_size: Size of partition in bytes (must be multiple of 4096)
    :param is_key_gen: Enable encryption key generation in encryption mode
    :param encrypt_mode: Enable/Disable encryption mode
    :param key_file: Input file having encryption keys in encryption mode
    :param encr_key_prefix: Optional prefix for the generated key file name
    :param version_no: Format Version number
    :param output_dir: Directory receiving the generated files
    :return: None

    NOTE: relies on the globals set up earlier by check_input_args()
    (`key_gen`, `input_size`, ...).
    """
    global key_input, key_len_needed

    encr_key_bin_file = None
    encr_keys_dir = None
    backslash = ['/','\\']

    key_len_needed = 64
    key_input = bytearray()

    if key_gen:
        # Fresh random key material: 128 hex characters (64 bytes once decoded).
        key_input = ''.join(random.choice('0123456789abcdef') for _ in range(128)).strip()
    elif key_file:
        with open(key_file, 'rb') as key_f:
            key_input = key_f.read(64)

    # Only build the partition binary when all three arguments were supplied
    # (check_input_args already rejected partial combinations).
    if all(arg is not None for arg in [input_filename, output_filename, input_size]):
        # A bare output filename (no path separators) is placed in output_dir.
        if not os.path.isabs(output_filename) and not any(ch in output_filename for ch in backslash):
            output_filename = os.path.join(output_dir, '') + output_filename
        input_file = open(input_filename, 'rt', encoding='utf8')
        output_file = open(output_filename, 'wb')

        with nvs_open(output_file, input_size) as nvs_obj:
            reader = csv.DictReader(input_file, delimiter=',')
            for row in reader:
                try:
                    write_entry(nvs_obj, row["key"], row["type"], row["encoding"], row["value"])
                except (InputError) as e:
                    # A malformed CSV row aborts the whole generation run.
                    print(e)
                    input_file.close()
                    output_file.close()
                    sys.exit(-2)

        input_file.close()
        output_file.close()

        print("NVS binary created: " + output_filename)

    if key_gen:
        # Lay the key bytes into a fresh 0xFF-filled page buffer followed by
        # their CRC32 checksum.
        keys_page_buf = bytearray(b'\xff') * Page.PAGE_PARAMS["max_size"]
        key_bytes = bytearray()
        if len(key_input) == key_len_needed:
            key_bytes = key_input
        else:
            # key_input holds hex characters; decode them to raw bytes.
            key_bytes = codecs.decode(key_input, 'hex')
        key_len = len(key_bytes)
        keys_page_buf[0:key_len] = key_bytes
        crc_data = keys_page_buf[0:key_len]
        crc_data = bytes(crc_data)
        crc = zlib.crc32(crc_data, 0xFFFFFFFF)
        struct.pack_into('<I', keys_page_buf, key_len, crc & 0xFFFFFFFF)

        if not key_file or (key_file and not os.path.isabs(key_file)):
            # Create encryption keys bin file with timestamp
            if not encr_key_prefix:
                timestamp = datetime.datetime.now().strftime('%m-%d_%H-%M')
            output_dir = os.path.join(output_dir, '')
            encr_keys_dir = output_dir + "keys"
            if not os.path.isdir(encr_keys_dir):
                distutils.dir_util.mkpath(encr_keys_dir)
            # Add backslash to `keys` dir if it is not present
            encr_keys_dir = os.path.join(encr_keys_dir, '')

        if key_file:
            key_file_name, ext = os.path.splitext(key_file)
            if ext:
                if ".bin" not in ext:
                    sys.exit("Error: --keyfile must have .bin extension")
                encr_key_bin_file = os.path.basename(key_file)
            else:
                encr_key_bin_file = key_file_name + ".bin"
            if encr_keys_dir:
                encr_key_bin_file = encr_keys_dir + encr_key_bin_file
        else:
            if encr_key_prefix:
                encr_key_bin_file = encr_keys_dir + encr_key_prefix + "-keys" + ".bin"
            else:
                encr_key_bin_file = encr_keys_dir + "encryption_keys_" + timestamp + ".bin"

        with open(encr_key_bin_file,'wb') as output_keys_file:
            output_keys_file.write(keys_page_buf)

        print("Encryption keys binary created: " + encr_key_bin_file)
def main():
    """Command-line entry point: parse arguments, validate them, then
    generate the NVS partition binary and/or the encryption key binary."""
    parser = argparse.ArgumentParser(description="ESP32 NVS partition generation utility")
    nvs_part_gen_group = parser.add_argument_group('To generate NVS partition')
    nvs_part_gen_group.add_argument("--input",
                                    help="Path to CSV file to parse.",
                                    default=None)
    nvs_part_gen_group.add_argument("--output",
                                    help='Path to output converted binary file.',
                                    default=None)
    nvs_part_gen_group.add_argument("--size",
                                    help='Size of NVS Partition in bytes (must be multiple of 4096)')
    nvs_part_gen_group.add_argument("--version",
                                    help='Set version. Default: v2',
                                    choices=['v1','v2'],
                                    default='v2',
                                    type=str.lower)
    keygen_action_key = nvs_part_gen_group.add_argument("--keygen",
                                                        help='Generate keys for encryption.',
                                                        choices=['true','false'],
                                                        default='false',
                                                        type=str.lower)
    nvs_part_gen_group.add_argument("--encrypt",
                                    help='Set encryption mode. Default: false',
                                    choices=['true','false'],
                                    default='false',
                                    type=str.lower)
    keygen_action_file = nvs_part_gen_group.add_argument("--keyfile",
                                                         help='File having key for encryption (Applicable only if encryption mode is true).',
                                                         default=None)
    keygen_action_dir = nvs_part_gen_group.add_argument('--outdir',
                                                        dest='outdir',
                                                        default=os.getcwd(),
                                                        help='the output directory to store the files created\
                                                        (Default: current directory)')

    key_gen_group = parser.add_argument_group('To generate encryption keys')
    # --keygen/--keyfile/--outdir belong to both help groups; appending to the
    # private `_group_actions` list only changes how --help renders them.
    key_gen_group._group_actions.append(keygen_action_key)
    key_gen_group._group_actions.append(keygen_action_file)
    key_gen_group._group_actions.append(keygen_action_dir)

    args = parser.parse_args()
    input_filename = args.input
    output_filename = args.output
    part_size = args.size
    version_no = args.version
    is_key_gen = args.keygen
    is_encrypt_data = args.encrypt
    key_file = args.keyfile
    output_dir_path = args.outdir
    # Reserved for callers of nvs_part_gen(); the CLI does not expose it.
    encr_keys_prefix = None

    print_arg_str = "Invalid.\nTo generate nvs partition binary --input, --output and --size arguments are mandatory.\
                    \nTo generate encryption keys --keygen argument is mandatory."
    print_encrypt_arg_str = "Missing parameter. Enter --keyfile or --keygen."

    # Validation also initialises the module-level globals used below.
    check_input_args(input_filename,output_filename, part_size, is_key_gen, is_encrypt_data, key_file, version_no,
                     print_arg_str, print_encrypt_arg_str, output_dir_path)

    nvs_part_gen(input_filename, output_filename, part_size, is_key_gen, is_encrypt_data, key_file,
                 encr_keys_prefix, version_no, output_dir_path)


if __name__ == "__main__":
    main()
| [
"qitas@qitas.cn"
] | qitas@qitas.cn |
0428f2bbc10bab71365ca218e39a361a0a85a71f | e89b1297206710aad354ae7a0514ea8d0dfe5984 | /setup.py | 907ca11897264ee61d985f4a1558c49f3ab2f3e7 | [] | no_license | dandavison/docopt-subcommand-completion-example | d649f635012e147cc59c94611a198abe7b61aff7 | f700d61aa43fb9ddf16c4bd11aeccdb7bad171dc | refs/heads/master | 2021-01-17T13:29:34.631932 | 2016-07-10T21:03:31 | 2016-07-10T21:03:31 | 56,941,539 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | import os
from setuptools import find_packages
from setuptools import setup
def _read_version():
    """Return the package version stored in app/version.txt."""
    version_path = os.path.join(os.path.dirname(__file__), 'app', 'version.txt')
    # Use a context manager so the file handle is closed promptly
    # (the original inline open() left closing to the garbage collector).
    with open(version_path) as f:
        return f.read().strip()


setup(
    name='docopt-example',
    version=_read_version(),
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=['docopt'],
    entry_points={
        'console_scripts': [
            'docopt-example = app.cli:main',
        ],
    },
)
| [
"dandavison7@gmail.com"
] | dandavison7@gmail.com |
555ad2bb52e603076658741cc942bcaa8a6e7d82 | a024fe3b05dd320a7860165dd72ebd832ce6e484 | /intn_informe_bascula_web/models/models.py | 50eadeb8cc3bf328f707f559a4c7e5cdcabf4edf | [] | no_license | acostaw/erp_odoo | 97d02a675908e441cf8e1ba4e3dcbc62691f8dec | 2437997b650c9fdbf6a6f007c0a1fea2aab018e2 | refs/heads/main | 2023-04-19T14:52:48.877851 | 2021-04-22T18:40:07 | 2021-04-22T18:40:07 | 360,644,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
from odoo import models, fields, api
# class intn_informe_bascula_web(models.Model):
# _name = 'intn_informe_bascula_web.intn_informe_bascula_web'
# name = fields.Char()
# value = fields.Integer()
# value2 = fields.Float(compute="_value_pc", store=True)
# description = fields.Text()
#
# @api.depends('value')
# def _value_pc(self):
# self.value2 = float(self.value) / 100 | [
"wacosta@INTN.GOV.PY"
] | wacosta@INTN.GOV.PY |
d659427ec99c9669489b717acd6c596b6664ec5a | 98d22227d64517351db489dd5d751bcbf852e9b3 | /keras/applications/inception_v3.py | 58c6d1f27363aebe3a0e0b0c5994b6ce713b5512 | [
"MIT"
] | permissive | intel/keras | 5d4d869ff4ab96a440abc12a6654daca59cd6714 | ced92ff0293f95bf1c200b55af098e8e136686c2 | refs/heads/master | 2023-08-30T13:39:09.291242 | 2022-08-04T23:04:26 | 2022-08-04T23:04:26 | 72,058,381 | 13 | 5 | null | 2016-10-27T01:09:40 | 2016-10-27T01:09:38 | null | UTF-8 | Python | false | false | 12,610 | py | # -*- coding: utf-8 -*-
'''Inception V3 model for Keras.
Note that the ImageNet weights provided are from a model that had not fully converged.
Inception v3 should be able to reach 6.9% top-5 error, but our model
only gets to 7.8% (same as a fully-converged ResNet 50).
For comparison, VGG16 only gets to 9.9%, quite a bit worse.
Also, do note that the input image format for this model is different than for
the VGG16 and ResNet models (299x299 instead of 224x224), and that the input preprocessing function
is also different (same as Xception).
# Reference:
- [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567)
'''
from __future__ import print_function
from __future__ import absolute_import
import warnings
from ..models import Model
from ..layers import Flatten, Dense, Input, BatchNormalization, merge
from ..layers import Convolution2D, MaxPooling2D, AveragePooling2D
from ..utils.layer_utils import convert_all_kernels_in_model
from ..utils.data_utils import get_file
from .. import backend as K
from .imagenet_utils import decode_predictions
TH_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_th_dim_ordering_th_kernels.h5'
TF_WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_tf_dim_ordering_tf_kernels.h5'
TH_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_th_dim_ordering_th_kernels_notop.h5'
TF_WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5'
def conv2d_bn(x, nb_filter, nb_row, nb_col,
              border_mode='same', subsample=(1, 1),
              name=None):
    '''Apply a 2D convolution (ReLU-activated) followed by batch norm.

    When `name` is given the two layers are named '<name>_conv' and
    '<name>_bn'; otherwise they remain anonymous.
    '''
    conv_name = name + '_conv' if name is not None else None
    bn_name = name + '_bn' if name is not None else None
    # Theano ordering puts channels first, TensorFlow puts them last,
    # which decides the axis batch norm operates over.
    bn_axis = 1 if K.image_dim_ordering() == 'th' else 3
    conv = Convolution2D(nb_filter, nb_row, nb_col,
                         subsample=subsample,
                         activation='relu',
                         border_mode=border_mode,
                         name=conv_name)
    norm = BatchNormalization(axis=bn_axis, name=bn_name)
    return norm(conv(x))
def InceptionV3(include_top=True, weights='imagenet',
                input_tensor=None):
    '''Instantiate the Inception v3 architecture,
    optionally loading weights pre-trained
    on ImageNet. Note that when using TensorFlow,
    for best performance you should set
    `image_dim_ordering="tf"` in your Keras config
    at ~/.keras/keras.json.

    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.

    Note that the default input image size for this model is 299x299.

    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization)
            or "imagenet" (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.

    # Returns
        A Keras model instance.
    '''
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')
    # Determine proper input shape: fixed 299x299 with the classifier head,
    # variable spatial size (fully convolutional) without it.
    if K.image_dim_ordering() == 'th':
        if include_top:
            input_shape = (3, 299, 299)
        else:
            input_shape = (3, None, None)
    else:
        if include_top:
            input_shape = (299, 299, 3)
        else:
            input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # The concat axis of every merge below depends on the dim ordering.
    if K.image_dim_ordering() == 'th':
        channel_axis = 1
    else:
        channel_axis = 3

    # Stem: plain conv/pool layers before the inception ("mixed") blocks.
    x = conv2d_bn(img_input, 32, 3, 3, subsample=(2, 2), border_mode='valid')
    x = conv2d_bn(x, 32, 3, 3, border_mode='valid')
    x = conv2d_bn(x, 64, 3, 3)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv2d_bn(x, 80, 1, 1, border_mode='valid')
    x = conv2d_bn(x, 192, 3, 3, border_mode='valid')
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # mixed 0, 1, 2: 35 x 35 x 256
    for i in range(3):
        branch1x1 = conv2d_bn(x, 64, 1, 1)

        branch5x5 = conv2d_bn(x, 48, 1, 1)
        branch5x5 = conv2d_bn(branch5x5, 64, 5, 5)

        branch3x3dbl = conv2d_bn(x, 64, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)

        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 32, 1, 1)
        x = merge([branch1x1, branch5x5, branch3x3dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(i))

    # mixed 3: 17 x 17 x 768 (stride-2 branches halve the spatial size)
    branch3x3 = conv2d_bn(x, 384, 3, 3, subsample=(2, 2), border_mode='valid')

    branch3x3dbl = conv2d_bn(x, 64, 1, 1)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3)
    branch3x3dbl = conv2d_bn(branch3x3dbl, 96, 3, 3,
                             subsample=(2, 2), border_mode='valid')

    branch_pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
    x = merge([branch3x3, branch3x3dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed3')

    # mixed 4: 17 x 17 x 768 (factorized 7x7 convolutions: 1x7 then 7x1)
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 128, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 128, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 128, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 128, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed4')

    # mixed 5, 6: 17 x 17 x 768
    for i in range(2):
        branch1x1 = conv2d_bn(x, 192, 1, 1)

        branch7x7 = conv2d_bn(x, 160, 1, 1)
        branch7x7 = conv2d_bn(branch7x7, 160, 1, 7)
        branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

        branch7x7dbl = conv2d_bn(x, 160, 1, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 1, 7)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 160, 7, 1)
        branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(5 + i))

    # mixed 7: 17 x 17 x 768
    branch1x1 = conv2d_bn(x, 192, 1, 1)

    branch7x7 = conv2d_bn(x, 192, 1, 1)
    branch7x7 = conv2d_bn(branch7x7, 192, 1, 7)
    branch7x7 = conv2d_bn(branch7x7, 192, 7, 1)

    branch7x7dbl = conv2d_bn(x, 160, 1, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 7, 1)
    branch7x7dbl = conv2d_bn(branch7x7dbl, 192, 1, 7)

    branch_pool = AveragePooling2D((3, 3), strides=(1, 1), border_mode='same')(x)
    branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
    x = merge([branch1x1, branch7x7, branch7x7dbl, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed7')

    # mixed 8: 8 x 8 x 1280 (second spatial reduction)
    branch3x3 = conv2d_bn(x, 192, 1, 1)
    branch3x3 = conv2d_bn(branch3x3, 320, 3, 3,
                          subsample=(2, 2), border_mode='valid')

    branch7x7x3 = conv2d_bn(x, 192, 1, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 1, 7)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 7, 1)
    branch7x7x3 = conv2d_bn(branch7x7x3, 192, 3, 3,
                            subsample=(2, 2), border_mode='valid')

    branch_pool = AveragePooling2D((3, 3), strides=(2, 2))(x)
    x = merge([branch3x3, branch7x7x3, branch_pool],
              mode='concat', concat_axis=channel_axis,
              name='mixed8')

    # mixed 9: 8 x 8 x 2048 (expanded-filter-bank blocks)
    for i in range(2):
        branch1x1 = conv2d_bn(x, 320, 1, 1)

        branch3x3 = conv2d_bn(x, 384, 1, 1)
        branch3x3_1 = conv2d_bn(branch3x3, 384, 1, 3)
        branch3x3_2 = conv2d_bn(branch3x3, 384, 3, 1)
        branch3x3 = merge([branch3x3_1, branch3x3_2],
                          mode='concat', concat_axis=channel_axis,
                          name='mixed9_' + str(i))

        branch3x3dbl = conv2d_bn(x, 448, 1, 1)
        branch3x3dbl = conv2d_bn(branch3x3dbl, 384, 3, 3)
        branch3x3dbl_1 = conv2d_bn(branch3x3dbl, 384, 1, 3)
        branch3x3dbl_2 = conv2d_bn(branch3x3dbl, 384, 3, 1)
        branch3x3dbl = merge([branch3x3dbl_1, branch3x3dbl_2],
                             mode='concat', concat_axis=channel_axis)

        branch_pool = AveragePooling2D(
            (3, 3), strides=(1, 1), border_mode='same')(x)
        branch_pool = conv2d_bn(branch_pool, 192, 1, 1)
        x = merge([branch1x1, branch3x3, branch3x3dbl, branch_pool],
                  mode='concat', concat_axis=channel_axis,
                  name='mixed' + str(9 + i))

    if include_top:
        # Classification block
        x = AveragePooling2D((8, 8), strides=(8, 8), name='avg_pool')(x)
        x = Flatten(name='flatten')(x)
        x = Dense(1000, activation='softmax', name='predictions')(x)

    # Create model
    model = Model(img_input, x)

    # load weights (and convert kernels when weights were saved under the
    # other backend's convolution kernel convention)
    if weights == 'imagenet':
        if K.image_dim_ordering() == 'th':
            if include_top:
                weights_path = get_file('inception_v3_weights_th_dim_ordering_th_kernels.h5',
                                        TH_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='b3baf3070cc4bf476d43a2ea61b0ca5f')
            else:
                weights_path = get_file('inception_v3_weights_th_dim_ordering_th_kernels_notop.h5',
                                        TH_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='79aaa90ab4372b4593ba3df64e142f05')
            model.load_weights(weights_path)
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image dimension ordering convention '
                              '(`image_dim_ordering="th"`). '
                              'For best performance, set '
                              '`image_dim_ordering="tf"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
                convert_all_kernels_in_model(model)
        else:
            if include_top:
                weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels.h5',
                                        TF_WEIGHTS_PATH,
                                        cache_subdir='models',
                                        md5_hash='fe114b3ff2ea4bf891e9353d1bbfb32f')
            else:
                weights_path = get_file('inception_v3_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                        TF_WEIGHTS_PATH_NO_TOP,
                                        cache_subdir='models',
                                        md5_hash='2f3609166de1d967d1a481094754f691')
            model.load_weights(weights_path)
            if K.backend() == 'theano':
                convert_all_kernels_in_model(model)
    return model
def preprocess_input(x):
    # Rescale pixel values from [0, 255] to [-1, 1], mutating `x` in place
    # (same preprocessing scheme as Xception); the array is also returned
    # for convenience.
    x /= 255.
    x -= 0.5
    x *= 2.
    return x
| [
"francois.chollet@gmail.com"
] | francois.chollet@gmail.com |
0b979cd389adf373b4cf58c997b7186c16712406 | 291ede8b17c404991e8140b9e8815c8e2e799163 | /NSC/src/train.py | aa871d3320b2339b8b28a015610fc02105c1b09a | [] | no_license | SleepyBag/NSC_tensorflow | 54d53d0d174b8d3e85ae222c8c0ca7e985363c38 | 3a2b7ff4a9a29d9b49f6510767ba3b0e8d408536 | refs/heads/master | 2020-04-03T03:09:07.906478 | 2018-10-27T15:45:55 | 2018-10-27T15:45:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,527 | py | #-*- coding: utf-8 -*-
#author: Zhen Wu
import os, time, pickle
import datetime
import numpy as np
import tensorflow as tf
from data_helpers import Dataset
import data_helpers
from model import NSC
# Data loading params
tf.flags.DEFINE_integer("n_class", 5, "Numbers of class")
tf.flags.DEFINE_string("dataset", 'yelp13', "The dataset")
# Model Hyperparameters
tf.flags.DEFINE_integer("embedding_dim", 200, "Dimensionality of character embedding")
tf.flags.DEFINE_integer("sen_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("doc_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("usr_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer("prd_hidden_size", 100, "hidden_size of rnn")
tf.flags.DEFINE_integer('max_sen_len', 50, 'max number of tokens per sentence')
tf.flags.DEFINE_integer('max_doc_len', 40, 'max number of tokens per sentence')
tf.flags.DEFINE_float("lr", 0.005, "Learning rate")
# Training parameters
tf.flags.DEFINE_integer("batch_size", 100, "Batch Size")
tf.flags.DEFINE_integer("num_epochs", 1000, "Number of training epochs")
tf.flags.DEFINE_integer("evaluate_every", 25, "Evaluate model on dev set after this many steps")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
# Load data
print("Loading data...")
trainset = Dataset('../../data/' + FLAGS.dataset + '/train.ss')
devset = Dataset('../../data/' + FLAGS.dataset + '/dev.ss')
testset = Dataset('../../data/' + FLAGS.dataset + '/test.ss')
alldata = np.concatenate([trainset.t_docs, devset.t_docs, testset.t_docs], axis=0)
embeddingpath = '../../data/' + FLAGS.dataset + '/embedding.txt'
embeddingfile, wordsdict = data_helpers.load_embedding(embeddingpath, alldata, FLAGS.embedding_dim)
del alldata
print("Loading data finished...")
usrdict, prddict = trainset.get_usr_prd_dict()
trainbatches = trainset.batch_iter(usrdict, prddict, wordsdict, FLAGS.n_class, FLAGS.batch_size,
FLAGS.num_epochs, FLAGS.max_sen_len, FLAGS.max_doc_len)
devset.genBatch(usrdict, prddict, wordsdict, FLAGS.batch_size,
FLAGS.max_sen_len, FLAGS.max_doc_len, FLAGS.n_class)
testset.genBatch(usrdict, prddict, wordsdict, FLAGS.batch_size,
FLAGS.max_sen_len, FLAGS.max_doc_len, FLAGS.n_class)
with tf.Graph().as_default():
session_config = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement
)
session_config.gpu_options.allow_growth = True
sess = tf.Session(config=session_config)
with sess.as_default():
nsc = NSC(
max_sen_len = FLAGS.max_sen_len,
max_doc_len = FLAGS.max_doc_len,
cls_cnt = FLAGS.n_class,
emb_file = embeddingfile,
emb_dim = FLAGS.embedding_dim,
sen_hidden_size = FLAGS.sen_hidden_size,
doc_hidden_size = FLAGS.doc_hidden_size,
usr_hidden_size = FLAGS.usr_hidden_size,
prd_hidden_size = FLAGS.prd_hidden_size,
usr_cnt = len(usrdict),
prd_cnt = len(prddict)
)
loss, mse, correct_num, accuracy = nsc.build()
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(FLAGS.lr)
grads_and_vars = optimizer.compute_gradients(loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
# Save dict
timestamp = str(int(time.time()))
checkpoint_dir = os.path.abspath("../checkpoints/"+FLAGS.dataset+"/"+timestamp)
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
with open(checkpoint_dir + "/wordsdict.txt", 'wb') as f:
pickle.dump(wordsdict, f)
with open(checkpoint_dir + "/usrdict.txt", 'wb') as f:
pickle.dump(usrdict, f)
with open(checkpoint_dir + "/prddict.txt", 'wb') as f:
pickle.dump(prddict, f)
sess.run(tf.global_variables_initializer())
def train_step(batch, loss, accuracy):
u, p, x, y, sen_len, doc_len = zip(*batch)
feed_dict = {
nsc.usrid: u,
nsc.prdid: p,
nsc.input_x: x,
nsc.input_y: y,
nsc.sen_len: sen_len,
nsc.doc_len: doc_len
}
_, step, loss, accuracy = sess.run(
[train_op, global_step, loss, accuracy],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{0}: step {1}, loss {2}, acc {3}".format(time_str, step, loss, accuracy))
def predict_step(u, p, x, y, sen_len, doc_len, loss, accuracy, name=None):
feed_dict = {
nsc.usrid: u,
nsc.prdid: p,
nsc.input_x: x,
nsc.input_y: y,
nsc.sen_len: sen_len,
nsc.doc_len: doc_len
}
step, loss, accuracy, correct_num, mse = sess.run(
[global_step, loss, accuracy, nsc.correct_num, nsc.mse],
feed_dict)
return correct_num, accuracy, mse
def predict(dataset, loss, accuracy, name=None):
acc = 0
rmse = 0.
for i in xrange(dataset.epoch):
correct_num, _, mse = predict_step(dataset.usr[i], dataset.prd[i], dataset.docs[i],
dataset.label[i], dataset.sen_len[i], dataset.doc_len[i],
loss, accuracy, name)
acc += correct_num
rmse += mse
acc = acc * 1.0 / dataset.data_size
rmse = np.sqrt(rmse / dataset.data_size)
return acc, rmse
        # Best test metrics seen so far, reported at the dev-accuracy optimum.
        topacc = 0.
        toprmse = 0.
        better_dev_acc = 0.
        predict_round = 0

        # Training loop. For each batch...
        for tr_batch in trainbatches:
            train_step(tr_batch, loss, accuracy)
            current_step = tf.train.global_step(sess, global_step)
            if current_step % FLAGS.evaluate_every == 0:
                predict_round += 1
                print("\nEvaluation round %d:" % (predict_round))

                dev_acc, dev_rmse = predict(devset, loss, accuracy, name="dev")
                print("dev_acc: %.4f dev_RMSE: %.4f" % (dev_acc, dev_rmse))

                test_acc, test_rmse = predict(testset, loss, accuracy, name="test")
                print("test_acc: %.4f test_RMSE: %.4f" % (test_acc, test_rmse))

                # print topacc with best dev acc (model selection on dev set;
                # the checkpoint is only saved when dev accuracy improves)
                if dev_acc >= better_dev_acc:
                    better_dev_acc = dev_acc
                    topacc = test_acc
                    toprmse = test_rmse
                    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
                    print("Saved model checkpoint to {}\n".format(path))
                print("topacc: %.4f RMSE: %.4f" % (topacc, toprmse))
| [
"xueqianming200@gmail.com"
] | xueqianming200@gmail.com |
21ac7595d1c48ec6845defa0d35ade0a65638217 | 38b88b6123634e4d0deb4ffab4bdb8302dbc9e5a | /modules/estatistica-01/distribuicoes/distribuicao_normal-definicao.py | 23b5dae9208438902ac2a2e7b31f3855faa10625 | [] | no_license | Angelicogfa/data-science | 0c11d165b1d061c71812d596c86e4472a240017c | 30f05a3e62edd278a87f81eba952cce99bc9453e | refs/heads/master | 2020-04-21T09:13:38.211419 | 2019-06-28T13:36:47 | 2019-06-28T13:36:47 | 169,441,917 | 0 | 0 | null | 2019-11-02T07:00:19 | 2019-02-06T16:58:56 | Python | UTF-8 | Python | false | false | 1,482 | py | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
# Sample of 100 observations used to illustrate the normal distribution.
itens = [7.57, 6.72, 5.59, 9.56, 4.79, 4.84, 5.87, 10.23, 9.53, 6.99,
         9.51, 9.21, 5.78, 6.72, 8.96, 7.32, 7.64, 8.53, 5.9, 7.93,
         8.82, 8.45, 7.99, 5.77, 4.76, 4.49, 8.97, 6.60, 8.55, 6.30,
         6.54, 5.98, 10.88, 8.92, 7.01, 7.58, 9.47, 6.34, 6.17, 7.46,
         8.78, 7.13, 7.71, 8.06, 7.67, 7.05, 9.66, 4.37, 15.08, 9.20,
         7.64, 5.89, 11.16, 5.35, 5.75, 8.98, 8.74, 8.20, 8.79, 5.80,
         11.7, 5.53, 7.75, 6.54, 9.79, 7.43, 9.14, 5.78, 10.31, 10.12,
         9.68, 8.11, 5.54, 10.41, 8.83, 10.00, 5.54, 10.32, 6.92, 7.93,
         10.14, 9.66, 10.67, 8.17, 8.86, 8.40, 5.15, 6.98, 8.19, 8.72,
         8.76, 8.02, 8.93, 8.54, 3.26, 10.06, 8.18, 2.43, 9.17, 12.00]

print(itens)
# Sample median and sample standard deviation (ddof=1 -> Bessel's correction).
print(np.median(itens))
print(np.std(itens, ddof=1))

# Normal probability (Q-Q) plot: points close to a straight line suggest
# the sample is approximately normally distributed.
stats.probplot(itens, plot= plt)
plt.show()
# Normal distribution (z-score) formula:
# Z = (x - u) / a
#   x = value of interest
#   u = mean
#   a = standard deviation
#   Z = standardized value used to look up probabilities in the z-table
# The tabulated probability is cumulative, from left to right.
# Checking whether a distribution is approximately normal:
#   - the mean should sit at the center of a histogram of the data
#   - the histogram should be symmetric on both sides of that center
#   - the vast majority of the data should lie within at most 3 standard
#     deviations of the mean
#   - a normal probability (Q-Q) plot can also be used
| [
"angelicogfa@gmail.com"
] | angelicogfa@gmail.com |
b5851bb47c31679be956cce35108ea80515cd733 | 910be469257538bcbbd15e894679856a1d311252 | /server/service/kernel/migrations/0043_auto_20170424_2209.py | 10536bd456c1285476597fbe490fe0f21ae0fd3c | [] | no_license | bopo/bankeys2 | ece7e7faa93aab48bf5a336721bfa69b33a870d8 | 5a81f5f4cd6442aade444444ba768b9ffa9dcbd4 | refs/heads/master | 2023-08-19T04:16:12.063961 | 2023-08-04T09:09:00 | 2023-08-04T09:09:00 | 119,646,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-04-24 22:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations). Re-declares
    # Relancement.creation_time as an auto_now_add DateTimeField whose
    # verbose_name is the escaped Chinese string u'\u7533\u8bf7\u65f6\u95f4'
    # ("application time").

    dependencies = [
        ('kernel', '0042_auto_20170417_0213'),
    ]

    operations = [
        migrations.AlterField(
            model_name='relancement',
            name='creation_time',
            field=models.DateTimeField(auto_now_add=True, verbose_name='\u7533\u8bf7\u65f6\u95f4'),
        ),
    ]
| [
"ibopo@126.com"
] | ibopo@126.com |
1e60f4c33e6b2d6239d2677ec6afe2ff4f9186a6 | 057c525d6fbff928fc0cb0cd6b2930e9494b5d4b | /training-data/py/7-__init__.py | bfd89ded65d2386773e3e370d841ca01d3420cce | [] | no_license | uk-gov-mirror/ukwa.text-id | 0931742d1f2df3091ac52eee6160c177ea98180d | 5f3dcc6436bc46dedb375b37e3fd51c1c0d9b45b | refs/heads/master | 2022-02-26T15:32:15.901527 | 2019-11-19T16:36:06 | 2019-11-19T16:36:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # a ..a a aAaN
a .a a Aa, a_a_a, a_a_a_a, a_a_a_a, aAaN
a .aAaAaAa a *N
a .aAaAaAa a *N
a .a_a a Aa_AaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_AaAaAa_AaN
# a .a_a a Aa_AaAaAa_AaN
a .a_a_ a Aa_AaAa__AaN
a .a_a a Aa_AaAa_AaN
a .a_a a Aa_AaAaAa_AaN
a .a_a a Aa_Aa_AaN
N
a .aAa a aAaAa | [
"Andrew.Jackson@bl.uk"
] | Andrew.Jackson@bl.uk |
bd80320694ed6fa0379f916daa2fb0c7caa8d53d | 7c51b321d97b6e1f2480941cf6ce17e6fc1eef55 | /hungerstation/hungerstation/doctype/vacation_air_ticket/test_vacation_air_ticket.py | d9afd535d85a06afa39c73daed8577fc0c598c60 | [
"MIT"
] | permissive | poweroftrue/hungerstation | 1c53131a98968b92d678cda28f9db45068ae1454 | 8df88ce77cbde553b21f87511c6875d63b2aeb48 | refs/heads/master | 2020-03-12T09:49:22.202964 | 2018-04-16T09:58:15 | 2018-04-16T09:58:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Accurate Systems and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestVacationAirTicket(unittest.TestCase):
pass
| [
"mhbu50@gmail.com"
] | mhbu50@gmail.com |
35436f7d0a4d6539eac725bb92f926434e59aaf0 | 8a82a83655f118208692e55d7804d9fa480ad4b6 | /book/packt/Mastering.Natural.Language.Processing.with.Python/Chapter 1/ch1_10.py | 5f7445a2ea2ad22292f509ee07c1e70e85cceb00 | [] | no_license | xenron/sandbox-da-python | 0814159da9a91923e4b66c5e40057e381f765e96 | ab8f1c0d57fdc6006355f613012b84165068c315 | refs/heads/master | 2020-04-12T05:41:33.182110 | 2016-12-14T22:57:33 | 2016-12-14T22:57:33 | 60,324,979 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | import nltk
from nltk.tokenize import regexp_tokenize
sent="Don't hesitate to ask questions"
print(regexp_tokenize(sent, pattern='\w+|\$[\d\.]+|\S+'))
| [
"xenron@outlook.com"
] | xenron@outlook.com |
2cc9d0b711bdaca74f11120bcc21b5c032da427a | 2218e1da5cb944e4509f8641ca051de137645c5e | /剑指 Offer/54. KthLargest.py | bff16aa6411802e289ae82e16e257f787326e850 | [] | no_license | Hegemony/Python-Practice | 9e76ebb414433e51c2074602fb0a871891647839 | b68ea41688e9e305635c63fdc43402e2b6fe6524 | refs/heads/main | 2023-05-05T14:00:59.921803 | 2021-06-01T15:38:30 | 2021-06-01T15:38:30 | 301,602,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | # Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def kthLargest(self, root: TreeNode, k: int) -> int:
def preTraversal(root, nums):
if root == None:
return
nums.append(root.val)
preTraversal(root.left, nums)
preTraversal(root.right, nums)
nums = []
preTraversal(root, nums)
nums.sort(reverse=True)
return nums[k-1] | [
"noreply@github.com"
] | Hegemony.noreply@github.com |
be017a4ba1b77d079419dd99b0595b8acd34030a | fab39aa4d1317bb43bc11ce39a3bb53295ad92da | /examples/tensorflow/common/object_detection/utils/mask_utils.py | 4dde46ca5e8538cd4e262119415ba0ae1c611d1a | [
"Apache-2.0"
] | permissive | dupeljan/nncf | 8cdce27f25f01ce8e611f15e1dc3036fb8548d6e | 0abfd7103ca212888a946ba4d0fbdb9d436fdaff | refs/heads/develop | 2023-06-22T00:10:46.611884 | 2021-07-22T10:32:11 | 2021-07-22T10:32:11 | 388,719,455 | 0 | 0 | Apache-2.0 | 2021-07-23T07:46:15 | 2021-07-23T07:43:43 | null | UTF-8 | Python | false | false | 3,853 | py | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cv2
def paste_instance_masks(masks, detected_boxes, image_height, image_width):
"""Paste instance masks to generate the image segmentation results.
Args:
masks: a numpy array of shape [N, mask_height, mask_width] representing the
instance masks w.r.t. the `detected_boxes`.
detected_boxes: a numpy array of shape [N, 4] representing the reference
bounding boxes.
image_height: an integer representing the height of the image.
image_width: an integer representing the width of the image.
Returns:
segms: a numpy array of shape [N, image_height, image_width] representing
the instance masks *pasted* on the image canvas.
"""
def expand_boxes(boxes, scale):
"""Expands an array of boxes by a given scale."""
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/boxes.py#L227
# The `boxes` in the reference implementation is in [x1, y1, x2, y2] form,
# whereas `boxes` here is in [x1, y1, w, h] form
w_half = boxes[:, 2] * .5
h_half = boxes[:, 3] * .5
x_c = boxes[:, 0] + w_half
y_c = boxes[:, 1] + h_half
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
# Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/core/test.py#L812
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
_, mask_height, mask_width = masks.shape
scale = max((mask_width + 2.0) / mask_width, (mask_height + 2.0) / mask_height)
ref_boxes = expand_boxes(detected_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((mask_height + 2, mask_width + 2), dtype=np.float32)
segms = []
for mask_ind, mask in enumerate(masks):
im_mask = np.zeros((image_height, image_width), dtype=np.uint8)
# Process mask inside bounding boxes.
padded_mask[1:-1, 1:-1] = mask[:, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h)) # pylint: disable=E1101
mask = np.array(mask > 0.5, dtype=np.uint8)
x_0 = min(max(ref_box[0], 0), image_width)
x_1 = min(max(ref_box[2] + 1, 0), image_width)
y_0 = min(max(ref_box[1], 0), image_height)
y_1 = min(max(ref_box[3] + 1, 0), image_height)
im_mask[y_0:y_1, x_0:x_1] = mask[(y_0 - ref_box[1]):(y_1 - ref_box[1]),
(x_0 - ref_box[0]):(x_1 - ref_box[0])]
segms.append(im_mask)
segms = np.array(segms)
assert masks.shape[0] == segms.shape[0]
return segms
| [
"noreply@github.com"
] | dupeljan.noreply@github.com |
de16a25bb4c0fe0e41345993cb917cb6907c5490 | 09c87fe780df6d1f9eb33799ed516a0bbd7ab1e3 | /Admin/bitly-releases/bitly.py | 5285e9b5e8d0667bd6b843a772839257b5701f7a | [] | no_license | abulka/pynsource | 8ad412b85dc1acaeb83d7d34af8cc033c6baba91 | 979436525c57fdaeaa832e960985e0406e123587 | refs/heads/master | 2023-04-13T12:58:02.911318 | 2023-04-11T09:56:32 | 2023-04-11T09:56:32 | 32,249,425 | 271 | 46 | null | 2022-10-10T04:36:57 | 2015-03-15T07:21:43 | Python | UTF-8 | Python | false | false | 6,024 | py | """
Generate the links for
- DOWNLOADS.md
- Bitly
- Main website html
from parsing the Github release page HTML information
"""
import requests
from bs4 import BeautifulSoup
import bs4
import os
from dataclasses import dataclass # requires 3.7
from typing import List, Set, Dict, Tuple, Optional
import pprint
from beautifultable import BeautifulTable
from textwrap import dedent
releaseUrl = "https://github.com/abulka/pynsource/releases/tag/version-1.77"
response = requests.get(releaseUrl)
assert response.status_code == 200
html_doc = response.text
# with open("junk.html", "w") as fp:
# fp.write(html_doc)
soup = BeautifulSoup(html_doc, "html.parser")
# print(soup)
@dataclass
class DownloadEntity:
# link: bs4.element.Tag
url: str
basename: str
basenameNoExtension: str
bitlyUrl: str
downloads: Dict[str, DownloadEntity] = {}
for link in soup.find_all("a"):
if "/abulka/pynsource/releases/download/" in link.get("href"):
# print(link.get('href'))
url = f"https://github.com{link.get('href')}" # e.g. https://github.com/abulka/pynsource/releases/download/version-1.77/pynsource-1.77-macosx.zip
basename = os.path.basename(url) # e.g. pynsource-1.77-macosx.zip
basenameNoExtension = os.path.splitext(basename)[0] # e.g. pynsource-1.77-macosx
basenameNoExtension = basenameNoExtension.replace('.', '-') # get rid of the illegal '.' chars bitly doesn't like e.g. pynsource-1-77-macosx
bitlyUrl = f"http://bit.ly/{basenameNoExtension}" # e.g. http://bit.ly/pynsource-1-77-macosx
entity = DownloadEntity(
basename=basename,
basenameNoExtension=basenameNoExtension,
url=url,
bitlyUrl=bitlyUrl,
)
if "-macosx" in basename:
downloads["mac"] = entity
elif "-win-" in basename:
downloads["win"] = entity
elif "-ubuntu-18" in basename:
downloads["ubuntu-18"] = entity
elif "-ubuntu-16" in basename:
downloads["ubuntu-16"] = entity
else:
raise RuntimeError(
f"Unknown url on Github releases page {url} - cannot detect OS"
)
# validate that each download url exists OK - requests can't seem to handle it ?
#
# headers = {
# "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.52",
# "Referer": "https://github.com/abulka/pynsource/releases/edit/untagged-3ddd799663921fd65d7a",
# "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
# "Accept-Encoding": "gzip, deflate, br",
# "Accept-Language": "en-AU,en-GB;q=0.9,en;q=0.8,en-US;q=0.7",
# "Cache-Control": "max-age=0",
# "Connection": "keep-alive",
# "Host": "github.com",
# "Sec-Fetch-Dest": "document",
# "Sec-Fetch-Mode": "navigate",
# "Sec-Fetch-Site": "same-origin",
# "Sec-Fetch-User": "?1",
# "Upgrade-Insecure-Requests": "1",
# }
# for downloadEntity in downloads.values():
# r = requests.head(downloadEntity.url, allow_redirects=True, headers=headers)
# print(r.url)
# # try again - doesn't seem to work, still get a 403
# if r.status_code == 403:
# newUrl = r.url # probably to amazon
# print("trying again...")
# r = requests.head(newUrl, allow_redirects=True, headers=headers)
# if r.status_code == 200:
# print(f"Url {downloadEntity.url} exists OK")
# elif r.status_code == 403:
# raise RuntimeError(
# f"Forbidden download url {downloadEntity.url} status {r.status_code}"
# )
# else:
# raise RuntimeError(
# f"Malformed download url {downloadEntity.url} status {r.status_code}"
# )
# print(downloads)
# pprint.pprint(downloads)
# Now that we have gathered up the information, generate the needed outputs
downloadMarkdown = f"""
* [Mac download]({downloads["mac"].bitlyUrl}) (unzip and drag app into the Applications directory)
* [Windows 10 download]({downloads["win"].bitlyUrl}) (unzip and run the installer)
* [Ubuntu Linux 18.0.4 download]({downloads["ubuntu-18"].bitlyUrl}) (unzip and run the executable)
* [Ubuntu Linux 16.0.4 download]({downloads["ubuntu-16"].bitlyUrl}) (unzip and run the executable)
* [Linux snap installer](http://bit.ly/pynsource-snap) (one-click install on any Ubuntu distro)
"""
print("DOWNLOADS.md")
print(downloadMarkdown)
t = BeautifulTable(max_width=760)
t.column_headers = [
"OS",
"download-url",
"customize back half / title",
"final bitly-url",
]
t.column_alignments["download-url"] = BeautifulTable.ALIGN_LEFT
t.column_alignments["final bitly-url"] = BeautifulTable.ALIGN_LEFT
for os, downloadEntity in downloads.items():
t.append_row(
[os, downloadEntity.url, downloadEntity.basenameNoExtension, downloadEntity.bitlyUrl,]
)
print("Bitly Entries to create (click on each link in turn (in vscode terminal) to ensure it exists and triggers a download)")
print(t)
print()
htmlFragmentForWebsite = dedent(f"""
<p>The latest version is <code>1.77</code></p>
<ul>
<li><a href="{downloads["mac"].bitlyUrl}" rel="nofollow">Mac download</a> (unzip and drag app into the Applications directory)</li>
<li><a href="{downloads["win"].bitlyUrl}" rel="nofollow">Windows 10 download</a> (unzip and run the installer)</li>
<li><a href="{downloads["ubuntu-18"].bitlyUrl}" rel="nofollow">Ubuntu Linux 18.0.4 download</a> (unzip and run the executable)</li>
<li><a href="{downloads["ubuntu-16"].bitlyUrl}" rel="nofollow">Ubuntu Linux 16.0.4 download</a> (unzip and run the executable)</li>
<li><a href="http://bit.ly/pynsource-snap" rel="nofollow">Linux snap installer</a> (one-click install on any Ubuntu distro)</li>
</ul>
""")
print("Fragment of HTML to put on official website on downloads page")
print(htmlFragmentForWebsite)
| [
"abulka@gmail.com"
] | abulka@gmail.com |
6dbbb0165a3e7b4a8f5c1900e13b0dda93327c4f | 47ef6997d03f4d5c921c83cc09aef1dfc6828e2c | /zeus/networks/erdb_esr.py | 9f0c7e19ded1f4cd4204891add8cb2e93f462763 | [
"MIT"
] | permissive | huawei-noah/xingtian | 620c9f245183d636e0a65659fd99a984397ecbd4 | e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04 | refs/heads/master | 2023-09-03T01:10:21.768245 | 2022-03-21T03:39:39 | 2022-03-21T03:39:39 | 287,759,621 | 308 | 91 | MIT | 2023-09-12T11:33:22 | 2020-08-15T14:13:06 | Python | UTF-8 | Python | false | false | 14,306 | py | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Efficient residual dense models for super-resolution."""
import math
import logging
from zeus.modules.module import Module
from zeus.modules.operators import ops
from zeus.modules.connections import Sequential
from zeus.common.class_factory import ClassType, ClassFactory
def channel_shuffle(x, groups):
"""Shuffle the channel of features.
:param x: feature maps
:type x: tensor
:param groups: group number of channels
:type groups: int
:return: shuffled feature map
:rtype: tensor
"""
batchsize, num_channels, height, width = ops.get_shape(x)
channels_per_group = num_channels // groups
x = ops.View([batchsize, groups, channels_per_group, height, width])(x)
x = ops.Transpose(1, 2)(x)
x = ops.View([batchsize, num_channels, height, width])(x)
return x
class RDB_Conv(Module):
"""Convolution operation of efficient residual dense block with shuffle and group."""
def __init__(self, inChannels, growRate, sh_groups, conv_groups, kSize=3):
"""Initialize Block.
:param inChannels: channel number of input
:type inChannels: int
:param growRate: growth rate of block
:type growRate: int
:param sh_groups: group number of shuffle operation
:type sh_groups: int
:param conv_groups: group number of convolution operation
:type conv_groups: int
:param kSize: kernel size of convolution operation
:type kSize: int
"""
super(RDB_Conv, self).__init__()
Cin = inChannels
G = growRate
self.shgroup = sh_groups
self.congroup = conv_groups
self.conv = Sequential(
ops.Conv2d(Cin, G, kSize, padding=(kSize - 1) // 2, stride=1,
groups=self.congroup),
ops.Relu()
)
def call(self, x):
"""Forward function.
:param x: input tensor
:type x: tensor
:return: the output of block
:rtype: tensor
"""
if self.data_format == "channels_first":
out = self.conv(channel_shuffle(x, groups=self.shgroup))
else:
x = ops.Permute([0, 3, 1, 2])(x)
out = self.conv(channel_shuffle(x, groups=self.shgroup))
x = ops.Permute([0, 2, 3, 1])(x)
out = ops.Permute([0, 2, 3, 1])(out)
return ops.concat((x, out))
class Group_RDB(Module):
"""Group residual dense block."""
def __init__(self, InChannel, OutChannel, growRate, nConvLayers, kSize=3):
"""Initialize Block.
:param InChannel: channel number of input
:type InChannel: int
:param OutChannel: channel number of output
:type OutChannel: int
:param growRate: growth rate of block
:type growRate: int
:param nConvLayers: the number of convlution layer
:type nConvLayers: int
:param kSize: kernel size of convolution operation
:type kSize: int
"""
super(Group_RDB, self).__init__()
self.InChan = InChannel
self.OutChan = OutChannel
self.G = growRate
self.C = nConvLayers
if self.InChan != self.G:
self.InConv = ops.Conv2d(
self.InChan, self.G, 1, padding=0, stride=1)
if self.OutChan != self.G and self.OutChan != self.InChan:
self.OutConv = ops.Conv2d(
self.InChan, self.OutChan, 1, padding=0, stride=1)
convs = []
for c in range(self.C):
convs.append(RDB_Conv((c + 1) * self.G, self.G, c + 1,
min(4, 2 ** int(math.log(c + 1, 2)))))
self.convs = Sequential(*convs)
self.LFF = ops.Conv2d((self.C + 1) * self.G, self.OutChan, 1, padding=0,
stride=1)
def call(self, x):
"""Forward function.
:param x: input tensor
:type x: tensor
:return: the output of block
:rtype: tensor
"""
if self.InChan != self.G:
x_InC = self.InConv(x)
x_inter = self.LFF(self.convs(x_InC))
else:
x_InC = None
x_inter = self.LFF(self.convs(x))
if self.OutChan == self.InChan:
x_return = x + x_inter
elif self.OutChan == self.G:
x_return = x_InC + x_inter
else:
x_return = self.OutConv(x) + x_inter
return x_return
class Shrink_RDB(Module):
"""Shrink residual dense block."""
def __init__(self, InChannel, OutChannel, growRate, nConvLayers, kSize=3):
"""Initialize Block.
:param InChannel: channel number of input
:type InChannel: int
:param OutChannel: channel number of output
:type OutChannel: int
:param growRate: growth rate of block
:type growRate: int
:param nConvLayers: the number of convlution layer
:type nConvLayers: int
:param kSize: kernel size of convolution operation
:type kSize: int
"""
super(Shrink_RDB, self).__init__()
self.InChan = InChannel
self.OutChan = OutChannel
self.G = growRate
self.C = nConvLayers
if self.InChan != self.G:
self.InConv = ops.Conv2d(
self.InChan, self.G, 1, padding=0, stride=1)
if self.OutChan != self.G and self.OutChan != self.InChan:
self.OutConv = ops.Conv2d(self.InChan, self.OutChan, 1, padding=0,
stride=1)
self.Convs = ops.MoudleList()
self.ShrinkConv = ops.MoudleList()
for i in range(self.C):
self.Convs.append(Sequential(
ops.Conv2d(self.G, self.G, kSize, padding=(kSize - 1) // 2,
stride=1), ops.Relu()))
if i == (self.C - 1):
self.ShrinkConv.append(
ops.Conv2d((2 + i) * self.G, self.OutChan, 1, padding=0,
stride=1))
else:
self.ShrinkConv.append(
ops.Conv2d((2 + i) * self.G, self.G, 1, padding=0, stride=1))
def call(self, x):
"""Forward function.
:param x: input tensor
:type x: tensor
:return: the output of block
:rtype: tensor
"""
if self.InChan != self.G:
x_InC = self.InConv(x)
x_inter = self.Convs[0](x_InC)
x_conc = ops.concat((x_InC, x_inter))
x_in = self.ShrinkConv[0](x_conc)
else:
x_InC = None
x_inter = self.Convs[0](x)
x_conc = ops.concat((x, x_inter))
x_in = self.ShrinkConv[0](x_conc)
for i in range(1, self.C):
x_inter = self.Convs[i](x_in)
x_conc = ops.concat((x_conc, x_inter))
x_in = self.ShrinkConv[i](x_conc)
if self.OutChan == self.InChan:
x_return = x + x_in
elif self.OutChan == self.G:
x_return = x_InC + x_in
else:
x_return = self.OutConv(x) + x_in
return x_return
class Cont_RDB(Module):
"""Contextual residual dense block."""
def __init__(self, InChannel, OutChannel, growRate, nConvLayers, kSize=3):
"""Initialize Block.
:param InChannel: channel number of input
:type InChannel: int
:param OutChannel: channel number of output
:type OutChannel: int
:param growRate: growth rate of block
:type growRate: int
:param nConvLayers: the number of convlution layer
:type nConvLayers: int
:param kSize: kernel size of convolution operation
:type kSize: int
"""
super(Cont_RDB, self).__init__()
self.InChan = InChannel
self.OutChan = OutChannel
self.G = growRate
self.C = nConvLayers
if self.InChan != self.G:
self.InConv = ops.Conv2d(
self.InChan, self.G, 1, padding=0, stride=1)
if self.OutChan != self.G and self.OutChan != self.InChan:
self.OutConv = ops.Conv2d(
self.InChan, self.OutChan, 1, padding=0, stride=1)
self.pool = ops.AvgPool2d(2, 2)
self.shup = ops.PixelShuffle(2)
self.Convs = ops.MoudleList()
self.ShrinkConv = ops.MoudleList()
for i in range(self.C):
self.Convs.append(Sequential(
ops.Conv2d(self.G, self.G, kSize, padding=(kSize - 1) // 2,
stride=1), ops.Relu()))
if i < (self.C - 1):
self.ShrinkConv.append(ops.Conv2d(
(2 + i) * self.G, self.G, 1, padding=0, stride=1))
else:
self.ShrinkConv.append(
ops.Conv2d(int((2 + i) * self.G / 4), self.OutChan, 1,
padding=0, stride=1))
def call(self, x):
"""Forward function.
:param x: input tensor
:type x: tensor
:return: the output of block
:rtype: tensor
"""
if self.InChan != self.G:
x_InC = self.InConv(x)
x_in = self.pool(x_InC)
else:
x_InC = None
x_in = self.pool(x)
x_conc = x_in
for i in range(0, self.C):
x_inter = self.Convs[i](x_in)
x_inter = self.Convs[i](x_inter)
x_inter = self.Convs[i](x_inter)
x_conc = ops.concat((x_conc, x_inter))
if i == (self.C - 1):
x_conc = self.shup(x_conc)
x_in = self.ShrinkConv[i](x_conc)
else:
x_in = self.ShrinkConv[i](x_conc)
if self.OutChan == self.InChan:
x_return = x + x_in
elif self.OutChan == self.G:
x_return = x_InC + x_in
else:
x_return = self.OutConv(x) + x_in
return x_return
class ERDBLayer(Module):
"""Create ERDBLayer Searchspace."""
def __init__(self, arch, G0, kSize):
"""Create ERDBLayer.
:param arch: arch
:type arch: dict
:param G0: G0
:type G0: G0
:param kSize: kSize
:type kSize: int
"""
super(ERDBLayer, self).__init__()
self.SFENet2 = ops.Conv2d(
G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
b_in_chan = G0
b_out_chan = 0
Conc_all = 0
ERDBs = ops.MoudleList()
for i in range(len(arch)):
name = arch[i]
key = name.split('_')
if i > 0:
b_in_chan = b_out_chan
b_conv_num = int(key[1])
b_grow_rat = int(key[2])
b_out_chan = int(key[3])
Conc_all += b_out_chan
if key[0] == 'S':
ERDBs.append(Shrink_RDB(InChannel=b_in_chan,
OutChannel=b_out_chan,
growRate=b_grow_rat,
nConvLayers=b_conv_num))
elif key[0] == 'G':
ERDBs.append(Group_RDB(InChannel=b_in_chan,
OutChannel=b_out_chan,
growRate=b_grow_rat,
nConvLayers=b_conv_num))
elif key[0] == 'C':
ERDBs.append(Cont_RDB(InChannel=b_in_chan,
OutChannel=b_out_chan,
growRate=b_grow_rat,
nConvLayers=b_conv_num))
self.ERBD = ERDBs
self.GFF = Sequential(
ops.Conv2d(Conc_all, G0, 1, padding=0, stride=1),
ops.Conv2d(G0, G0, kSize, padding=(kSize - 1) // 2, stride=1)
)
def call(self, inputs):
"""Calculate the output of the model.
:param x: input tensor
:type x: tensor
:return: output tensor of the model
:rtype: tensor
"""
x = self.SFENet2(inputs)
ERDBs_out = ()
for net in self.ERBD:
x = net(x)
ERDBs_out += (x,)
x = self.GFF(ops.concat(ERDBs_out))
x += inputs
return x
@ClassFactory.register(ClassType.NETWORK)
class ESRN(Module):
"""Efficient super-resolution networks construction."""
def __init__(self, block_type, conv_num, growth_rate, type_prob, conv_prob, growth_prob,
G0, scale, code, architecture):
"""Construct the ESRN class.
:param net_desc: config of the searched structure
:type net_desc: list
"""
super(ESRN, self).__init__()
logging.info("start init ESRN")
self.arch = architecture
self.D = len(self.arch)
r = scale
G0 = G0
kSize = 3
n_colors = 3
self.SFENet1 = ops.Conv2d(
n_colors, G0, kSize, padding=(kSize - 1) // 2, stride=1)
self.ERDBLayer = ERDBLayer(architecture, G0, kSize)
if r == 2 or r == 3:
self.UPNet = Sequential(
ops.Conv2d(G0, G0 * 3, kSize, padding=(kSize - 1) // 2,
stride=1),
ops.PixelShuffle(r),
ops.Conv2d(int(G0 * 3 / 4), n_colors, kSize,
padding=(kSize - 1) // 2, stride=1)
)
elif r == 4:
self.UPNet = Sequential(
ops.Conv2d(G0, G0 * 4, kSize, padding=(kSize - 1) // 2,
stride=1),
ops.PixelShuffle(2),
ops.Conv2d(G0, G0 * 4, kSize, padding=(kSize - 1) // 2,
stride=1),
ops.PixelShuffle(2),
ops.Conv2d(G0, n_colors, kSize, padding=(kSize - 1) // 2,
stride=1)
)
else:
raise ValueError("scale must be 2 or 3 or 4.")
| [
"hustqj@126.com"
] | hustqj@126.com |
5eb658bffb7a8c72a2d6633d288eb1a0ba4c1005 | f538e3974b8d9718a3cd24c1dea77023789c9315 | /DjangoUbuntu/images_env/bin/pip3.4 | f456ff02511f0abd21310a167cd42e107b3e6c74 | [] | no_license | doremonkinhcan87/BlogImage | de1eab86505befb595844ed15168d1eb7d352121 | c25dbe8c0a54c3294d3c8353cc9baf0a748a3707 | refs/heads/master | 2016-08-11T10:18:19.654850 | 2016-01-27T09:07:13 | 2016-01-27T09:07:13 | 49,034,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | 4 | #!/var/www/images_env/bin/python3.4
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"dautienthuy@gmail.com"
] | dautienthuy@gmail.com |
e9f8199bbd0443f5ade26424134dfc5c24dfbf03 | 7c843f80a08db6725fd8d2e85099d9e6c13f6426 | /nets/res-unet1/trainInterface.py | b9b17f7178bc520e09135402109634d591211eae | [] | no_license | wanfade/scaffolding_Seg | e983c1d1cdd60efcd7d381728c277993a1cf4721 | 12ba8892eb44d3ce47fa2609973b0510904c4753 | refs/heads/master | 2023-03-16T05:57:28.808341 | 2017-11-25T13:53:11 | 2017-11-25T13:53:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,608 | py | # coding: utf-8
'''
res-unet
全图训练 自动填充黑边 以适应上下采样
Parameters
----------
step : int
填充黑边 将图片shape 调整为step的整数倍
'''
from lib import *
import logging
logging.basicConfig(level=logging.INFO)
npm = lambda m:m.asnumpy()
npm = FunAddMagicMethod(npm)
import mxnet as mx
from netdef import getNet
class SimpleBatch(object):
def __init__(self, data, label, pad=0):
self.data = data
self.label = label
self.pad = pad
from collections import Iterator
class genImg(Iterator):
def __init__(self,names,batch=1,
handleImgGt=None,
timesPerRead=1,
):
self.names = names
self.batch = batch
self.tpr = timesPerRead
self.handleImgGt = handleImgGt
self.genNameBatchs()
def genNameBatchs(self):
import random
self.now = 0
random.shuffle(self.names)
batch = self.batch
nameBatchs = listToBatch(self.names,batch)
more = (batch - len(nameBatchs[-1]))
nameBatchs[-1] += tuple(random.sample(self.names,more))
self.nameBatchs = nameBatchs
self.lenn = len(nameBatchs)
reset = genNameBatchs
def next(self):
now,lenn,names = self.now,self.lenn,self.nameBatchs
if lenn == now:
self.genNameBatchs()
raise StopIteration
self.now += 1
imgs = [];gts = []
for img,gt in names[now]:
imgs.append(imread(img))
gts.append(imread(gt))
if self.handleImgGt:
return self.handleImgGt(imgs,gts)
return (imgs,gts)
labrgb = lambda lab:cv2.cvtColor(lab,cv2.COLOR_LAB2RGB)
randint = lambda x:np.random.randint(-x,x)
def imgToLab(img,gt):
labr=cv2.cvtColor(img,cv2.COLOR_RGB2LAB)#/np.float32(255)
return labr
def imgAug(img,gt,prob=.5):
lab = img
if np.random.random()<prob:
lab = imgToLab(img,gt)
if np.random.random()<prob:
lab=np.fliplr(lab)
gt=np.fliplr(gt)
# show(labrgb(lab),img)
return lab,gt
def imgGtAdd0Fill(step=1):
def innerf(imgs,gts):
img = imgs[0][::c.resize,::c.resize]
h,w = img.shape[:2]
hh = ((h-1)//step+1)*step
ww = ((w-1)//step+1)*step
nimgs,ngts=[],[]
for img,gt in zip(imgs,gts):
gt=gt>.5
img,gt = img[::c.resize,::c.resize],gt[::c.resize,::c.resize]
img,gt = imgAug(img,gt)
img = img/255.
nimg = np.zeros((hh,ww,3))
ngt = np.zeros((hh,ww),np.bool)
h,w = img.shape[:2]
nimg[:h,:w] = img
ngt[:h,:w]=gt
nimgs.append(nimg)
ngts.append(ngt)
imgs,gts=np.array(nimgs),np.array(ngts)
# return imgs,gts
imgs = imgs.transpose(0,3,1,2)
mximgs = map(mx.nd.array,[imgs])
mxgtss = map(mx.nd.array,[gts])
mxdata = SimpleBatch(mximgs,mxgtss)
return mxdata
return innerf
class GenSimgInMxnet(genImg):
@property
def provide_data(self):
return [('data', (c.batch, 3, c.simgShape[0], c.simgShape[1]))]
@property
def provide_label(self):
return [('softmax1_label', (c.batch, c.simgShape[0], c.simgShape[1])),]
def saveNow(name = None):
f=mx.callback.do_checkpoint(name or args.prefix)
f(-1,mod.symbol,*mod.get_params())
c = dicto(
gpu = 1,
lr = 0.01,
epochSize = 10000,
step=64
)
c.resize = 1
if __name__ == '__main__':
from train import args
else:
from configManager import args
c.update(args)
args = c
img = imread(c.names[0][0])
img = img[::c.resize,::c.resize]
h,w = img.shape[:2]
hh = ((h-1)//c.step+1)*c.step
ww = ((w-1)//c.step+1)*c.step
args.simgShape = (hh,ww)
net = getNet(args.classn)
if args.resume:
print('resume training from epoch {}'.format(args.resume))
_, arg_params, aux_params = mx.model.load_checkpoint(
args.prefix, args.resume)
else:
arg_params = None
aux_params = None
if 'plot' in args:
mx.viz.plot_network(net, save_format='pdf', shape={
'data': (1, 3, 640, 640),
'softmax1_label': (1, 640, 640), }).render(args.prefix)
exit(0)
mod = mx.mod.Module(
symbol=net,
context=[mx.gpu(k) for k in range(args.gpu)],
data_names=('data',),
label_names=('softmax1_label',)
)
c.mod = mod
#if 0:
args.names = args.names[:]
# data = GenSimgInMxnet(args.names, args.simgShape,
# handleImgGt=handleImgGt,
# batch=args.batch,
# cache=None,
# iters=args.epochSize
# )
gen = GenSimgInMxnet(args.names,c.batch,handleImgGt=imgGtAdd0Fill(c.step))
g.gen = gen
total_steps = len(c.names) * args.epoch
lr_sch = mx.lr_scheduler.MultiFactorScheduler(
step=[total_steps // 2, total_steps // 4 * 3], factor=0.1)
def train():
mod.fit(
gen,
begin_epoch=args.resume,
arg_params=arg_params,
aux_params=aux_params,
batch_end_callback=mx.callback.Speedometer(args.batch),
epoch_end_callback=mx.callback.do_checkpoint(args.prefix),
optimizer='sgd',
optimizer_params=(('learning_rate', args.lr), ('momentum', 0.9),
('lr_scheduler', lr_sch), ('wd', 0.0005)),
num_epoch=args.epoch)
if __name__ == '__main__':
pass
if 0:
#%%
ne = g.gen.next()
#for ne in dd:
ds,las = ne.data, ne.label
d,la = npm-ds[0],npm-las[0]
im = d.transpose(0,2,3,1)
show(labrgb(uint8(im[0])));show(la)
| [
"ylxx@live.com"
] | ylxx@live.com |
b68b7615a7af8bb6f8aee3839a354f867e3f5bc5 | e26bf05bc4177e15c5f8cb28690882189d332bdf | /transformers_keras/question_answering/readers.py | f1662f73bc73213045db4c7d2e1530ae5abb8529 | [
"Apache-2.0"
] | permissive | OuyKai/transformers-keras | 1e4ed574acafcb807f3073f45e6462025c0139e5 | 58b87d5feb5632e3830c2d3b27873df6ae6be4b3 | refs/heads/master | 2023-09-06T07:50:10.404744 | 2021-11-23T02:34:34 | 2021-11-23T02:34:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | import json
import logging
import os
def read_jsonl_files_for_prediction(
input_files, conetxt_key="context", question_key="question", answers_key="answer", **kwargs
):
if isinstance(input_files, str):
input_files = [input_files]
for f in input_files:
if not os.path.exists(f):
logging.warning("File %d does not exist, skipped.", f)
continue
with open(f, mode="rt", encoding="utf-8") as fin:
for line in fin:
line = line.strip()
if not line:
continue
data = json.loads(line)
answers = data[answers_key]
if isinstance(answers, str):
answers = [answers]
answer = answers[0]
instance = {"context": data[conetxt_key], "question": data[question_key], "answer": answer}
yield instance
| [
"zhouyang.luo@gmail.com"
] | zhouyang.luo@gmail.com |
268edd9811fd6743a2f68e9cdc53f307295bd5df | ff23900a911e099595c392a7efab1d268b4f5f7d | /python_modules/libraries/dagster-census/dagster_census_tests/test_op.py | e552b2e971e3033594f96e914cba86674bacb4b9 | [
"Apache-2.0"
] | permissive | zkan/dagster | bbf2da091bdc7fca028c569db72b9c68ddf55e98 | b2b19edb71fc8985f505b116927350dd23b4a7d9 | refs/heads/master | 2022-08-24T03:20:12.583577 | 2022-08-16T00:01:23 | 2022-08-16T00:01:23 | 244,012,061 | 0 | 0 | Apache-2.0 | 2020-02-29T17:33:24 | 2020-02-29T17:33:24 | null | UTF-8 | Python | false | false | 1,952 | py | import responses
from dagster_census import CensusOutput, census_resource, census_trigger_sync_op
from dagster import job, op
from .utils import (
get_destination_data,
get_source_data,
get_sync_data,
get_sync_run_data,
get_sync_trigger_data,
)
def test_census_trigger_sync_op():
cen_resource = census_resource.configured({"api_key": "foo"})
@op
def foo_op():
pass
@job(
resource_defs={"census": cen_resource},
config={
"ops": {
"census_trigger_sync_op": {
"config": {
"sync_id": 52,
"poll_interval": 0,
"poll_timeout": 10,
}
}
}
},
)
def census_sync_job():
census_trigger_sync_op(start_after=foo_op())
with responses.RequestsMock() as rsps:
rsps.add(
rsps.GET,
"https://app.getcensus.com/api/v1/syncs/52",
json=get_sync_data(),
)
rsps.add(
rsps.GET,
"https://app.getcensus.com/api/v1/sources/15",
json=get_source_data(),
)
rsps.add(
rsps.GET,
"https://app.getcensus.com/api/v1/destinations/15",
json=get_destination_data(),
)
rsps.add(
rsps.POST,
"https://app.getcensus.com/api/v1/syncs/52/trigger",
json=get_sync_trigger_data(),
)
rsps.add(
rsps.GET,
"https://app.getcensus.com/api/v1/sync_runs/94",
json=get_sync_run_data(),
)
result = census_sync_job.execute_in_process()
assert result.output_for_node("census_trigger_sync_op") == CensusOutput(
sync_run=get_sync_run_data()["data"],
source=get_source_data()["data"],
destination=get_destination_data()["data"],
)
| [
"noreply@github.com"
] | zkan.noreply@github.com |
4b935fb5f1d7a8408bd454a00959604aafb39b14 | d58a90a5befc0a594d6cde3ecd3a1233f422db04 | /solutions/transfer_linear.py | b0712f3839df04060d158b94812528f7b00420a8 | [] | no_license | omarun/intro_to_cnns | a0bf11854a51101c69566f03e7baf7602af485c8 | a759ce6349712869f648b82680b60a07caa91d87 | refs/heads/master | 2021-01-20T06:25:07.525332 | 2016-10-21T18:51:35 | 2016-10-21T18:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | classifier = LogisticRegression()
classifier.fit(x_train_flat, y_train)
classifier.score(x_valid_flat, y_valid)
| [
"luiz.gh@gmail.com"
] | luiz.gh@gmail.com |
662affb01df36470968915cb99bf04a3e048044e | 084c3246c44c2e5ae5a0dd38522cb19ac993fe35 | /game_utils.py | 6b73e7fec2552661e18af332d04f300a6c757822 | [] | no_license | archivest/PythonWars-1996 | 5bafaca65764ca0d0999b063a5411c53cdbbb0eb | b2b301233d72334cfd9b4404b32a45ac22f0b248 | refs/heads/master | 2023-02-06T09:53:32.464771 | 2020-12-30T07:37:03 | 2020-12-30T07:37:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,475 | py | # PythonWars copyright © 2020 by Paul Penner. All rights reserved. In order to
# use this codebase you must comply with all licenses.
#
# Original Diku Mud copyright © 1990, 1991 by Sebastian Hammer,
# Michael Seifert, Hans Henrik Stærfeldt, Tom Madsen, and Katja Nyboe.
#
# Merc Diku Mud improvements copyright © 1992, 1993 by Michael
# Chastain, Michael Quan, and Mitchell Tse.
#
# ROM 2.4 is copyright 1993-1998 Russ Taylor. ROM has been brought to
# you by the ROM consortium: Russ Taylor (rtaylor@hypercube.org),
# Gabrielle Taylor (gtaylor@hypercube.org), and Brian Moore (zump@rom.org).
#
# Ported to Python by Davion of MudBytes.net using Miniboa
# (https://code.google.com/p/miniboa/).
#
# In order to use any part of this Merc Diku Mud, you must comply with
# both the original Diku license in 'license.doc' as well the Merc
# license in 'license.txt'. In particular, you may not remove either of
# these copyright notices.
#
# Much time and thought has gone into this software, and you are
# benefiting. We hope that you share your changes too. What goes
# around, comes around.
import collections
import random
import re
import sys
import time
import uuid
import bit
import comm
import instance
import merc
def read_forward(pstr, jump=1):
    """Skip past the first `jump` characters of pstr and return the remainder."""
    remainder = pstr[jump:]
    return remainder
def read_letter(pstr):
    """Skip leading whitespace, then split off one character.

    Returns (rest, letter); letter is "" when pstr is empty or all whitespace.
    """
    trimmed = pstr.lstrip()
    return trimmed[1:], trimmed[:1]
def str_cmp(astr, bstr, lower: bool = True):
    """Compare two strings for equality, case-insensitively by default.

    `bstr` may also be a list of strings, in which case True is returned
    when `astr` matches ANY entry (each compared case-insensitively,
    matching the original recursive behaviour).  Empty/None operands
    never match.  A non-string `astr` is reported via comm.notify and
    treated as a mismatch.
    """
    if not astr or not bstr:
        return False
    if not isinstance(astr, str):
        comm.notify("str_cmp: astr:{} must be a type(str), received {}".format(astr, type(astr)), merc.CONSOLE_WARNING)
        return False
    if isinstance(bstr, list):
        # Match against any entry of the list.
        return any(str_cmp(astr, entry) for entry in bstr)
    if len(astr) != len(bstr):
        return False
    if lower:
        # Comparing the lowered strings wholesale also avoids the
        # IndexError the old per-character loop could hit when lowering
        # changes a string's length (e.g. 'İ'.lower() has length 2).
        return astr.lower() == bstr.lower()
    return astr == bstr
def read_word(pstr, to_lower=True):
    """Strip leading whitespace and split off the next word from pstr.

    A word is either a run of non-space characters or a phrase delimited
    by matching ' or " quotes (an unterminated quote runs to the end of
    the buffer).  Returns (rest, word).

    NOTE(review): quoted words are returned verbatim -- `to_lower` is
    only applied on the unquoted path; confirm whether that is intended.
    """
    if not pstr:
        return "", ""
    pstr = pstr.strip()
    start = None
    end = None
    i = -1
    for c in pstr:
        i += 1
        if c == "'" and start is None:
            # Opening single quote: word is everything up to the closing
            # quote, or the end of the buffer if unterminated.
            start = i + 1
            quote = pstr.find("'", i + 1)
            if quote > -1:
                end = quote
            else:
                end = len(pstr)
            return pstr[end + 1:], pstr[start:end]
        elif c == '"' and start is None:
            # Same handling for double quotes.
            start = i + 1
            quote = pstr.find('"', i + 1)
            if quote > -1:
                end = quote
            else:
                end = len(pstr)
            return pstr[end + 1:], pstr[start:end]
        elif c.isspace():
            if start is not None:
                # First whitespace after the word ends it.
                end = i
                break
        else:
            if start is None:
                start = i
    if not end:
        # Ran off the end of the buffer: the word is the remainder.
        end = len(pstr)
    return pstr[end:].strip(), pstr[start:end].lower() if to_lower else pstr[start:end]
# JINNOTE - 11/10/2020 @ 8:41 PM (EST)
# Probably overthinking how to do this. But maybe the function will
# come in handy down the road beyond 'read_word()'; tried to allow it
# have easy expansion.
def list_in_dict(pstr: str = None, orig_dict: dict = None, delimiter="|"):
    """Map each delimiter-separated token of pstr to its value in orig_dict.

    Returns the matched values joined by `delimiter`, or "0" when no
    token matches any key.

    NOTE(review): the `k not in my_list` branch looks unreachable --
    my_dict's keys are built from the intersection with set(my_list).
    """
    my_list = [str(s).strip() for s in pstr.split(delimiter)]
    my_dict = {k: orig_dict[k] for k in orig_dict.keys() & set(my_list)}
    fi_list = []
    if not my_dict:
        return "0"
    for k, v in my_dict.items():
        if k not in my_list:
            comm.notify("list_in_dict: bad format '{}'".format(k), merc.CONSOLE_WARNING)
            fi_list.append("0")
            continue
        # Emit the value once per matching token, preserving token order.
        for bitvector in my_list:
            if str_cmp(bitvector, k):
                fi_list.append(str(v))
    return delimiter.join(fi_list)
# JINNOTE - 11/10/2020 @ 8:45 PM (EST)
# Revamped read_int() function; seems to be working well from testing.
# Can probably be refined more but is just a rehash of the stock Pyom version with "better"
# functionality. Supports additional -/+ functionality as well as adds | functionality.
# read_int("1|+1|-1|100|-1|+1000 0 Mathmatically should be: 1 + 1 + -1 + 100 + -1 + 1000 = 1100")
# (' 0 Mathmatically should be: 1 + 1 + -1 + 100 + -1 + 1000 = 1100', 1100)
# read_int("100d20+100")
# ('d20+100', 100)
# read_int("20+100")
# ('+100', 20)
def read_int(pstr):
    """Read a leading integer expression from pstr; returns (rest, value).

    Accepts '|'-joined signed integers which are summed (see the worked
    examples in the comment block above).  Purely alphabetic input is
    first translated through bit.bitvector_table.  Exits the process on
    malformed input.
    """
    if not pstr:
        return None, None
    pstr = pstr.lstrip()
    nstr = ""
    if pstr.isalpha():
        # Named bitvectors: translate names to their numeric values first.
        pstr = list_in_dict(pstr, bit.bitvector_table)
    if not pstr[0].isdigit() and pstr[0] not in ["-", "+"]:
        comm.notify("read_int: bad format ({})".format(pstr), merc.CONSOLE_CRITICAL)
        sys.exit(1)
    for index, c in enumerate(pstr):
        if c.isdigit() or c in ["-", "|"]:
            nstr += c
        elif c in ["+"]:
            # A '+' directly after a digit terminates the number (so
            # "20+100" stops at the '+'); a sign '+' is consumed.
            if pstr[index - 1] and pstr[index - 1].isdigit():
                break
            nstr += c
        else:
            break
    pstr = pstr[len(nstr):]
    nstr = [int(s) for s in nstr.lstrip().split("|")]
    return pstr, sum(nstr)
def read_string(pstr):
    """Read a tilde-terminated string from pstr; returns (rest, word).

    The word is everything before the first '~' (stripped); `rest` is
    everything after it.  If no terminator is present the whole buffer
    is consumed -- the old code returned pstr unconsumed together with
    the buffer minus its last character (find() == -1 sliced as [:-1]),
    which could make calling loops spin forever.
    """
    if not pstr:
        return None, None
    end = pstr.find("~")
    if end == -1:
        # Unterminated string: consume everything.
        return "", pstr.strip()
    return pstr[end + 1:], pstr[:end].strip()
# JINPOINT - Becareful when using with bit.Bit() types; if w.isdigit() it will change type(bit.Bit) to type(int).
# Assuming that is why room.room_flags used both room_flags.is_set() and is_set(room_flags) and nobody tried/managed to fix.
# Use the safer bit.read_bits().
def read_flags(pstr):
    """Read one word from pstr and decode it as a flag bitmask.

    Returns (rest, flags).  A plain number is returned as-is; otherwise
    each letter contributes one bit: 'A'..'Z' start at merc.BV01 and
    'a'..'z' start at merc.BV27, doubling once per step above the base
    letter.  Non-letter characters contribute nothing.
    """
    if not pstr:
        return None, None
    pstr, w = read_word(pstr, False)
    if w in ["0", 0]:
        return pstr, 0
    if w.isdigit():
        return pstr, int(w)
    flags = 0
    for c in w:
        flag = 0
        if "A" <= c <= "Z":
            # Walk the letter down to 'A', doubling the bit each step.
            flag = merc.BV01
            while c != "A":
                flag *= 2
                c = chr(ord(c) - 1)
        elif "a" <= c <= "z":
            flag = merc.BV27
            while c != "a":
                flag *= 2
                c = chr(ord(c) - 1)
        flags += flag
    return pstr, flags
def item_bitvector_flag_str(bits: int, in_type="extra flags"):
    """Return the display string for the FIRST flag set in `bits`.

    `in_type` selects the flag table ("wear flags", "extra flags" or
    "sitem flags").  Returns None when no listed bit is set or the type
    is unknown.  Only the first matching bit (in table order) is
    reported; the for/else returns None when the loop finds no match.
    """
    if not bits or not in_type:
        return None
    if bits == 0:
        # Unreachable: `not bits` above already catches 0.
        return None
    if "wear flags" in in_type:
        bit_list = [(merc.ITEM_TAKE, "take"), (merc.ITEM_WEAR_FINGER, "left_finger, right_finger"), (merc.ITEM_WEAR_NECK, "neck_one, neck_two"),
                    (merc.ITEM_WEAR_BODY, "body"), (merc.ITEM_WEAR_HEAD, "head"), (merc.ITEM_WEAR_LEGS, "legs"), (merc.ITEM_WEAR_FEET, "feet"),
                    (merc.ITEM_WEAR_HANDS, "hands"), (merc.ITEM_WEAR_ARMS, "arms"), (merc.ITEM_WEAR_SHIELD, "right_hand, left_hand"),
                    (merc.ITEM_WEAR_ABOUT, "about_body"), (merc.ITEM_WEAR_WAIST, "waist"), (merc.ITEM_WEAR_WRIST, "left_wrist, right_wrist"),
                    (merc.ITEM_WIELD, "left_hand, right_hand"), (merc.ITEM_HOLD, "left_hand, right_hand"), (merc.ITEM_WEAR_FACE, "face")]
        for (aa, bb) in bit_list:
            if bits & aa:
                return bb
        else:
            return None
    if "extra flags" in in_type:
        bit_list = [(merc.ITEM_GLOW, "glow"), (merc.ITEM_HUM, "hum"), (merc.ITEM_THROWN, "thrown"), (merc.ITEM_KEEP, "keep"),
                    (merc.ITEM_VANISH, "vanish"), (merc.ITEM_INVIS, "invis"), (merc.ITEM_MAGIC, "magic"), (merc.ITEM_NODROP, "no_drop"),
                    (merc.ITEM_BLESS, "bless"), (merc.ITEM_ANTI_GOOD, "anti_good"), (merc.ITEM_ANTI_EVIL, "anti_evil"),
                    (merc.ITEM_ANTI_NEUTRAL, "anti_neutral"), (merc.ITEM_NOREMOVE, "no_remove"), (merc.ITEM_INVENTORY, "inventory"),
                    (merc.ITEM_LOYAL, "loyal"), (merc.ITEM_SHADOWPLANE, "shadowplane")]
        for (aa, bb) in bit_list:
            if bits & aa:
                return bb
        else:
            return None
    if "sitem flags" in in_type:
        bit_list = [(merc.SITEM_ACTIVATE, "activate"), (merc.SITEM_TWIST, "twist"), (merc.SITEM_PRESS, "press"), (merc.SITEM_PULL, "pull"),
                    (merc.SITEM_TARGET, "target"), (merc.SITEM_SPELL, "spell"), (merc.SITEM_TRANSPORTER, "transporter"),
                    (merc.SITEM_TELEPORTER, "teleporter"), (merc.SITEM_DELAY1, "delay1"), (merc.SITEM_DELAY2, "delay2"),
                    (merc.SITEM_OBJECT, "object"), (merc.SITEM_MOBILE, "mobile"), (merc.SITEM_ACTION, "action"), (merc.SITEM_MORPH, "morph"),
                    (merc.SITEM_SILVER, "silver"), (merc.SITEM_WOLFWEAPON, "wolfweapon"), (merc.SITEM_DROWWEAPON, "drowweapon"),
                    (merc.SITEM_CHAMPWEAPON, "champweapon"), (merc.SITEM_DEMONIC, "demonic"), (merc.SITEM_HIGHLANDER, "highlander")]
        for (aa, bb) in bit_list:
            if bits & aa:
                return bb
        else:
            return None
def item_flags_from_bits(bits: int, out_data: collections.namedtuple, in_type="wear flags"):
    """Populate out_data's set-valued fields from the bits in `bits`.

    out_data must expose `.slots`, `.attributes` and `.restrictions`
    sets, which are updated in place; the function returns None.
    Entries with multiple equipment slots are expanded element-wise.
    """
    if not out_data or not bits or not in_type:
        return None
    if bits == 0:
        # Unreachable: `not bits` above already catches 0.
        return None
    if "wear flags" in in_type:
        bit_list = [(merc.ITEM_WEAR_FINGER, ["left_finger", "right_finger"]), (merc.ITEM_WEAR_NECK, ["neck_one", "neck_two"]),
                    (merc.ITEM_WEAR_BODY, "body"), (merc.ITEM_WEAR_HEAD, "head"), (merc.ITEM_WEAR_LEGS, "legs"), (merc.ITEM_WEAR_FEET, "feet"),
                    (merc.ITEM_WEAR_HANDS, "hands"), (merc.ITEM_WEAR_ARMS, "arms"), (merc.ITEM_WEAR_SHIELD, ["right_hand", "left_hand"]),
                    (merc.ITEM_WEAR_ABOUT, "about_body"), (merc.ITEM_WEAR_WAIST, "waist"), (merc.ITEM_WEAR_WRIST, ["left_wrist", "right_wrist"]),
                    (merc.ITEM_WIELD, ["right_hand", "left_hand"]), (merc.ITEM_HOLD, ["right_hand", "left_hand"]), (merc.ITEM_WEAR_FACE, "face")]
        for (aa, bb) in bit_list:
            if bits & aa:
                if type(bb) == list:
                    out_data.slots.update({str(s) for s in bb})
                else:
                    out_data.slots.update({bb})
        if bits & merc.ITEM_TAKE:
            out_data.attributes.update({"take"})
    if "extra flags" in in_type:
        # Cosmetic/behavioural flags go into .attributes ...
        bit_list = [(merc.ITEM_GLOW, "glow"), (merc.ITEM_HUM, "hum"), (merc.ITEM_THROWN, "thrown"), (merc.ITEM_VANISH, "vanish"),
                    (merc.ITEM_INVIS, "invis"), (merc.ITEM_MAGIC, "magic"), (merc.ITEM_BLESS, "bless"), (merc.ITEM_INVENTORY, "inventory"),
                    (merc.ITEM_LOYAL, "loyal"), (merc.ITEM_SHADOWPLANE, "shadowplane")]
        for (aa, bb) in bit_list:
            if bits & aa:
                out_data.attributes.update({bb})
        # ... while usage-limiting flags go into .restrictions.
        bit_list = [(merc.ITEM_KEEP, "keep"), (merc.ITEM_NODROP, "no_drop"), (merc.ITEM_ANTI_GOOD, "anti_good"), (merc.ITEM_ANTI_EVIL, "anti_evil"),
                    (merc.ITEM_ANTI_NEUTRAL, "anti_neutral"), (merc.ITEM_NOREMOVE, "no_remove")]
        for (aa, bb) in bit_list:
            if bits & aa:
                out_data.restrictions.update({bb})
    if "sitem flags" in in_type:
        bit_list = [(merc.SITEM_TRANSPORTER, "transporter"), (merc.SITEM_TELEPORTER, "teleporter"), (merc.SITEM_SILVER, "silver"),
                    (merc.SITEM_WOLFWEAPON, "wolfweapon"), (merc.SITEM_DROWWEAPON, "drowweapon"), (merc.SITEM_CHAMPWEAPON, "champweapon"),
                    (merc.SITEM_DEMONIC, "demonic"), (merc.SITEM_HIGHLANDER, "highlander")]
        for (aa, bb) in bit_list:
            if bits & aa:
                out_data.attributes.update({bb})
def find_location(ch, arg):
    """Resolve arg to a room instance, or None.

    arg may be a room vnum, the name of a character anywhere in the
    world, or the name of an item -- in the item case the containing
    room is found via the item itself, its carrier, its container, or
    its container's carrier, in that order.
    """
    if arg.isdigit():
        vnum = int(arg)
        if vnum in instance.room_templates.keys():
            if vnum != merc.ROOM_VNUM_IN_OBJECT:
                # First live instance of that room template.
                room_instance = instance.instances_by_room[vnum][0]
                return instance.rooms[room_instance]
        return None
    victim = ch.get_char_world(arg)
    if victim:
        return victim.in_room
    item = ch.get_item_world(arg)
    if item:
        if item.in_room:
            return item.in_room
        if item.in_living and item.in_living.in_room:
            return item.in_living.in_room
        if item.in_item and item.in_item.in_room:
            return item.in_item.in_room
        if item.in_item and item.in_item.in_living and item.in_item.in_living.in_room:
            return item.in_item.in_living.in_room
    return None
def append_file(ch, fp, pstr):
    """Append a '[room vnum] name: text' log line for character ch to file fp."""
    line = "[{:5}] {}: {}".format(ch.in_room.vnum, ch.name, pstr)
    with open(fp, "a") as handle:
        handle.write(line + "\n")
def read_to_eol(pstr):
    """Split off everything up to the first newline; the newline is consumed.

    Returns (rest, line); with no newline the whole buffer becomes `line`.
    """
    newline_at = pstr.find("\n")
    if newline_at == -1:
        newline_at = len(pstr)
    return pstr[newline_at + 1:], pstr[:newline_at]
_breakup = re.compile(r"(\".*?\"|\'.*?\'|[^\s]+)")


def is_name(arg, name):
    """True if arg is a case-insensitive prefix of any word in name.

    Words in name may be quoted with ' or "; matching quotes are
    stripped (an unmatched leading quote alone is stripped) before the
    prefix test.
    """
    if not arg or not name:
        return False
    arg = arg.lower()
    for token in _breakup.findall(name.lower()):
        if token[0] in ('"', "'"):
            token = token[1:-1] if token[0] == token[-1] else token[1:]
        if token.startswith(arg):
            return True
    return False
def dice(number, size):
    """Roll `number` dice with `size` sides each and return the total."""
    total = 0
    for _ in range(int(number)):
        total += random.randint(1, int(size))
    return total
def number_fuzzy(number):
    """Return a random value from {number - 1, number, number + 1}."""
    return number_range(number - 1, number + 1)
# Handles ranges given in either order, prevents ValueError being raised.
def number_range(a, b):
    """Return a random integer in [a, b] inclusive; bounds may be swapped.

    Non-int arguments are reported via comm.notify and yield -1.
    """
    if type(a) is not int or type(b) is not int:
        comm.notify("number_range: ({}, {})".format(type(a), type(b)), merc.CONSOLE_WARNING)
        return -1
    if b < a:
        a, b = b, a  # tuple swap instead of the old three-step shuffle
    return random.randint(a, b)


def number_bits(width):
    """Return a random integer of up to `width` bits, i.e. 0 .. 2**width - 1.

    Bug fix: the original `1 << width - 1` parsed as `1 << (width - 1)`,
    halving the intended range; ROM's number_bits masks with
    (1 << width) - 1.
    """
    return number_range(0, (1 << width) - 1)
def number_argument(argument):
    """Split a 'count.name' argument such as '2.sword' into (2, 'sword').

    Without a dot the count defaults to 1; a non-numeric prefix such as
    'all.sword' also yields count 1 with the text after the first dot.
    """
    if not argument:
        return 1, ""
    prefix, dot, rest = argument.partition(".")
    if not dot:
        return 1, argument
    if prefix.isdigit():
        return int(prefix), rest
    return 1, rest
def number_percent(num_float=False):
    """Random percentage: an int in [1, 100], or -- when num_float is
    True -- a float whose whole part is in [1, 100] with a random
    two-digit fractional part."""
    whole = random.randint(1, 100)
    if not num_float:
        return int(whole)
    return float("{}.{:02}".format(whole, random.randint(0, 99)))
# Simple linear interpolation.
def interpolate(level, value_00, value_32):
    """Integer-interpolate between value_00 (level 0) and value_32 (level 32)."""
    span = value_32 - value_00
    return value_00 + (level * span) // 32
def mass_replace(pstr, pdict):
    """Apply each truthy substitution in pdict to pstr, in dict order."""
    for token, replacement in pdict.items():
        if not replacement:
            continue  # skip None/empty replacements, as before
        pstr = pstr.replace(token, replacement)
    return pstr
def get_mob_id(npc=True):
    """Unique id string: current timestamp for NPCs, a UUID4 for players."""
    if not npc:
        return str(uuid.uuid4())
    return "{}".format(time.time())
# Get an extra description from a list.
def get_extra_descr(name, edd_list):
    """Return the description of the first entry whose keyword matches name, else None."""
    if not edd_list:
        return None
    return next(
        (entry.description for entry in edd_list if is_name(name, entry.keyword)),
        None,
    )
def to_integer(s: str):
    """Coerce a numeric string to int; '7.9' truncates to 7 via int(float)."""
    try:
        value = int(s)
    except ValueError:
        value = int(float(s))
    return value
def colorstrip(msg):
    """Strip '#x' / '^x' colour tokens from msg, keeping ordinary text.

    A marker at the very end of the string (no code letter after it) is
    kept verbatim, as is any marker-following character that is not a
    valid colour code (not in merc.ANSI_STRING1).

    Bug fix: the original's elif/else branches were byte-identical, so
    the colour-code letter was always appended and nothing was ever
    stripped beyond the marker itself.
    """
    out = []
    i = 0
    while i < len(msg):
        ch = msg[i]
        if ch in ("#", "^"):
            i += 1
            if i >= len(msg):
                # Trailing marker with no code letter: keep the marker.
                out.append(ch)
            elif msg[i] not in merc.ANSI_STRING1:
                # Not a colour code: keep the literal character.
                out.append(msg[i])
            # else: a real colour code -> drop it.
        else:
            out.append(ch)
        i += 1
    return "".join(out)
def str_between(value, a, b):
    """Substring of value strictly between the FIRST `a` and the LAST `b`.

    Returns "" when either delimiter is missing or they do not enclose
    any characters.
    """
    begin = value.find(a)
    finish = value.rfind(b)
    if begin == -1 or finish == -1:
        return ""
    begin += len(a)
    return value[begin:finish] if begin < finish else ""
def str_before(value, a):
    """Everything in value before the first occurrence of a ("" if absent)."""
    idx = value.find(a)
    return value[:idx] if idx != -1 else ""
def str_after(value, a):
    """Everything in value after the LAST occurrence of a ("" if absent or at end)."""
    idx = value.rfind(a)
    if idx == -1:
        return ""
    begin = idx + len(a)
    return value[begin:] if begin < len(value) else ""
def str_infix(astr, bstr):
    """True if astr occurs anywhere inside bstr, case-insensitively.

    An empty astr never matches.  Bug fix: the original raised TypeError
    (`bstr + ichar` concatenates str and int) as soon as the first
    characters matched, and mis-applied startswith to the wrong string.
    """
    if not astr:
        return False
    return astr.lower() in bstr.lower()
def str_prefix(astr, bstr, lower=True):
    """True if astr is a prefix of bstr (case-insensitive unless lower=False)."""
    return len(astr) <= len(bstr) and str_cmp(astr, bstr[:len(astr)], lower)
def str_suffix(astr, bstr, lower=True):
    """True if astr is a suffix of bstr (case-insensitive unless lower=False)."""
    return len(astr) <= len(bstr) and str_cmp(astr, bstr[-len(astr):], lower)
def is_in(arg, ip):
    """True if ip starts with '|' and ANY of its '*'-separated chunks
    occurs in arg (case-insensitive)."""
    if not ip or not ip.startswith("|"):
        return False
    haystack = arg.lower()
    chunks = (part for part in ip[1:].split("*") if part)
    return any(chunk.lower() in haystack for chunk in chunks)
def all_in(arg, ip):
    """True if ip starts with '&' and EVERY '*'-separated chunk occurs
    in arg (case-insensitive); vacuously True with no chunks."""
    if not ip or not ip.startswith("&"):
        return False
    haystack = arg.lower()
    chunks = (part for part in ip[1:].split("*") if part)
    return all(chunk.lower() in haystack for chunk in chunks)
| [
"jindrak@gmail.com"
] | jindrak@gmail.com |
b149337554b2282c3286a0bcf124a42801eccad7 | 682526c4fa74951f5551310d92b19f9948f67b89 | /tapioca_jarbas/tapioca_jarbas.py | 42fda2683f6ec05780471d136c39fce0d2c44ce2 | [
"MIT"
] | permissive | indigos33k3r/tapioca-jarbas | 458d8b0cefc0425c7d94ae25c572d0c931a62671 | e54846a1aa7a2b2bcaa23126f21492f9da475704 | refs/heads/master | 2020-04-13T23:18:13.797237 | 2017-11-01T21:13:01 | 2017-11-01T21:13:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 877 | py | from tapioca import (
TapiocaAdapter, generate_wrapper_from_adapter, JSONAdapterMixin)
from .resource_mapping import RESOURCE_MAPPING
class JarbasClientAdapter(JSONAdapterMixin, TapiocaAdapter):
    """Tapioca adapter for the Jarbas (Serenata de Amor) REST API."""
    # Base URL of the API; endpoint paths come from RESOURCE_MAPPING.
    api_root = 'https://jarbas.serenatadeamor.org/api/'
    resource_mapping = RESOURCE_MAPPING
    def get_iterator_list(self, response_data):
        """Return one page's item list (DRF-style 'results' key, with the
        raw payload as fallback)."""
        return response_data.get('results', response_data)
    def get_iterator_next_request_kwargs(self, iterator_request_kwargs,
                                         response_data, response):
        """Build request kwargs for the next page; None when exhausted."""
        next_url = response_data.get('next', '')
        if not next_url:
            return
        iterator_request_kwargs['url'] = next_url
        iterator_request_kwargs.pop('params', None)  # these are sent in the next_url
        return iterator_request_kwargs
# Dynamic client wrapper exposing the mapped resources as attributes.
Jarbas = generate_wrapper_from_adapter(JarbasClientAdapter)
| [
"daniloshiga@gmail.com"
] | daniloshiga@gmail.com |
c5df412987c4bf17583da28903931d117431accc | 279f415dd1e06c594c6c87deda57e201c73c4542 | /test/espnet2/layers/test_mask_along_axis.py | 61f62562a222d988d407a5c997e71dcd8802261d | [
"Apache-2.0"
] | permissive | espnet/espnet | f7ba47271c1a6b1ed606dbbfb04a7f14220bb585 | bcd20948db7846ee523443ef9fd78c7a1248c95e | refs/heads/master | 2023-08-28T23:43:34.238336 | 2023-08-23T02:51:39 | 2023-08-23T02:51:39 | 114,054,873 | 7,242 | 2,244 | Apache-2.0 | 2023-09-14T08:01:11 | 2017-12-13T00:45:11 | Python | UTF-8 | Python | false | false | 1,043 | py | import pytest
import torch
from espnet2.layers.mask_along_axis import MaskAlongAxis
@pytest.mark.parametrize("requires_grad", [False, True])
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis(dim, replace_with_zero, requires_grad):
    """Forward a random batch through MaskAlongAxis; lengths must be
    unchanged, and backward must run when the input requires grad."""
    freq_mask = MaskAlongAxis(
        dim=dim,
        mask_width_range=30,
        num_mask=2,
        replace_with_zero=replace_with_zero,
    )
    x = torch.randn(2, 100, 80, requires_grad=requires_grad)
    x_lens = torch.tensor([80, 78])
    y, y_lens = freq_mask(x, x_lens)
    # Masking must not alter the reported sequence lengths.
    assert all(l1 == l2 for l1, l2 in zip(x_lens, y_lens))
    if requires_grad:
        # Smoke-test that gradients flow through the masked output.
        y.sum().backward()
@pytest.mark.parametrize("replace_with_zero", [False, True])
@pytest.mark.parametrize("dim", ["freq", "time"])
def test_MaskAlongAxis_repr(dim, replace_with_zero):
    """Smoke-test that the module's repr/print does not raise."""
    freq_mask = MaskAlongAxis(
        dim=dim,
        mask_width_range=30,
        num_mask=2,
        replace_with_zero=replace_with_zero,
    )
    print(freq_mask)
| [
"naoyuki.kamo829@gmail.com"
] | naoyuki.kamo829@gmail.com |
0c8fc6ce245ed6f32ae7a857ba2561de41e4a544 | 0f0f8b3b027f412930ca1890b0666538358a2807 | /dotop/tools/amount_to_text_en.py | ff67589e6bb8f9d80bfd30c551ac13aba3354988 | [] | no_license | konsoar/dotop_pos_v11 | 741bd5ca944dfd52eb886cab6f4b17b6d646e131 | 576c860917edd25661a72726d0729c769977f39a | refs/heads/master | 2021-09-06T13:25:34.783729 | 2018-02-07T02:11:12 | 2018-02-07T02:11:12 | 111,168,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,146 | py | # -*- coding: utf-8 -*-
# Part of dotop. See LICENSE file for full copyright and licensing details.
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
# Number words 0-19, indexed directly by value.
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
          'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
          'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
# Tens words for 20..90.
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
# Scale words for successive powers of 1000 (denom[1] == 1000**1, ...).
denom = ( '',
          'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
          'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
          'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
          'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
    """convert a value < 100 to English.
    """
    if val < 20:
        return to_19[val]
    # Generate (word, lower bound) pairs: ('Twenty', 20), ('Thirty', 30), ...
    for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
        if dval + 10 > val:
            # val falls inside this decade; append the units word if any.
            if val % 10:
                return dcap + '-' + to_19[val % 10]
            return dcap
def _convert_nnn(val):
    """
    convert a value < 1000 to english, special cased because it is the level that kicks
    off the < 100 special case. The rest are more general. This also allows you to
    get strings in the form of 'forty-five hundred' if called directly.
    """
    word = ''
    # Split into hundreds digit (rem) and the remainder below 100 (mod).
    (mod, rem) = (val % 100, val // 100)
    if rem > 0:
        word = to_19[rem] + ' Hundred'
        if mod > 0:
            word += ' '
    if mod > 0:
        word += _convert_nn(mod)
    return word
def english_number(val):
    """Return the English words for a non-negative integer, recursing
    per thousands-group with denom[] scale words."""
    if val < 100:
        return _convert_nn(val)
    if val < 1000:
        return _convert_nnn(val)
    for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
        if dval > val:
            # dval is the first power of 1000 above val, so denom[didx]
            # is the scale word for the leading group.
            mod = 1000 ** didx
            l = val // mod
            r = val - (l * mod)
            ret = _convert_nnn(l) + ' ' + denom[didx]
            if r > 0:
                ret = ret + ', ' + english_number(r)
            return ret
def amount_to_text(number, currency):
    """Spell out a monetary amount in English, e.g. '... euro and ... Cents'.

    NOTE(review): this definition is later shadowed at module level by
    the generic amount_to_text(nbr, lang, currency); it remains
    reachable only through the _translate_funcs mapping, which is built
    before the second definition.
    """
    number = '%.2f' % number
    units_name = currency
    # NOTE(review): `list` shadows the builtin; left unchanged here.
    list = str(number).split('.')
    start_word = english_number(int(list[0]))
    end_word = english_number(int(list[1]))
    cents_number = int(list[1])
    cents_name = (cents_number > 1) and 'Cents' or 'Cent'
    return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
# Registry of per-language converters; captures the English converter
# defined above (before this generic dispatcher shadows its name).
_translate_funcs = {'en' : amount_to_text}

#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
    """ Converts an integer to its textual representation, using the language set in the context if any.

        Example::

            1654: thousands six cent cinquante-quatre.
    """
    # NOTE(review): this import appears unused below; kept in case it is
    # needed for side effects -- confirm before removing.
    import dotop.loglevels as loglevels
    # if nbr > 10000000:
    #     _logger.warning(_("Number too large '%d', can not translate it"))
    #     return str(nbr)

    # Membership test instead of dict.has_key(): has_key() was removed in
    # Python 3 and `in` is the idiomatic spelling in Python 2 as well.
    if lang not in _translate_funcs:
        _logger.warning(_("no translation function found for lang: '%s'"), lang)
        #TODO: (default should be en) same as above
        lang = 'en'
    return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
    from sys import argv
    lang = 'nl'
    # NOTE(review): `int_to_text` is not defined anywhere in this module,
    # so this demo block raises NameError when run; `amount_to_text` was
    # presumably intended.  Also note 'nl' has no _translate_funcs entry.
    # Confirm before relying on this block.
    if len(argv) < 2:
        for i in range(1,200):
            print i, ">>", int_to_text(i, lang)
        for i in range(200,999999,139):
            print i, ">>", int_to_text(i, lang)
    else:
        print int_to_text(int(argv[1]), lang)
| [
"Administrator@20nuo003-PC"
] | Administrator@20nuo003-PC |
816d8629ef45304e5ba47462013cad82e344a259 | f5ce05395e4b37ea5d970073f95681d3a880aefd | /setup.py | 27845d5d9659006759224cf1dabf78b80890a412 | [
"MIT"
] | permissive | simondlevy/gym-mygame | 2ef960a8cfd546f3f4abd42e1bcd952840416223 | e04495425117f1cd8ffe2e840f4561d6fdcaf50d | refs/heads/master | 2022-07-13T16:52:39.760990 | 2020-05-12T20:44:41 | 2020-05-12T20:44:41 | 263,425,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #!/usr/bin/env python3
'''
Python distutils setup file for gym-mygame module.
Copyright (C) 2020 Simon D. Levy
MIT License
'''
#from distutils.core import setup
from setuptools import setup
# Package metadata for distutils/setuptools; `packages` lists the
# importable packages shipped, `install_requires` the runtime deps.
setup (name = 'gym_mygame',
       version = '0.1',
       install_requires = ['gym', 'numpy'],
       description = 'Gym environment for my CSCI 316 game',
       packages = ['gym_mygame', 'gym_mygame.envs'],
       author='Simon D. Levy',
       author_email='simon.d.levy@gmail.com',
       url='https://github.com/simondlevy/studenta21/gym-mygame',
       license='MIT',
       platforms='Linux; Windows; OS X'
       )
| [
"simon.d.levy@gmail.com"
] | simon.d.levy@gmail.com |
9bd40a000147a571fe7d40700465c556556526c7 | 4567c7caa29288dda264cb78f6bc7ef2a6eeb756 | /SetDataStructure/MathOpsSet.py | 729dcee00cde6ed866eda38d638315232ea90155 | [] | no_license | JaspinderSingh786/Python3BasicsToAdvance | dc0c676e7efb0749288425dd3922a716b389199d | 00e9cb66bb2e5e35736fe8032e233a9d178cb038 | refs/heads/master | 2022-12-23T11:01:38.626288 | 2019-05-15T06:08:36 | 2019-05-15T06:08:36 | 300,102,348 | 0 | 0 | null | 2020-10-01T01:03:21 | 2020-10-01T01:03:21 | null | UTF-8 | Python | false | false | 672 | py | # union to return all the elements present in both sets
# Demonstration of Python set operations on two overlapping ranges.
x =set(range(0,10,2))   # {0, 2, 4, 6, 8}
y = set(range(6,20,2))  # {6, 8, 10, 12, 14, 16, 18}
# Union: every element present in either set.
print(x|y)
print(x.union(y))
# Intersection: elements common to both sets.
print(x.intersection(y)) # or
print(x&y)
# Difference: x - y keeps elements of x that are not also in y.
print(x.difference(y))
print(x-y)
print(y.difference(x))
print(y-x)
# Symmetric difference: elements in exactly one of the two sets.
print(x.symmetric_difference(y))
print(x^y)
# Membership operators: in, not in.
print(10 in x)
print(10 not in x)
# Set comprehensions.
s = {z*z for z in range(1,10)}
print(s)
s = {c**2 for c in range(1,10,2)}
print(s)
| [
"vivekgoswami71@gmail.com"
] | vivekgoswami71@gmail.com |
265cd5ce49260eb4a369231f4af087e09bb9f225 | 4042d12cc6ece8e690331a03fbe7936f2b85cc31 | /assets_app/models/assets_main.py | 4c6b8f4bc101bbfa53fb2e3250cde82403d6106e | [] | no_license | terroristhouse/Odoo13 | 551b65d18a934e7cfb1bcb2a571110ca524d80b8 | be4789c2c38dffe9afc3495c7f17f629cb458c89 | refs/heads/master | 2022-12-01T05:31:30.892018 | 2020-08-17T00:48:45 | 2020-08-17T00:48:45 | 278,875,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,056 | py | from odoo import fields, models, api
class AssetsMain(models.Model):
    """Odoo model describing a single asset record, ordered by its number."""
    _name = 'assets.main'
    _description = '资产'
    _order = 'name'
    name = fields.Char('设备编号', required=True)  # equipment number
    desc_detail = fields.Text('备注')  # free-form remarks
    number = fields.Integer('数量', required=True)  # asset quantity
    sequ = fields.Char('序列号')  # serial number
    local_id = fields.Many2one('assets.site', '地点', required=True)  # location
    section_id = fields.Many2one('assets.section', '部门')  # owning department
    user_id = fields.Many2one('assets.user', '使用人')  # person using the asset
    cate_id = fields.Many2one('assets.cate', '类别', required=True)  # asset category
    # Security classification: public / secret / confidential / top secret.
    secret_id = fields.Selection(
        [('gongkai', '公开'),
         ('mimi', '秘密'),
         ('jimi', '机密'),
         ('juemi', '绝密')], '密级', required=True
    )
    priority = fields.Selection(
        [('0', 'Low'),
         ('1', 'Normal'),
         ('2', 'High')],
        'Priority', default='1'
    )
    kanban_state = fields.Selection(
        [('normal', 'In Progress'),
         ('blocked', 'Blocked'),
         ('done', 'Ready for next stage')],
        'Kanban State', default='normal'
    )
    type_id = fields.Many2one('assets.type', '型号')  # asset model/type
    use_ids = fields.One2many('assets.use', 'zichan_id', string='使用记录')  # usage history
    # NOTE(review): the constraint names mention 'course' -- they look
    # copied from another module.  Renaming would alter the database
    # constraint identifiers, so confirm before changing.
    _sql_constraints = [
        ('unique_course_name',
         'unique(name)', '设备编号重复!'),
        ('unique_course_sequ',
         'unique(sequ)', '设备序列号重复!')
    ]
    @api.model
    def _default_stage(self):
        """Default stage: the first assets.main.stage record found."""
        Stage = self.env['assets.main.stage']
        return Stage.search([], limit=1)
    @api.model
    def _group_expand_stage_id(self, stages, domain, order):
        """Expand kanban grouping to every stage, including empty ones."""
        return stages.search([], order=order)
    stage_id = fields.Many2one('assets.main.stage', default=_default_stage, group_expand='_group_expand_stage_id')
    state_use = fields.Selection(related='stage_id.state')
| [
"867940410@qq.com"
] | 867940410@qq.com |
23f0c67f201967b6850945aa7d07c32191f2f9b8 | 7489448f6279fb4821ad49bc9475a2ddafd2570f | /.venv/lib/python3.8/site-packages/finmarketpy/network_analysis/learn_network_structure.py | 821e5917c4becc640c3353909b9b755ed1ae70a5 | [
"MIT"
] | permissive | webclinic017/VectorBTanalysis | a37df299103e63e350a6fb83caaeb9b3dc0b9542 | bea3deaf2ee3fc114b308146f2af3e4f35f70197 | refs/heads/master | 2023-03-16T02:03:34.288818 | 2020-09-05T22:59:50 | 2020-09-05T22:59:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,358 | py | # Project: finmarketpy project
# Filename: learn_network_structure
# Objective: compute a network graph for a group of asset return time series
# Created: 2019-11-02 12:05
# Version: 0.0
# Author: FS
# importing packages
import numpy as np
from sklearn import cluster, covariance, manifold
def learn_network_structure(ts_returns_data, names, alphas=4, cv=5, mode='cd',
                            assume_centered = False,
                            n_components=2, n_neighbors=5,
                            eigen_solver="dense", method='standard',
                            neighbors_algorithm="auto",
                            random_state = None, n_jobs=None,
                            standardise=False):
    """Learn a sparse graph, clusters and a 2-D embedding for return series.

    Parameters
    ----------
    ts_returns_data : array-like of shape [n_samples, n_instruments]
        time series matrix of returns
    names : array-like of shape [n_samples, 1]
        Individual names of the financial instrument
    alphas : int or positive float, optional
        Number of points on the grids to be used
    cv : int, optional
        Number of folds for cross-validation splitting strategy
    mode : str, optional
        Solver to use to compute the graph
    assume_centered : bool, optional
        Centre the data if False.
    n_components : int
        Number of components for the manifold
    n_neighbors: int
        Number of neighbours to consider for each point
    eigen_solver : str
        Algorithm to compute eigenvalues
    method : str
        Algorithm to use for local linear embedding
    neighbors_algorithm : str
        Algorithm to use for nearest neighbours search
    random_state : int, RandomState instance or None, optional
        If int, random_state is the seed used by the random number generator.
        If RandomState instance, random_state is the random number generator.
        If None, the random number generator is the RandomState instance used by np.random.
        Used when eigen_solver == 'arpack'
    n_jobs : int or None, optional
        number of parallel jobs to run
    standardise : bool
        standardise data if True

    Returns : sklearn.covariance.graph_lasso_.GraphicalLassoCV
              sklearn.manifold.locally_linear.LocallyLinearEmbedding
              array-like of shape [n_components, n_instruments]
                Transformed embedding vectors
              array-like of shape [n_instruments, 1]
                numeric identifier of each cluster
    -------
    """
    if not isinstance(ts_returns_data, (np.ndarray, np.generic)):
        raise TypeError("ts_returns_data must be of class ndarray")
    # learn graphical structure
    edge_model = covariance.GraphicalLassoCV(alphas=alphas, cv=cv, mode=mode,
                                             assume_centered=assume_centered)
    edge_model.fit(ts_returns_data)
    # cluster using affinity propagation
    _, labels = cluster.affinity_propagation(edge_model.covariance_)
    n_labels = labels.max()
    for i in range(n_labels + 1):
        print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
    # find low-dimension embedding - useful for 2D plane visualisation
    node_position_model = manifold.LocallyLinearEmbedding(
        n_components=n_components, eigen_solver=eigen_solver,
        n_neighbors=n_neighbors, method=method,
        neighbors_algorithm=neighbors_algorithm,
        random_state=random_state, n_jobs=n_jobs)
    embedding = node_position_model.fit_transform(ts_returns_data.T).T
    # NOTE(review): when standardise=True everything above is recomputed
    # on the scaled data and the first fit/embedding is discarded (the
    # cluster listing is printed twice).  Restructuring would change the
    # printed side effects, so it is only flagged here.
    if standardise:
        # standardise returns
        standard_ret = ts_returns_data.copy()
        standard_ret /= ts_returns_data.std(axis=0)
        # learn graph model
        edge_model.fit(standard_ret)
        # cluster using affinity propagation
        _, labels = cluster.affinity_propagation(edge_model.covariance_)
        n_labels = labels.max()
        for i in range(n_labels + 1):
            print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
        # find low-dimension embedding - useful for 2D plane visualisation
        node_position_model = manifold.LocallyLinearEmbedding(
            n_components=n_components, eigen_solver=eigen_solver,
            n_neighbors=n_neighbors, method=method,
            neighbors_algorithm=neighbors_algorithm,
            random_state=random_state, n_jobs=n_jobs)
        embedding = node_position_model.fit_transform(ts_returns_data.T).T
    return edge_model, node_position_model, embedding, labels
| [
"eorlowski6@gmail.com"
] | eorlowski6@gmail.com |
9b34fda8067ba60916db6d5830d18b528fb2163a | bf813d2b877fb8ba62feb4263484db3d0f26d5cd | /coma/catalogue_manipulation/move_cat_to_d_coma.py | c4553074f71305e798c3de2117e40e6a93870ec9 | [] | no_license | 9217392354A/astro-scripts | 1e8e8c827097a877518d1f3e10870a5c2609417c | cd7a175bd504b4e291020b551db3077b067bc632 | refs/heads/master | 2021-01-13T00:40:57.481755 | 2016-03-25T17:04:28 | 2016-03-25T17:04:28 | 54,730,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,346 | py | #program to move a catalogue to the distance of coma
# Chris Fuller March 2014
#import modules
import numpy as np
from os.path import join as pj
import atpy as at
from copy import copy, deepcopy
import matplotlib.pyplot as plt
from pylab import bar
import pdb
#pdb.set_trace()
#i/o
print 'reading in cat . . .'
folder = '/Users/chrisfuller/Dropbox/phd/herchel/coma/aux_data'
cat = at.Table(pj(folder, 'fornax_input.fits'),type='fits')
output = 'fornax_at_100mpc-030314.fits'
#key parameters
#coor_names = ['RA (2000)', 'DEC (2000)'] # these are the colum names that containe ra and dec ### Virgo ###
coor_names = ['GRA2000', 'GDEC2000'] # these are the colum names that containe ra and dec ####### Fornax ###
optical_col = 'BTmag_1'
flux_cols = ['F100', 'F160', 'F250', 'F350', 'F500' ]
optical_lim = 14.89 # faintest magnitude that is possible to select at the distance of the coma cluster
x = 0.30 #conversion between deg and mpc
dist_x = 0.0289#scale fluxes
# conversion between degrees to mpc
#coma x = 1.77
#virgo x= 0.25
#fornax x=0.30
#flux scales
#coma = 1.0
#virgo = 0.0196
#fornax = 0.0289
# # # # # # # # # # # # # # # Function # # # # # # # # # # # # # # # # # # # # # # # # # #
#function to produce new cat with column added for the nth nearest neigboure
def nth_nearest_neighbour(t, coor_names):
    """Append D1/D5/D10 (Mpc) and SIGMA1/5/10 (log10 surface density)
    columns to table t, using module-level scale x (deg -> Mpc).

    O(n^2): every source's distance to every other source is computed.
    NOTE(review): np.float is removed in modern NumPy; this requires an
    old NumPy to run.
    """
    print 'nth_nearest_neighbour....'
    #add columnd for D1,D5, and D10
    t.add_empty_column('D1', dtype = np.float)
    t.add_empty_column('D5', dtype = np.float)
    t.add_empty_column('D10', dtype = np.float)
    t.add_empty_column('SIGMA1', dtype = np.float)
    t.add_empty_column('SIGMA5', dtype = np.float)
    t.add_empty_column('SIGMA10', dtype = np.float)
    ###### part 2 #######
    # find nearest neighbours
    #ra1 and dec1
    ra_1 = t[coor_names[0]]
    dec_1 = t[coor_names[1]]
    #loop through all members of catA
    for i in range(0, len(t)):
        ra = t[coor_names[0]][i]
        dec = t[coor_names[1]][i]
        #ra2 and dec2
        ra_2 = np.array([ra]*len(ra_1), dtype=np.float)
        dec_2 = np.array([dec]*len(ra_1), dtype=np.float)
        #caculate distance to all sources from ra1 and dec1
        radius = np.sort(distance(ra_1, dec_1, ra_2, dec_2 ))
        #print radius[1]*1.77*1000.0, np.min(radius)
        # radius[0] is the self-distance (0), so radius[1]/[5]/[10] are
        # the 1st/5th/10th nearest neighbours; convert degrees to Mpc.
        t['D1'][i] = radius[1] * x
        t['D5'][i] = radius[5] * x
        t['D10'][i] = radius[10]* x
        # Surface density of k neighbours inside radius Dk: k / (pi Dk^2).
        t['SIGMA1'][i] = np.log10(1.0 / (np.pi*(radius[1]*x)**2.0) )
        t['SIGMA5'][i] = np.log10(5.0 / (np.pi*(radius[5]*x)**2.0) )
        t['SIGMA10'][i] = np.log10(10.0 / (np.pi*(radius[10]*x)**2.0))
    return t
#distance equation designed to do arraywise calculations
def distance(ra1, dec1, ra2, dec2):
    """Flat-sky angular separation in degrees; works element-wise on arrays."""
    mean_dec = (dec1 + dec2) / 2.0
    delta_ra = (ra1 - ra2) * np.cos(np.radians(mean_dec))
    delta_dec = dec1 - dec2
    return np.sqrt(delta_ra ** 2.0 + delta_dec ** 2.0)
# # # # # # # # # # # # # # # Main Program # # # # # # # # # # # # # # # # # # # # # # # #
# NOTE(review): `cat`, `optical_col`, `coor_names`, `folder`, `output` and
# `pj` are defined earlier in the file (not visible here) -- presumably the
# input catalogue, its optical-magnitude column, and os.path.join.
#scale fluxes so as to appear at the distance of coma
for i in range(len(flux_cols)):
    col = flux_cols[i]
    #scale to the distance of coma
    cat[col] = cat[col]*dist_x
    #if less than 15mjy then set to 0
    # fluxes below the 15 mJy detection limit are treated as non-detections
    w = np.where(cat[col] < 0.015)[0]
    cat[col][w] = 0.0
#make an optical selection for the cluster
# keep sources brighter than optical_lim, excluding zero/NaN magnitudes
optical = cat.where((cat[optical_col] <= optical_lim) & (np.nan_to_num(cat[optical_col]) != 0.0))
new_cat = nth_nearest_neighbour(optical, coor_names)
new_cat.write(pj(folder, output), overwrite=True)
| [
"chrisfuller@Chriss-MBP.lan"
] | chrisfuller@Chriss-MBP.lan |
27ed23f7457434fd19a3ba7ce1b446ac8006d7d4 | 02f565644b729c496bb4d802dfc6cb3a5db68ff1 | /tests/test_repeated_dna_sequences.py | fbfb36d28c09ab3fc23461b2dc41dd8bf4b564b5 | [] | no_license | saubhik/leetcode | 99a854ad87272eb82b16f22408ee7314ba0db099 | 221f0cb3105e4ccaec40cd1d37b9d7d5e218c731 | refs/heads/master | 2023-04-27T03:11:03.565056 | 2021-05-17T07:55:22 | 2021-05-17T07:55:22 | 275,324,914 | 3 | 1 | null | 2020-10-03T07:06:17 | 2020-06-27T07:48:37 | Python | UTF-8 | Python | false | false | 1,121 | py | import unittest
from repeated_dna_sequences import (
OfficialSolutionApproach2,
OfficialSolutionApproach3,
Solution,
)
class TestRepeatedDNASequences(unittest.TestCase):
    """Exercise all three solver variants on the two LeetCode examples."""

    def test_example_1(self):
        dna = "AAAAACCCCCAAAAACCCCCCAAAAAGGGTTT"
        # The reference solution preserves first-seen order.
        assert Solution().findRepeatedDnaSequences(s=dna) == [
            "AAAAACCCCC",
            "CCCCCAAAAA",
        ]
        # The official approaches make no ordering promise, so compare sets.
        expected = {"CCCCCAAAAA", "AAAAACCCCC"}
        for solver in (OfficialSolutionApproach2, OfficialSolutionApproach3):
            assert set(solver().findRepeatedDnaSequences(s=dna)) == expected

    def test_example_2(self):
        dna = "AAAAAAAAAAAAA"
        expected = ["AAAAAAAAAA"]
        for solver in (Solution, OfficialSolutionApproach2, OfficialSolutionApproach3):
            assert solver().findRepeatedDnaSequences(s=dna) == expected
| [
"saubhik.mukherjee@gmail.com"
] | saubhik.mukherjee@gmail.com |
392b75c54f958a4bebd4f2b76e439193093387d0 | eef243e450cea7e91bac2f71f0bfd45a00c6f12c | /.history/run_20210124182546.py | 96b0a8d0a563b5c91efbe0ad25075a0f449732ac | [] | no_license | hoaf13/nlp-chatbot-lol | 910ab2ea3b62d5219901050271fc1a1340e46a2f | 18cb64efa9d6b4cafe1015f1cd94f4409271ef56 | refs/heads/master | 2023-05-08T04:17:19.450718 | 2021-02-02T02:37:38 | 2021-02-02T02:37:38 | 332,535,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | from app import app
from flask import request
from flask_socketio import SocketIO, send, emit, join_room, leave_room, close_room, rooms, disconnect
socketio = SocketIO(app, cors_allowed_origins='*')
@socketio.on('connected')
def test_connect(data):
    """Announce a newly connected user to every client."""
    notice = "User {} has connected".format(data)
    send(notice, broadcast=True)
@socketio.on('disconnected')
def test_disconnect(data):
    """Announce a disconnecting user to every client.

    Bug fix: the keyword argument was misspelled ``brsoadcast``, so the
    message was never broadcast to the other clients.
    """
    send("User {} has disconnected.".format(data), broadcast=True)
@socketio.on('client-send-data')
def test_emit(data):
    """Log a public payload and echo it back to every connected client."""
    payload = data
    print("data recived: {}".format(payload))
    send(payload, broadcast=True)
@socketio.on('client-send-private-data')
def handle_send_private_data(msg):
    """Build a request/response pair for *msg* and broadcast it.

    Fixes two defects in the original handler:
    * ``socketio.broadcast`` does not exist on the SocketIO object, so the
      old code raised AttributeError on first use; broadcasting is requested
      via the ``broadcast=True`` keyword of ``emit`` instead.
    * the assembled ``ans`` payload was discarded and a placeholder string
      was sent in its place.
    """
    response = "response-> " + msg
    ans = dict()
    ans['client-msg'] = msg
    ans['server-msg'] = response
    emit("server-send-private-data", ans, broadcast=True)
if __name__ == '__main__':
    # Reload templates on change and serve through the SocketIO wrapper so
    # the WebSocket event handlers above are active.
    app.jinja_env.auto_reload = True
    socketio.run(app)
| [
"samartcall@gmail.com"
] | samartcall@gmail.com |
8c6416ed9c7686e7035c91a619c60aa6c6150ff3 | a6b8f33193163de60eb17231a713083da4dea970 | /week_04/mini_projects/webpage_generator/wpgenerator.py | c09f7f4936f39ee4fc06e46531fa266b4da896c5 | [] | no_license | mingyyy/onsite | 4defd8d2e8bad6f2f1c61f756ee9269ec0ba5fe2 | 79c8fa30ca152161abfeef797d6eb357f764dc97 | refs/heads/master | 2022-12-14T18:50:13.514560 | 2019-04-02T11:56:37 | 2019-04-02T11:56:37 | 171,419,253 | 0 | 3 | null | 2022-12-08T01:41:15 | 2019-02-19T06:35:59 | Python | UTF-8 | Python | false | false | 445 | py |
path = "raw/ubud.txt"
with open(path, "r") as f:
    lines = f.readlines()

# First line of the source text is the page title; every remaining
# non-blank line becomes one paragraph.
# Fixes relative to the original script: the old loop reassigned the title
# on every pass, never captured any paragraph text (its inner while/break
# was a no-op), printed every line as a side effect, and the template
# contained a stray "<>" tag and an empty <h1>.
title = lines[0].strip() if lines else ""
paragraphs = [line.strip() for line in lines[1:] if line.strip()]
body = "\n".join("    <p>{}</p>".format(p) for p in paragraphs)

page = f'''
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>{title}</title>
</head>
<body>
    <h1>{title}</h1>
{body}
</body>
</html>
'''
''' | [
"j.yanming@gmail.com"
] | j.yanming@gmail.com |
def discs(A):
    """Count pairs of intersecting discs (Codility NumberOfDiscIntersections).

    Disc j is centred at (j, 0) with radius A[j]; two discs intersect when
    their closed intervals [j - A[j], j + A[j]] overlap on the x axis
    (touching counts).  Returns the number of intersecting pairs, or -1 if
    the count exceeds 10**7.  The original draft only computed the interval
    start points and returned nothing.
    """
    starts = sorted(j - r for j, r in enumerate(A))
    ends = sorted(j + r for j, r in enumerate(A))
    pairs = 0
    open_discs = 0
    s = e = 0
    n = len(A)
    while s < n:
        if starts[s] <= ends[e]:
            # a newly opened disc intersects every disc still open
            pairs += open_discs
            open_discs += 1
            s += 1
        else:
            open_discs -= 1
            e += 1
        if pairs > 10 ** 7:
            return -1
    return pairs


discs([1, 5, 2, 1, 4, 0])
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
a528bab8dad447a472fead2de386caaa12a21e06 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/securityinsight/azure-mgmt-securityinsight/generated_samples/get_entity_queries.py | 8cee24ba13980a452d2c2b900ca52c9cfd64be9e | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,615 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.securityinsight import SecurityInsights
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-securityinsight
# USAGE
python get_entity_queries.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List every entity query in the sample workspace and print each one."""
    credential = DefaultAzureCredential()
    client = SecurityInsights(
        credential=credential,
        subscription_id="d0cfe6b2-9ac0-4464-9919-dccaee2e48c0",
    )
    pages = client.entity_queries.list(
        resource_group_name="myRg",
        workspace_name="myWorkspace",
    )
    for entity_query in pages:
        print(entity_query)
# x-ms-original-file: specification/securityinsights/resource-manager/Microsoft.SecurityInsights/preview/2022-12-01-preview/examples/entityQueries/GetEntityQueries.json
if __name__ == "__main__":
    # Allow the sample to be executed directly as a script.
    main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
4d07737f103ae1cce749e1abaf6560be63c813fc | 8c50265b43add0e91e30245cc7af3c2558c248f5 | /example/rcnn/symnet/metric.py | fa8d7919e919244f30ccfca2fbaf238d92cf322d | [
"BSD-3-Clause",
"BSD-2-Clause-Views",
"Zlib",
"Apache-2.0",
"BSD-2-Clause",
"Intel",
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | awslabs/dynamic-training-with-apache-mxnet-on-aws | 6a67f35d7e4b12fa8bba628bd03b2b031924e211 | 1063a979417fee8c820af73860eebd2a4f670380 | refs/heads/master | 2023-08-15T11:22:36.922245 | 2022-07-06T22:44:39 | 2022-07-06T22:44:39 | 157,440,687 | 60 | 19 | Apache-2.0 | 2022-11-25T22:23:19 | 2018-11-13T20:17:09 | Python | UTF-8 | Python | false | false | 5,187 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
import numpy as np
def get_names():
    """Return (pred_names, label_names) used to index module outputs."""
    pred_names = ['rpn_cls_prob', 'rpn_bbox_loss', 'rcnn_cls_prob',
                  'rcnn_bbox_loss', 'rcnn_label']
    label_names = ['rpn_label', 'rpn_bbox_target', 'rpn_bbox_weight']
    return pred_names, label_names
class RPNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RPN head, ignoring unlabeled anchors."""

    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        prob = preds[self.pred.index('rpn_cls_prob')]
        target = labels[self.label.index('rpn_label')]

        # prob is (b, c, p) or (b, c, h, w): argmax over the class channel,
        # then flatten the spatial axes to (b, p)
        predicted = mx.ndarray.argmax_channel(prob).asnumpy().astype('int32')
        predicted = predicted.reshape((predicted.shape[0], -1))
        target = target.asnumpy().astype('int32')

        # anchors labeled -1 are "don't care" and excluded from the score
        valid = np.where(target != -1)
        predicted = predicted[valid]
        target = target[valid]

        self.sum_metric += np.sum(predicted.flat == target.flat)
        self.num_inst += len(predicted.flat)
class RCNNAccMetric(mx.metric.EvalMetric):
    """Classification accuracy of the RCNN head over all sampled RoIs."""

    def __init__(self):
        super(RCNNAccMetric, self).__init__('RCNNAcc')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        prob = preds[self.pred.index('rcnn_cls_prob')]
        target = preds[self.pred.index('rcnn_label')]

        num_classes = prob.shape[-1]
        predicted = prob.asnumpy().reshape(-1, num_classes).argmax(axis=1).astype('int32')
        target = target.asnumpy().reshape(-1,).astype('int32')

        self.sum_metric += np.sum(predicted.flat == target.flat)
        self.num_inst += len(predicted.flat)
class RPNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy of the RPN classifier, averaged over labeled anchors."""

    def __init__(self):
        super(RPNLogLossMetric, self).__init__('RPNLogLoss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        prob = preds[self.pred.index('rpn_cls_prob')]
        target = labels[self.label.index('rpn_label')]

        # target (b, p) -> flat vector
        target = target.asnumpy().astype('int32').reshape((-1))
        # prob (b, c, p) or (b, c, h, w) -> (b, p, c) -> (b*p, c)
        prob = prob.asnumpy().reshape((prob.shape[0], prob.shape[1], -1)).transpose((0, 2, 1))
        prob = prob.reshape((target.shape[0], -1))

        # drop anchors labeled -1, then pick the probability of the true class
        valid = np.where(target != -1)[0]
        target = target[valid]
        cls = prob[valid, target]

        # epsilon guards against log(0)
        cls += 1e-14
        loss = -1 * np.log(cls)
        self.sum_metric += np.sum(loss)
        self.num_inst += target.shape[0]
class RCNNLogLossMetric(mx.metric.EvalMetric):
    """Cross-entropy of the RCNN classifier over all sampled RoIs."""

    def __init__(self):
        super(RCNNLogLossMetric, self).__init__('RCNNLogLoss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        prob = preds[self.pred.index('rcnn_cls_prob')]
        target = preds[self.pred.index('rcnn_label')]

        num_classes = prob.shape[-1]
        prob = prob.asnumpy().reshape(-1, num_classes)
        target = target.asnumpy().reshape(-1,).astype('int32')

        # probability assigned to the true class of every RoI
        cls = prob[np.arange(target.shape[0]), target]
        # epsilon guards against log(0)
        cls += 1e-14
        loss = -1 * np.log(cls)
        self.sum_metric += np.sum(loss)
        self.num_inst += target.shape[0]
class RPNL1LossMetric(mx.metric.EvalMetric):
    """Bbox-regression loss of the RPN, averaged over foreground anchors."""

    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        loss = preds[self.pred.index('rpn_bbox_loss')].asnumpy()
        weight = labels[self.label.index('rpn_bbox_weight')].asnumpy()

        # each foreground anchor contributes 4 weighted bbox coordinates
        self.sum_metric += np.sum(loss)
        self.num_inst += np.sum(weight > 0) / 4
class RCNNL1LossMetric(mx.metric.EvalMetric):
    """Bbox-regression loss of the RCNN head over foreground RoIs."""

    def __init__(self):
        super(RCNNL1LossMetric, self).__init__('RCNNL1Loss')
        self.pred, self.label = get_names()

    def update(self, labels, preds):
        loss = preds[self.pred.index('rcnn_bbox_loss')].asnumpy()
        target = preds[self.pred.index('rcnn_label')].asnumpy()

        # only RoIs with a non-background label carry regression targets
        foreground = np.where(target != 0)[0]
        self.sum_metric += np.sum(loss)
        self.num_inst += len(foreground)
| [
"vikumar@88e9fe53272d.ant.amazon.com"
] | vikumar@88e9fe53272d.ant.amazon.com |
b54cad4d67281209ae0454f243bca0b73a8d9bf8 | 3420dd606acc60f921efcc79160d85af92be3740 | /dexp/processing/denoising/_test/test_butterworth.py | f8a65768a97d976601687f947fe3ad4734ff0888 | [
"BSD-3-Clause"
] | permissive | royerlab/dexp | 3e9b67b4084eacf9de8006f75754292f8d7e0fb4 | 8e8399f5d0d8f1e1ae0ddfa6cb6011921929ae0b | refs/heads/master | 2023-05-26T04:03:44.833528 | 2023-04-10T16:06:09 | 2023-04-10T16:06:09 | 196,109,847 | 23 | 6 | BSD-3-Clause | 2023-04-07T21:48:25 | 2019-07-10T01:41:20 | Python | UTF-8 | Python | false | false | 251 | py | from dexp.processing.denoising.demo.demo_2D_butterworth import _demo_butterworth
from dexp.utils.testing.testing import execute_both_backends
@execute_both_backends
def test_butterworth():
    """Demo must reach the reference score of 0.608 within a 0.03 margin."""
    score = _demo_butterworth(display=False)
    assert score >= 0.608 - 0.03
| [
"noreply@github.com"
] | royerlab.noreply@github.com |
ce36258cadd509837e9c4a8b6f7c8c3d43ffad1c | 57fc5d54f5df359c7a53020fb903f36479d3a322 | /controllers/.history/supervisor/supervisor_20201127193541.py | de29736d2a7d8836760d56a0b3c6d96a070d790a | [] | no_license | shenwuyue-xie/webots_testrobots | 929369b127258d85e66c5275c9366ce1a0eb17c7 | 56e476356f3cf666edad6449e2da874bb4fb4da3 | refs/heads/master | 2023-02-02T11:17:36.017289 | 2020-12-20T08:22:59 | 2020-12-20T08:22:59 | 323,032,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,970 | py | import math
import numpy as np
from numpy import random
from numpy.core.fromnumeric import size
from numpy.lib.function_base import meshgrid
import utilities as utils
from deepbots.supervisor.controllers.supervisor_emitter_receiver import \
SupervisorCSV
# # from deepbots.supervisor.wrappers.tensorboard_wrapper import TensorboardLogger
from tensorboardX import SummaryWriter
from models.networks import TD3
from controller import Keyboard
import os
# Maximum number of chained robot modules the supervisor may spawn.
Max_robotnum = 6
# Observation layout: distance-sensor slots plus 9 state values per module.
OBSERVATION_SPACE = (Max_robotnum-1) * 4 + 7 + 9 * Max_robotnum
# Two motor commands per module plus 3 extra control values (the last one
# drives add/remove-module decisions in robot_step).
ACTION_SPACE = Max_robotnum * 2 + 3
# Total number of distance-sensor readings across a full chain.
MAX_DSNUM = (Max_robotnum-1) * 4 + 7
# Normalisation ranges used by get_observations().
DIST_SENSORS_MM = {'min': 0, 'max': 1000}
XPOSITION = {'min':-2, 'max':2}
YPOSITION = {'min':-1.5 , 'max':1.5}
ZPOSITION = {'min': -1, 'max' : 8}
MAX_DISTANCE = {'min':0, 'max':10}
MAX_ANGLE = {'min':-math.pi, 'max':math.pi}
# import ptvsd
# print("waiting for debugger attach")
# ptvsd.enable_attach(address=("127.0.0.1",7788))
# ptvsd.wait_for_attach()
class TaskDecisionSupervisor(SupervisorCSV):
def __init__(self,robot,observation_space,log_dir,v_action,v_observation,v_reward,windows=[10,100,200]):
super(TaskDecisionSupervisor,self).__init__()
self.timestep = int(self.supervisor.getBasicTimeStep())
self.keyboard = Keyboard()
self.keyboard.enable(self.timestep)
self.emitter = self.supervisor.getEmitter('emitter')
self.receiver = self.supervisor.getReceiver('receiver')
self.robot_list = robot
self.robot_handles = []
self.observation = [0 for i in range(observation_space)]
self.findThreshold = 0.2
self.steps = 0
self.steps_threshold = 6000
self.endbattery = [50000 for i in range(Max_robotnum)]
self.final_distance = [50 for i in range(Max_robotnum)]
self.final_target = self.supervisor.getFromDef('final_target')
self.should_done = False
self.startbattery = 50000
self.setuprobots()
self.step_cntr = 0
self.step_global = 0
self.step_reset = 0
self.score = 0
self.score_history = []
self.v_action = v_action
self.v_observation = v_observation
self.v_reward = v_reward
self.windows = windows
self.file_writer = SummaryWriter(log_dir, flush_secs=30)
def setuprobots(self):
for defname in self.robot_list:
self.robot_handles.append(self.supervisor.getFromDef(defname))
def handle_receiver(self):
message = []
for i in range(self.robot_num):
if self.receiver.getQueueLength() > 0:
string_message = self.receiver.getData().decode("utf-8")
string_message = string_message.split(",")
for ms in string_message:
message.append(ms)
self.receiver.nextPacket()
return message
    def get_observations(self):
        """Build the normalized observation vector for the current state.

        Layout: MAX_DSNUM distance-sensor readings, then 9 values per active
        module (position xyz, rotation axis-angle, distance and bearing to
        the target), then 0.5 padding for every absent module slot.  When no
        sensor message has arrived yet, an all-zero vector is returned.
        """
        self.ds_values = []
        self.final_distance = [50 for i in range(Max_robotnum)]
        # unseen sensor slots default to the max reading (1000 mm = clear)
        self.message = [1000 for i in range(MAX_DSNUM)]
        self.angles = []
        observation = []
        message = self.handle_receiver()
        self.angles = [0 for i in range(Max_robotnum)]
        if len(message) != 0:
            for i in range(len(message)):
                self.message[i] = float(message[i])
                self.ds_values.append(float(message[i]))
            # normalized distance-sensor block
            for j in range(MAX_DSNUM):
                observation.append(utils.normalize_to_range(float(self.message[j]),DIST_SENSORS_MM['min'],DIST_SENSORS_MM['max'], 0, 1))
            # per-module pose block
            for k in range(0,self.robot_num):
                robot_position = []
                robot_position = self.robot_handles[k].getPosition()
                robot_rotation = []
                # NOTE(review): getOrientation() returns a 3x3 rotation
                # matrix, yet only elements [0..3] are consumed below as if
                # it were an axis-angle rotation -- confirm intended.
                robot_rotation = self.robot_handles[k].getOrientation()
                observation.append(utils.normalize_to_range(float(robot_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
                observation.append(utils.normalize_to_range(float(robot_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
                observation.append(utils.normalize_to_range(float(robot_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
                observation.append(utils.normalize_to_range(float(robot_rotation[0]),-1,1,0,1))
                observation.append(utils.normalize_to_range(float(robot_rotation[1]),-1,1,0,1))
                observation.append(utils.normalize_to_range(float(robot_rotation[2]),-1,1,0,1))
                observation.append(utils.normalize_to_range(float(robot_rotation[3]),-math.pi,math.pi,0,1))
                self.final_distance[k] = utils.get_distance_from_target(self.robot_handles[k],self.final_target)
                observation.append(utils.normalize_to_range(float(self.final_distance[k]),MAX_DISTANCE['min'],MAX_DISTANCE['max'],0,1))
                self.angles[k] = utils.get_angle_from_target(self.robot_handles[k],self.final_target)
                observation.append(utils.normalize_to_range(float(self.angles[k]),MAX_ANGLE['min'],MAX_ANGLE['max'],0,1))
            # pad observation slots of modules that are not attached
            for m in range(self.robot_num,Max_robotnum):
                for n in range(9):
                    observation.append(0.5)
        else :
            observation = [0 for i in range(OBSERVATION_SPACE)]
        self.observation = observation
        return self.observation
# robot_children = self.robot_handles[k].getField('children')
# frontjoint_node = robot_children.getMFNode(3)
# frontjoint = frontjoint_node.getField('jointParameters')
# frontjoint = frontjoint.getSFNode()
# para = frontjoint.getField('position')
# front_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(front_hingeposition),-math.pi/2,math.pi/2,0,1))
# front_ep = frontjoint_node.getField('endPoint')
# front_ep = front_ep.getSFNode()
# frontrotation_field = front_ep.getField('rotation')
# front_rotation = frontrotation_field.getSFRotation()
# for f in range(3):
# observation.append(utils.normalize_to_range(float(front_rotation[f]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(front_rotation[3]),-math.pi/2,math.pi/2,0,1))
# robot_children = self.robot_handles[k].getField('children')
# rearjoint_node = robot_children.getMFNode(4)
# rearjoint = rearjoint_node.getField('jointParameters')
# rearjoint = rearjoint.getSFNode()
# para = rearjoint.getField('position')
# rear_hingeposition = para.getSFFloat()
# observation.append(utils.normalize_to_range(float(rear_hingeposition),-math.pi/2,math.pi/2,0,1))
# rear_ep = rearjoint_node.getField('endPoint')
# rear_ep = rear_ep.getSFNode()
# rearrotation_field = rear_ep.getField('rotation')
# rear_rotation = rearrotation_field.getSFRotation()
# for r in range(3):
# observation.append(utils.normalize_to_range(float(rear_rotation[r]),-1,1,0,1))
# observation.append(utils.normalize_to_range(float(rear_rotation[3]),-math.pi/2,math.pi/2,0,1))
# final_position = []
# final_position = self.final_target.getPosition()
# observation.append(utils.normalize_to_range(float(final_position[0]),XPOSITION['min'],XPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[1]),YPOSITION['min'],YPOSITION['max'],0,1))
# observation.append(utils.normalize_to_range(float(final_position[2]),ZPOSITION['min'],ZPOSITION['max'],0,1))
# final_distance = []
# for d in range(self.robot_num):
# final_distance.append(utils.get_distance_from_target(self.robot_handles[d],self.final_target))
# self.final_distance[d] = final_distance[d]
def get_default_observation(self):
self.observation = [0 for i in range(OBSERVATION_SPACE)]
return self.observation
    def empty_queue(self):
        """Reset per-episode counters and drop any stale receiver packets.

        Steps the simulator while draining the queue so observations from a
        previous episode never leak into the new one.
        """
        self.observation = [0 for i in range(OBSERVATION_SPACE)]
        # self.shockcount = 0
        self.overrangecount = 0
        # self.flagadd = False
        # self.flagreduce = False
        self.dscount = 0
        while self.supervisor.step(self.timestep) != -1:
            if self.receiver.getQueueLength() > 0:
                self.receiver.nextPacket()
            else:
                break
def get_reward(self,action):
if (self.observation == [0 for i in range(OBSERVATION_SPACE)] or len(self.observation) == 0 ) :
return 0
reward = 0
translations = []
for i in range(len(self.robot_handles)):
translation = self.robot_handles[i].getField('translation').getSFVec3f()
translations.append(translation)
if self.steps >= self.steps_threshold:
return -20
if np.min(self.ds_values) <= 50:
reward = reward -2
self.dscount = self.dscount + 1
if self.dscount > 60:
reward = reward -20
self.should_done = True
if self.dscount > 30:
reward = reward - 5
if np.min(self.ds_values) <= 150:
reward = reward -1
for j in range(len(self.robot_handles)):
if translations[j][2] <= ZPOSITION['min'] or translations[j][2] >= ZPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
if translations[j][0] <= XPOSITION['min'] or translations[j][0] >= ZPOSITION['max']:
reward = reward - 2
self.overrangecount = self.overrangecount + 1
if self.overrangecount >40:
reward = reward -20
self.should_done = True
if min(self.final_distance) < self.findThreshold:
reward = reward + 100
for m in range(Max_robotnum):
consumption = self.startbattery - self.endbattery[m]
reward = reward - float(consumption/self.startbattery) * 6
return reward
else :
reward = reward - float(min(self.final_distance))
return reward
# """惩罚不停+-+-的行为 """
# if action[-1] > 0.9 :
# if self.flagreduce == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = True
# self.flagreduce = False
# if action[-1] < 0.1:
# if self.flagadd == True:
# self.shockcount = self.shockcount + 1
# self.flagadd = False
# self.flagreduce =True
# if action[-1] >=0.1 and action[-1] <=0.9:
# self.shockcount = self.shockcount - 1
# self.flagadd = False
# self.flagreduce = False
# if self.shockcount >= 8:
# reward = reward - 4
# if self.shockcount >= 12:
# reward = reward - 8
# self.should_done = True
# """如果ban的动作值有十个值出现在动作区域,不稳定给负的reward,训练到100代左右时,模块几乎不再动自己的前后motor"""
# count = 0
# for k in range(12,24):
# action[k] = utils.normalize_to_range(float(action[k]),-0.2,1.2,0,1)
# if action[k] > 0.95 or action[k] < 0.05:
# count = count + 1
# if count > 9 :
# reward = reward - 2
"""something worse need to be modified"""
"""加机器人时还需要考虑rearmotor的位置,测试后发现是hingejoint的jointParameters域的position参数,需要找到这个参数"""
"""可以只改变相对应的hingejoint参数使两者结合,也可以改变模块位置和角度,但是改变模块位置和角度比较复杂"""
# position = abs(get...)
# 改变hingejoint,只需要改变front hingejoint的position参数
# 改变模块位置和角度
# deltax和deltaz可以根据position来计算,主要是rotation要更改,绕x轴旋转(1,0,0,rad)
# 但是之前寻找模块的位置时已经修改过自己的rotation,所以不好更改,并且更改了rotation,translation也要更改,用这套体姿表征体系更改起来特别复杂
# 另外,因为是往后加模块,所以除非尾巴上翘,否则都不能这样加(陷到地底下了)
# 况且,即便尾巴上翘,可以直接加到后ban上,可能也会因为重力原因把整个构型掀翻
# 综上所述,无论是可行性,还是稳定性原因,都建议只修改front_hingejoint的position值
    def robot_step(self,action):
        """Add or remove a tail module according to the last action value.

        action[-1] in (0, 0.1]  -> append a new module behind the current
        tail (position/rotation derived from the tail pose, the module's
        .wbo file is patched in place before import);
        action[-1] in [0.9, 1)  -> remove the tail module, remembering its
        remaining battery charge for later re-use.
        Any other value leaves the chain unchanged.
        """
        # x = np.random.rand()
        # e = 0.8 + ep * 0.2/10000
        # if x > e :
        #     action[-1] = np.random.rand()
        if action[-1] > 0 and action[-1] <= 0.1 and self.robot_num < Max_robotnum:
            # pose of the current tail module; the new module is placed
            # 0.23 m behind it along its heading
            last_translation = self.robot_handles[-1].getField('translation').getSFVec3f()
            last_angle = self.robot_handles[-1].getField('rotation').getSFRotation()[3]
            last_rotation = self.robot_handles[-1].getField('rotation').getSFRotation()
            delta_z = 0.23 * math.cos(last_angle)
            delta_x = 0.23 * math.sin(last_angle)
            new_translation = []
            new_translation.append(last_translation[0] - delta_x)
            new_translation.append(last_translation[1])
            new_translation.append(last_translation[2] - delta_z)
            # read the tail's rear hinge position to align the new module
            robot_children = self.robot_handles[-1].getField('children')
            rearjoint_node = robot_children.getMFNode(4)
            joint = rearjoint_node.getField('jointParameters')
            joint = joint.getSFNode()
            para = joint.getField('position')
            hingeposition = para.getSFFloat()
            if hingeposition > 0.8 or hingeposition < -0.8:
                # large hinge angles shorten the effective gap; pull the new
                # module slightly forward to compensate
                delta = 0.03 - 0.03 * math.cos(hingeposition)
                delta_z = delta * math.cos(last_angle)
                delta_x = delta * math.sin(last_angle)
                new_translation[0] = new_translation[0] + delta_x
                new_translation[2] = new_translation[2] + delta_z
            new_rotation = []
            for i in range(4):
                new_rotation.append(last_rotation[i])
            # one-shot flags so only the first matching line of the .wbo
            # template is rewritten
            flag_translation = False
            flag_rotation = False
            flag_front = False
            flag_frontposition = False
            flag_frontrotation = False
            battery_remain = float(self.endbattery[self.robot_num])
            importname = "robot_" + str(self.robot_num) + '.wbo'
            new_file =[]
            with open(importname,'r') as f:
                lines = f.readlines()
                for line in lines:
                    if "translation" in line:
                        if flag_translation == False:
                            replace = "translation " + str(new_translation[0]) + " " + str(new_translation[1]) + " " + str(new_translation[2])
                            line = "\t" + replace +'\n'
                            flag_translation = True
                    if "rotation" in line:
                        if flag_rotation == False:
                            replace = "rotation " + str(new_rotation[0]) + " " + str(new_rotation[1]) + " " + str(new_rotation[2]) + " " \
                                +str(new_rotation[3])
                            line = "\t" + replace +'\n'
                            flag_rotation = True
                    if 'front HingeJoint' in line:
                        flag_front = True
                    if 'position' in line:
                        if flag_front == True and flag_frontposition ==False:
                            repalce = "position "+ str(hingeposition)
                            line = "\t\t\t\t" + repalce + '\n'
                            flag_frontposition = True
                    if 'rotation' in line :
                        if flag_front == True and flag_frontrotation == False:
                            # NOTE(review): this branch looks unfinished --
                            # `replace` is built from str() (empty) and never
                            # written back, and flag_frontrotation is never
                            # set True.  Confirm intended behaviour.
                            replace = "rotation " + str()
                    if "50000" in line :
                        # restore the module's remembered battery charge
                        line = "\t\t" + str(battery_remain) + "," + " " + str(50000) + '\n'
                    new_file.append(line)
            with open(importname,'w') as f:
                for line in new_file:
                    f.write(line)
            # import the patched module file into the scene tree
            rootNode = self.supervisor.getRoot()
            childrenField = rootNode.getField('children')
            childrenField.importMFNode(-1,importname)
            defname = 'robot_' + str(self.robot_num)
            self.robot_handles.append(self.supervisor.getFromDef(defname))
            self.robot_num = self.robot_num + 1
            # new_translation_field = self.robot_handles[-1].getField('translation')
            # new_translation_field.setSFVec3f(new_translation)
            # new_rotation_field = self.robot_handles[-1].getField('rotation')
            # new_rotation_field.setSFRotation(new_rotation)
            # robot_children = self.robot_handles[-1].getField('children')
            # frontjoint_node = robot_children.getMFNode(3)
            # joint = frontjoint_node.getField('jointParameters')
            # joint = joint.getSFNode()
            # para = joint.getField('position')
            # para.setSFFloat(-hingeposition)
            # battery_remain = float(self.endbattery[self.robot_num - 1])
            # battery_field = self.robot_handles[-1].getField('battery')
            # battery_field.setMFFloat(0,battery_remain)
            # battery_field.setMFFloat(1,self.startbattery)
        elif action[-1] >= 0.9 and action[-1] < 1 and self.robot_num >1:
            # remove the tail module, keeping its remaining battery charge
            battery_field = self.robot_handles[-1].getField('battery')
            battery_remain = battery_field.getMFFloat(0)
            self.endbattery[self.robot_num - 1] = battery_remain
            removerobot = self.robot_handles[-1]
            removerobot.remove()
            self.robot_num = self.robot_num - 1
            del(self.robot_handles[-1])
    def step(self,action):
        """Advance one control step and return (obs, reward, done, info).

        Emits the action to the robots, advances the simulator, gathers the
        next observation/reward, and handles debug hotkeys (Ctrl+A/R/Ctrl+Y/
        Ctrl+M print actions/rewards/observations/raw messages) plus
        TensorBoard logging at the configured verbosity levels.
        """
        if self.supervisor.step(self.timestep) == -1:
            exit()
        self.handle_emitter(action)
        key = self.keyboard.getKey()
        observation = self.get_observations()
        reward = self.get_reward(action)
        isdone = self.is_done()
        info = self.get_info()
        if key == Keyboard.CONTROL + ord("A"):
            print()
            print("Actions: ", action)
        if key == ord("R"):
            print()
            print("Rewards: ", reward)
        if key == Keyboard.CONTROL + ord("Y"):
            print()
            print("Observations: ", observation)
        if key == Keyboard.CONTROL + ord("M"):
            print()
            print("message", self.message)
        if (self.v_action > 1):
            self.file_writer.add_histogram(
                "Actions/Per Global Step",
                action,
                global_step=self.step_global)
        if (self.v_observation > 1):
            self.file_writer.add_histogram(
                "Observations/Per Global Step",
                observation,
                global_step=self.step_global)
        if (self.v_reward > 1):
            self.file_writer.add_scalar("Rewards/Per Global Step", reward,
                                        self.step_global)
        if (isdone):
            self.file_writer.add_scalar(
                "Is Done/Per Reset step",
                self.step_cntr,
                global_step=self.step_reset)
            self.file_writer.flush()
        self.score += reward
        self.step_cntr += 1
        self.step_global += 1
        return observation,reward,isdone,info
def is_done(self):
self.steps = self.steps + 1
self.file_writer.flush()
if min(self.final_distance) <= self.findThreshold:
print("======== + Solved + ========")
return True
if self.steps >= self.steps_threshold or self.should_done:
return True
# rotation_field = self.robot_handles[0].getField('rotation').getSFRotation()
# """需要计算出模块完全侧边倒的rotation是多少,遇到这种情况直接进行下一次迭代"""
# # if rotation_field[0] < -0.4 and rotation_field[1] > 0.4 and rotation_field[2] > 0.4 and rotation_field[3] < -1.5708:
# # return True
return False
    def reset(self):
        """Respawn the robots, log the episode score, and reset counters.

        Returns the default (all-zero) observation for the new episode.
        """
        print("Reset simulation")
        self.respawnRobot()
        self.steps = 0
        self.should_done = False
        self.robot_num = 1
        # (zh) translation: "the original wrapper's observation handling is buggy"
        """observation 源代码wrapper有问题"""
        self.score_history.append(self.score)
        if (self.v_reward > 0):
            # raw score plus moving averages for each configured window
            self.file_writer.add_scalar(
                "Score/Per Reset", self.score, global_step=self.step_reset)
            for window in self.windows:
                if self.step_reset > window:
                    self.file_writer.add_scalar(
                        "Score/With Window {}".format(window),
                        np.average(self.score_history[-window:]),
                        global_step=self.step_reset - window)
            self.file_writer.flush()
        self.step_reset += 1
        self.step_cntr = 0
        self.score = 0
        return self.get_default_observation()
def flush(self):
if self._file_writer is not None:
self._file_writer.flush()
def close(self):
if self._file_writer is not None:
self._file_writer.close()
    def get_info(self):
        # No auxiliary diagnostics are produced; step() forwards this None.
        pass
    def respawnRobot(self):
        """Remove every module and re-import the initial single-module scene.

        Only robot_0.wbo is imported; the extra modules are re-added at run
        time by robot_step().  Handles and the target node are re-resolved
        afterwards because removal invalidates them.
        """
        for robot in self.robot_handles:
            robot.remove()
        rootNode = self.supervisor.getRoot()
        childrenField = rootNode.getField('children')
        childrenField.importMFNode(-1,"robot_0.wbo")
        # childrenField.importMFNode(-1,"robot_1.wbo")
        # childrenField.importMFNode(-1,"robot_2.wbo")
        # childrenField.importMFNode(-1,"robot_3.wbo")
        # childrenField.importMFNode(-1,"robot_4.wbo")
        # childrenField.importMFNode(-1,"robot_5.wbo")
        self.robot_handles = []
        for defrobotname in self.robot_list:
            self.robot_handles.append(self.supervisor.getFromDef(defrobotname))
        self.final_target = self.supervisor.getFromDef('final_target')
        self.supervisor.simulationResetPhysics()
        self._last_message = None
robot_defnames = ['robot_0']
supervisor_env = TaskDecisionSupervisor(robot_defnames, observation_space=OBSERVATION_SPACE,log_dir="logs/results/ddpg", v_action=1,v_observation=1,v_reward=1,windows=[10,\
10000, 2000])
agent = TD3(lr_actor=0.00025,
lr_critic=0.0025,
input_dims= OBSERVATION_SPACE,
gamma=0.99,
tau=0.001,
env=supervisor_env,
batch_size=512,
layer1_size=400,
layer2_size=300,
layer3_size=200,
layer4_size=400,
layer5_size=300,
layer6_size=200,
n_actions=ACTION_SPACE,
load_models=False,
save_dir='./models/saved/ddpg/')
score_history = []
np.random.seed(0)
# Main training loop: every 10000th episode runs in evaluation mode (no
# exploration, no learning); all other episodes collect transitions and
# train the TD3 agent online.  The very first action of a training episode
# is a fixed neutral action ([0, 0] + 13 x 0.5, matching ACTION_SPACE=15).
for i in range(1, 20000):
    done = False
    score = 0
    obs = list(map(float, supervisor_env.reset()))
    supervisor_env.empty_queue()
    first_iter = True
    if i % 10000 == 0:
        print("================= TESTING =================")
        while not done:
            act = agent.choose_action_test(obs).tolist()
            supervisor_env.robot_step(act)
            new_state, _, done, _ = supervisor_env.step(act)
            obs = list(map(float, new_state))
    else:
        print("================= TRAINING =================")
        while not done:
            if (not first_iter):
                act = agent.choose_action_train(obs).tolist()
            else:
                first_iter = False
                act = [0,0]
                for k in range(0,13):
                    act.append(0.5)
            supervisor_env.robot_step(act)
            new_state, reward, done, info = supervisor_env.step(act)
            agent.remember(obs, act, reward, new_state, int(done))
            agent.learn()
            score += reward
            obs = list(map(float, new_state))
    score_history.append(score)
    print("===== Episode", i, "score %.2f" % score,
          "100 game average %.2f" % np.mean(score_history[-100:]))
    # checkpoint the networks every 100 episodes
    if i % 100 == 0:
        agent.save_models()
"1092673859@qq.com"
] | 1092673859@qq.com |
94f60f929cf72989003431c51a7ae1b30e26b12a | bb983b38f9be7b6fd4ab1a651484db37c1aeff39 | /1019/python_list_index.py | d54b78a345b11bda2588b2b6d799910da221d2b2 | [] | no_license | nakanishi-akitaka/python2018_backup | c214df78372cca993d69f8001010ec2f6dcaf1be | 45766d3c3777de2a91b3e2cf50c6bfedca8627da | refs/heads/master | 2023-02-18T08:04:28.625532 | 2022-06-07T01:02:53 | 2022-06-07T01:02:53 | 201,399,236 | 5 | 30 | null | 2023-02-10T21:06:51 | 2019-08-09T05:48:22 | Jupyter Notebook | UTF-8 | Python | false | false | 1,127 | py | # -*- coding: utf-8 -*-
"""
https://note.nkmk.me/python-list-index/
Created on Fri Oct 19 12:40:31 2018
@author: Akitaka
"""
l = list('abcde')
print(l)
print(l.index('a'))
print(l.index('c'))
def my_index(l, x, default=False):
if x in l:
return l.index(x)
else:
return default
print(my_index(l, 'd'))
print(my_index(l, 'x'))
print(my_index(l, 'x', -1))
#%%
l_dup = list('abcba')
print(l_dup)
print(l_dup.index('a'))
print(l_dup.index('b'))
#%%
print([i for i, x in enumerate(l_dup) if x == 'a'])
print([i for i, x in enumerate(l_dup) if x == 'b'])
print([i for i, x in enumerate(l_dup) if x == 'c'])
print([i for i, x in enumerate(l_dup) if x == 'x'])
#%%
def my_index_multi(l, x):
return [i for i, _x in enumerate(l) if _x == x]
print(my_index_multi(l_dup, 'a'))
print(my_index_multi(l_dup, 'c'))
print(my_index_multi(l_dup, 'x'))
#%%
t = tuple('abcde')
print(t)
print(t.index('a'))
print(my_index(t, 'c'))
print(my_index(t, 'x'))
t_dup = tuple('abcba')
print(t_dup)
print(my_index_multi(t_dup, 'a'))
| [
"noreply@github.com"
] | nakanishi-akitaka.noreply@github.com |
ec813cec9fde2a104a1cdad75cf78ecc5a255913 | 65b4522c04c2be071c2d42095956fe950fe1cebe | /inversions/inversion_one_chanel/run1/analysis/pred_disp_large_scale/plots/Raslip_vel/plot_displacement_contours.py | f75d0bde319f5b19287101cdc1313d0c9cf23b29 | [] | no_license | geodesy/viscojapan | ac0cd93f7a2134cd2651623b94879dcc21c0c46a | 03e70265b56eb5994e73bcb6066f0be338e42f27 | refs/heads/master | 2021-03-03T18:19:07.779601 | 2015-07-16T03:50:49 | 2015-07-16T03:50:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 964 | py | import numpy as np
import viscojapan as vj
from epochs import epochs
def load_lons_lats():
tp = np.loadtxt('../stations_large_scale.in', '4a,f,f')
lons = [ii[1] for ii in tp]
lats = [ii[2] for ii in tp]
return lons, lats
lons, lats = load_lons_lats()
reader = vj.inv.DeformPartitionResultReader(
'../../deformation_partition_large_scale.h5')
Ecumu = reader.Ecumu
contours = [0.05, 0.1, 0.5, 1, 2]
cmpt = 'Raslip'
obj = getattr(reader, cmpt)
for epoch in epochs:
print(cmpt, epoch)
if epoch == 0:
continue
mags = obj.get_velocity_hor_mag_at_epoch(epoch)
mags = mags*100*365 # m/day => cm/yr
plt = vj.displacement.plot.MagnitudeContoursPlotter()
plt.plot(lons, lats, mags,
'plots/%s_day%04d.png'%(cmpt,epoch),
contours = contours,
if_topo = False,
unit_label = 'cm/yr',
title = "Rate Raslip year %.3f"%(epoch/365)
)
| [
"zy31415@gmail.com"
] | zy31415@gmail.com |
c9c98e197cfaa40df88820f453e394610790ef19 | 3d62466a21dd4f9cce27544eb0318025949e2385 | /samples/WebApplication/Session.py | 4e487d2d1d9d729d2aa048e5fe7fb4606a779dad | [
"BSD-3-Clause"
] | permissive | zlorb/PyModel | eb6cd24e96429bdd57c3ed2a451d0f4f4073e353 | 502aa0a3708f549ecd803008ab6a2d63a59a2cd3 | refs/heads/master | 2023-08-09T15:32:53.183114 | 2022-02-23T00:13:02 | 2022-02-23T00:13:02 | 50,697,490 | 15 | 8 | NOASSERTION | 2023-07-25T18:13:49 | 2016-01-29T23:07:34 | Python | UTF-8 | Python | false | false | 2,381 | py | """
Experiment with code for WebApplication stepper
"""
import re
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import http.cookiejar
# Scrape page contents
def loginFailed(page):
return (page.find('Incorrect login') > -1)
intPattern = re.compile(r'Number: (\d+)')
def intContents(page):
m = intPattern.search(page)
if m:
return int(m.group(1))
else:
return None
def main():
# Configure. Web application in this sample requires cookies, redirect
cookies = http.cookiejar.CookieJar()
cookie_handler = urllib.request.HTTPCookieProcessor(cookies)
redirect_handler= urllib.request.HTTPRedirectHandler()
debug_handler = urllib.request.HTTPHandler(debuglevel=1) # print headers on console
opener = urllib.request.build_opener(cookie_handler,redirect_handler,debug_handler)
# Constants
site = 'http://localhost/'
path = 'nmodel/webapplication/php/'
webAppPage = 'doStuff.php' # Shouldn't this be called webAppPage, ...Url -?
logoutPage = 'logout.php'
webAppUrl = site + path + webAppPage
logoutUrl = site + path + logoutPage
print('GET to show login page')
print(opener.open(webAppUrl).read())
print('POST to login with sample username and password, pass separate args for POST')
args = urllib.parse.urlencode({'username':'user1', 'password':'123'})
page = opener.open(webAppUrl, args).read() # should show successful login
print(page)
if loginFailed(page):
print('Login FAILED')
print('GET with arg in URL to UpdateInt on server')
num = 99
wrongNum = 'xx'
numArg = urllib.parse.urlencode({'num':num})
print(opener.open("%s?%s" % (webAppUrl,numArg)).read())
print('GET to retrieve page with integer')
page = opener.open(webAppUrl).read()
print(page)
print('%s found in page, expected %s' % (intContents(page), num))
print()
print('GET to logout')
print(opener.open(logoutUrl).read())
print('GET to show login page -- again')
print(opener.open(webAppUrl).read())
print('POST to login with username and WRONG password')
args = urllib.parse.urlencode({'username':'user1', 'password':'321'}) # wrong pass
page = opener.open(webAppUrl, args).read() # should show login fail
print(page)
if loginFailed(page):
print('Login FAILED')
# No logout this time - we're not logged in
if __name__ == '__main__':
main()
| [
"raliclo@gmail.com"
] | raliclo@gmail.com |
b5ba9954411cf7814a07f2dd21891c315d9499f5 | f8d9f893a7afa667a9b615742019cd5c52ee2c59 | /scripts/linters/general_purpose_linter.py | f673a60f7274a7cfafd20f0b08a9c099ea38939d | [
"Apache-2.0"
] | permissive | FareesHussain/oppia | 2ac6c48aaea6a70452b79d665995f6ba6560f70d | 2862b7da750ce332c975b64237791f96189d7aa8 | refs/heads/develop | 2023-08-17T19:25:05.551048 | 2021-10-01T10:36:36 | 2021-10-01T10:36:36 | 323,160,532 | 2 | 0 | Apache-2.0 | 2020-12-20T20:38:45 | 2020-12-20T20:38:44 | null | UTF-8 | Python | false | false | 23,416 | py | # coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lint checks used by all the linters."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import re
from core import python_utils
from . import js_ts_linter
from . import warranted_angular_security_bypasses
from .. import build
from .. import common
from .. import concurrent_task_utils
EXCLUDED_PATHS = (
'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
'integrations/*', 'integrations_dev/*', '*.svg', '*.gif', '*.png',
'*.webp', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
'assets/scripts/*', 'core/domain/proto/*.py', 'core/tests/data/*',
'core/tests/build_sources/*', '*.mp3', '*.mp4', 'node_modules/*',
'typings/*', 'local_compiled_js/*', 'webpack_bundles/*',
'core/tests/services_sources/*', 'core/tests/release_sources/tmp_unzip.zip',
'scripts/linters/test_files/*', 'proto_files/*',
'core/tests/release_sources/tmp_unzip.tar.gz',
'core/templates/combined-tests.spec.ts',
'core/templates/css/oppia-material.css',
'core/templates/google-analytics.initializer.ts',
'extensions/classifiers/proto/*',
'%s/*' % js_ts_linter.COMPILED_TYPESCRIPT_TMP_PATH)
GENERATED_FILE_PATHS = (
'core/templates/expressions/parser.js',)
CONFIG_FILE_PATHS = (
'core/tests/.browserstack.env.example',
'core/tests/protractor.conf.js',
'core/tests/karma.conf.ts',
'core/templates/mathjaxConfig.ts',
'assets/constants.ts',
'assets/rich_text_components_definitions.ts',
'webpack.config.ts',
'webpack.dev.config.ts',
'webpack.prod.config.ts')
BAD_STRINGS_CONSTANTS = {
'"DEV_MODE": false': {
'message': 'Please set the DEV_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
},
'"EMULATOR_MODE": false': {
'message': 'Please set the EMULATOR_MODE variable in constants.ts '
'to true before committing.',
'excluded_files': ()
}
}
BAD_PATTERNS = {
'\t': {
'message': 'Please use spaces instead of tabs.',
'excluded_files': (),
'excluded_dirs': (
'assets/i18n/', 'core/tests/build_sources/assets/')},
'\r': {
'message': 'Please make sure all files only have LF endings (no CRLF).',
'excluded_files': (),
'excluded_dirs': ()},
'<<<<<<<': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'>>>>>>>': {
'message': 'Please fully resolve existing merge conflicts.',
'excluded_files': (),
'excluded_dirs': ()},
'glyphicon': {
'message': 'Please use equivalent material-icons '
'instead of glyphicons.',
'excluded_files': (),
'excluded_dirs': ()}
}
BAD_PATTERNS_REGEXP = [
{
'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^A-Z]+[^\w]*$'),
'message': 'Please assign TODO comments to a user '
'in the format TODO(username): XXX. ',
'excluded_files': (),
'excluded_dirs': ()
}
]
MANDATORY_PATTERNS_REGEXP = [
{
'regexp': re.compile(
r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
'message': 'Please ensure this file should contain a proper '
'copyright notice.',
'included_types': ('.py', '.js', '.sh', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py', ),
'excluded_dirs': EXCLUDED_PATHS
},
{
'regexp': re.compile('from __future__ import unicode_literals'),
'message': 'Please ensure this file should contain unicode_literals '
'future import.',
'included_types': ('.py'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
'__init__.py',),
'excluded_dirs': EXCLUDED_PATHS
}
]
MANDATORY_PATTERNS_JS_REGEXP = [
{
'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
'message': 'Please ensure this file should contain a file '
'overview i.e. a short description of the file.',
'included_types': ('.js', '.ts'),
'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
'excluded_dirs': EXCLUDED_PATHS
}
]
BAD_LINE_PATTERNS_HTML_REGEXP = [
{
'regexp': re.compile(r'text\/ng-template'),
'message': 'The directives must be directly referenced.',
'excluded_files': (),
'excluded_dirs': (
'extensions/answer_summarizers/',
'extensions/classifiers/',
'extensions/objects/',
'extensions/value_generators/')
},
{
'regexp': re.compile(r'[ \t]+$'),
'message': 'There should not be any trailing whitespaces.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\$parent'),
'message': 'Please do not access parent properties ' +
'using $parent. Use the scope object ' +
'for this purpose.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'\s+style\s*=\s*'),
'message': 'Please do not use inline styling.',
'excluded_files': (),
'excluded_dirs': ()
}
]
BAD_PATTERNS_PYTHON_REGEXP = [
{
'regexp': re.compile(r'__author__'),
'message': 'Please remove author tags from this file.',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'ndb\.'),
'message': (
'Please use datastore_services instead of ndb, for example:\n'
'\n'
'datastore_services = models.Registry.import_datastore_services()\n'
'\n'
'class SampleModel(datastore_services.Model):\n'
' ...\n'),
'excluded_files': (),
'excluded_dirs': ('core/platform',),
},
{
'regexp': re.compile(r'\Wprint\('),
'message': 'Please do not use print statement.',
'excluded_files': (
'core/tests/test_utils.py',
'core/tests/performance_framework/perf_domain.py'),
'excluded_dirs': ('scripts/',)
},
{
'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
'message': 'Please remove pylint exclusion if it is unnecessary, or '
'make it human readable with a sentence instead of an id. '
'The id-to-message list can be seen '
'here->http://pylint-messages.wikidot.com/all-codes',
'excluded_files': (),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*quote\('),
'message': 'Please use python_utils.url_quote().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*unquote_plus\('),
'message': 'Please use python_utils.url_unquote_plus().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlencode\('),
'message': 'Please use python_utils.url_encode().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib\..*urlretrieve\('),
'message': 'Please use python_utils.url_retrieve().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
'message': 'Please use python_utils.url_open().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'urllib(2)?\..*Request\('),
'message': 'Please use python_utils.url_request().',
'excluded_files': ('core/python_utils.py', 'core/python_utils_test.py'),
'excluded_dirs': ()
},
{
'regexp': re.compile(r'object\):'),
'message': 'Please use python_utils.OBJECT.',
'excluded_files': (),
'excluded_dirs': ()
},
]
BAD_PATTERNS_MAP = {
'.html': BAD_LINE_PATTERNS_HTML_REGEXP,
'.py': BAD_PATTERNS_PYTHON_REGEXP
}
def is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
"""Checks if file is excluded from the bad patterns check.
Args:
pattern: str. The pattern to be checked against.
filepath: str. Path of the file.
Returns:
bool. Whether to exclude the given file from this
particular pattern check.
"""
return (any(
filepath.startswith(bad_pattern)
for bad_pattern in BAD_PATTERNS[pattern]['excluded_dirs'])
or filepath in BAD_PATTERNS[pattern]['excluded_files'])
def check_bad_pattern_in_file(filepath, file_content, pattern):
"""Detects whether the given pattern is present in the file.
Args:
filepath: str. Path of the file.
file_content: str. Contents of the file.
pattern: dict. (regexp(regex pattern) : Object containing details for
the pattern to be checked. Pattern to match:
message: str. Message to show if pattern matches.
excluded_files: tuple(str). Files to be excluded from matching.
excluded_dirs: tuple(str). Directories to be excluded from
matching).
Returns:
tuple(bool, list(str)). A 2-tuple whose first element is a bool
which set to True if there is bad pattern found else False, whose second
element is a list of failed messages.
"""
error_messages = []
failed = False
regexp = pattern['regexp']
if not (any(
filepath.startswith(excluded_dir)
for excluded_dir in pattern['excluded_dirs'])
or any(
filepath.endswith(excluded_file)
for excluded_file in pattern['excluded_files'])):
bad_pattern_count = 0
for line_num, line in enumerate(file_content, 1):
if line.endswith('\n'):
stripped_line = line[:-1]
else:
stripped_line = line
if stripped_line.endswith('disable-bad-pattern-check'):
continue
if regexp.search(stripped_line):
error_message = ('%s --> Line %s: %s' % (
filepath, line_num, pattern['message']))
error_messages.append(error_message)
bad_pattern_count += 1
if bad_pattern_count:
failed = True
return failed, error_messages
return failed, error_messages
def check_file_type_specific_bad_pattern(filepath, content):
"""Check the file content based on the file's extension.
Args:
filepath: str. Path of the file.
content: str. Contents of the file.
Returns:
bool. True if there is bad pattern else false.
total_error_count: int. The number of errors.
"""
error_messages = []
failed = False
_, extension = os.path.splitext(filepath)
pattern = BAD_PATTERNS_MAP.get(extension)
total_error_count = 0
if pattern:
for regexp in pattern:
failed, error_message = check_bad_pattern_in_file(
filepath, content, regexp)
error_messages.extend(error_message)
if failed:
total_error_count += 1
if total_error_count:
failed = True
return failed, total_error_count, error_messages
class GeneralPurposeLinter(python_utils.OBJECT):
"""Manages all the common linting functions. As an abstract base class, this
is not intended to be used directly.
"""
def __init__(self, files_to_lint, file_cache):
"""Constructs a GeneralPurposeLinter object.
Args:
files_to_lint: list(str). A list of filepaths to lint.
file_cache: object(FileCache). Provides thread-safe access to cached
file content.
"""
# Set path for node.
# The path for node is set explicitly, since otherwise the lint
# tests fail on CircleCI due to the TypeScript files not being
# compilable.
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
self.files_to_lint = files_to_lint
self.file_cache = file_cache
@property
def all_filepaths(self):
"""Returns all file paths."""
return self.files_to_lint
def _check_for_mandatory_pattern_in_file(
self, pattern_list, filepath, failed):
"""Checks for a given mandatory pattern in a file.
Args:
pattern_list: list(dict). The list of the mandatory patterns list to
be checked for in the file.
filepath: str. The path to the file to be linted.
failed: bool. Status of failure of the check.
Returns:
bool. The failure status of the check.
"""
# This boolean list keeps track of the regex matches
# found in the file.
pattern_found_list = []
error_messages = []
file_content = self.file_cache.readlines(filepath)
for index, regexp_to_check in enumerate(
pattern_list):
if (any(filepath.endswith(
allowed_type) for allowed_type in (
regexp_to_check['included_types'])) and (
not any(
filepath.endswith(
pattern) for pattern in (
regexp_to_check['excluded_files'] +
regexp_to_check['excluded_dirs'])))):
pattern_found_list.append(index)
for line in file_content:
if regexp_to_check['regexp'].search(line):
pattern_found_list.pop()
break
if pattern_found_list:
failed = True
for pattern_found in pattern_found_list:
error_message = ('%s --> %s' % (
filepath,
pattern_list[pattern_found]['message']))
error_messages.append(error_message)
return failed, error_messages
def check_mandatory_patterns(self):
"""This function checks that all files contain the mandatory
patterns.
"""
name = 'Mandatory pattern'
error_messages = []
failed = False
sets_of_patterns_to_match = [
MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
for filepath in self.all_filepaths:
for pattern_list in sets_of_patterns_to_match:
failed, mandatory_error_messages = (
self._check_for_mandatory_pattern_in_file(
pattern_list, filepath, failed))
error_messages.extend(mandatory_error_messages)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_bad_patterns(self):
"""This function is used for detecting bad patterns."""
name = 'Bad pattern'
total_files_checked = 0
total_error_count = 0
error_messages = []
all_filepaths = [
filepath for filepath in self.all_filepaths if not (
filepath.endswith('general_purpose_linter.py') or (
filepath.endswith('general_purpose_linter_test.py')))]
failed = False
for filepath in all_filepaths:
file_content = self.file_cache.readlines(filepath)
total_files_checked += 1
for pattern, error in BAD_PATTERNS.items():
if is_filepath_excluded_for_bad_patterns_check(
pattern, filepath):
continue
for line_num, line in enumerate(file_content):
if pattern in line:
failed = True
error_message = ('%s --> Line %s: %s' % (
filepath, line_num + 1,
error['message']))
error_messages.append(error_message)
total_error_count += 1
for regexp in BAD_PATTERNS_REGEXP:
bad_pattern_check_failed, bad_pattern_error_messages = (
check_bad_pattern_in_file(
filepath, file_content, regexp))
if bad_pattern_check_failed:
error_messages.extend(bad_pattern_error_messages)
total_error_count += 1
(
file_type_specific_bad_pattern_failed,
temp_count, bad_pattern_error_messages) = (
check_file_type_specific_bad_pattern(
filepath, file_content))
failed = (
failed or file_type_specific_bad_pattern_failed or
bad_pattern_check_failed)
total_error_count += temp_count
error_messages.extend(bad_pattern_error_messages)
if filepath == 'constants.ts':
for pattern, constants in BAD_STRINGS_CONSTANTS.items():
for line in file_content:
if pattern in line:
failed = True
error_message = ('%s --> %s' % (
filepath,
constants['message']))
error_messages.append(error_message)
total_error_count += 1
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_newline_at_eof(self):
"""This function is used to detect newline at the end of file."""
name = 'Newline at EOF'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
file_content = self.file_cache.readlines(filepath)
file_length = len(file_content)
if (
file_length >= 1 and
not re.search(r'[^\n]\n', file_content[-1])):
error_message = (
'%s --> There should be a single newline at the '
'end of file.' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_disallowed_flags(self):
"""This function is used to disallow flags."""
name = 'Disallow flags'
disallow_flag = (
'eslint-disable-next-line oppia/no-bypass-security-phrase')
error_messages = []
files_to_lint = self.all_filepaths
failed = False
excluded_files = (
warranted_angular_security_bypasses
.EXCLUDED_BYPASS_SECURITY_TRUST_FILES)
allowed_files = ''
for filepath in files_to_lint:
for excluded_file in excluded_files:
if excluded_file in filepath:
allowed_files = filepath
if not filepath.endswith('.ts') or filepath == allowed_files:
continue
file_content = self.file_cache.read(filepath)
if disallow_flag in file_content:
error_message = (
'%s --> Please do not use "no-bypass-security-phrase" flag.'
' It is only expected to be used in files listed in'
' warranted_angular_security_bypasses.py' % filepath)
error_messages.append(error_message)
failed = True
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def check_extra_js_files(self):
"""Checks if the changes made include extra js files in core
or extensions folder which are not specified in
build.JS_FILEPATHS_NOT_TO_BUILD.
Returns:
TaskResult. A TaskResult object representing the result of the lint
check.
"""
name = 'Extra JS files'
error_messages = []
files_to_lint = self.all_filepaths
failed = False
for filepath in files_to_lint:
if filepath.endswith(
('.js')) and filepath.startswith(
('core/templates', 'extensions')) and (
filepath not in build.JS_FILEPATHS_NOT_TO_BUILD
) and not filepath.endswith('protractor.js'):
error_message = (
'%s --> Found extra .js file' % filepath)
error_messages.append(error_message)
failed = True
if failed:
err_msg = (
'If you want the above files to be present as js files, '
'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
'build.py. Otherwise, rename them to .ts')
error_messages.append(err_msg)
return concurrent_task_utils.TaskResult(
name, failed, error_messages, error_messages)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
list(TaskResult). A list of TaskResult objects representing the
results of the lint checks.
"""
if not self.all_filepaths:
return [
concurrent_task_utils.TaskResult(
'General purpose lint', False, [],
['There are no files to be checked.'])]
task_results = [
self.check_mandatory_patterns(), self.check_bad_patterns(),
self.check_newline_at_eof(), self.check_extra_js_files(),
self.check_disallowed_flags()]
return task_results
def get_linters(files_to_lint, file_cache):
"""Creates GeneralPurposeLinter object and returns it.
Args:
files_to_lint: list(str). A list of filepaths to lint.
file_cache: object(FileCache). Provides thread-safe access to cached
file content.
Returns:
tuple(GeneralPurposeLinter, None). A 2-tuple of custom and third_party
linter objects.
"""
custom_linter = GeneralPurposeLinter(files_to_lint, file_cache)
return custom_linter, None
| [
"noreply@github.com"
] | FareesHussain.noreply@github.com |
008051dab9733ed2d9f2fb7454f439c579ba2b1d | d0cbfb54c336582c72e8d36c26c03a41d81a1bf4 | /djblog/blog/urls.py | 3f9a68d0cf928b549348495c01231e5567c56b8b | [
"MIT"
] | permissive | ghacer/djwebapp-blog | ea523928112572d34caf62c1bcede2e52c71dc6b | 0101b0356a6fa2d364f0da04adc8956938cef78c | refs/heads/master | 2021-01-18T01:37:25.283289 | 2014-11-22T02:09:38 | 2014-11-22T02:09:38 | 39,756,290 | 0 | 1 | null | 2015-07-27T05:15:11 | 2015-07-27T05:15:11 | null | UTF-8 | Python | false | false | 370 | py | from django.conf.urls import patterns, include, url
# import .views
urlpatterns = patterns('',
url(r"^$", "blog.views.index", name="index"),
url(r"^post/(?P<pk>\d+)/$", "blog.views.post", name="post"),
url(r"^category/(?P<pk>\d+)/$", "blog.views.category", name="category"),
)
| [
"wwq0327@gmail.com"
] | wwq0327@gmail.com |
7cf68892b2e25b23ddffda245dbbce948ae8f6ce | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_206/717.py | f8d716b09ed85efab988ce26b530693ae8483649 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | t = int(raw_input()) # read a line with a single integer
import numpy as np
for i in xrange(1, t + 1):
D, N = [s for s in raw_input().split(" ")]
D = int(D) #destination distance
N = int(N) #number of horses
K = np.zeros(N) #start position
S = np.zeros(N) #speed
T = np.zeros(N) #arrival time
for j in xrange(0,N):
string = raw_input().split(" ")
K[j] = int(string[0]) #starting position of jth horse
S[j] = int(string[1]) #speed of jth horse
T[j] = float(D-K[j])/float(S[j])
time = max(T)
optimal_speed = D / float(time)
print "Case #{}: {}".format(i, optimal_speed)
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
035f8507902b4e954a369b882f9c67efe0e953c2 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/dis_content_rsp.py | 67f2dccb885bb01a85eae6061603d4173e14226a | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 5,056 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class DisContentRsp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'stream_name': 'str',
'ak': 'str',
'sk': 'str',
'project_id': 'str'
}
attribute_map = {
'stream_name': 'streamName',
'ak': 'ak',
'sk': 'sk',
'project_id': 'projectId'
}
def __init__(self, stream_name=None, ak=None, sk=None, project_id=None):
"""DisContentRsp
The model defined in huaweicloud sdk
:param stream_name: 通道名称
:type stream_name: str
:param ak: 租户的AK
:type ak: str
:param sk: 租户的SK
:type sk: str
:param project_id: 项目id
:type project_id: str
"""
self._stream_name = None
self._ak = None
self._sk = None
self._project_id = None
self.discriminator = None
if stream_name is not None:
self.stream_name = stream_name
if ak is not None:
self.ak = ak
if sk is not None:
self.sk = sk
if project_id is not None:
self.project_id = project_id
@property
def stream_name(self):
"""Gets the stream_name of this DisContentRsp.
通道名称
:return: The stream_name of this DisContentRsp.
:rtype: str
"""
return self._stream_name
@stream_name.setter
def stream_name(self, stream_name):
"""Sets the stream_name of this DisContentRsp.
通道名称
:param stream_name: The stream_name of this DisContentRsp.
:type stream_name: str
"""
self._stream_name = stream_name
@property
def ak(self):
"""Gets the ak of this DisContentRsp.
租户的AK
:return: The ak of this DisContentRsp.
:rtype: str
"""
return self._ak
@ak.setter
def ak(self, ak):
"""Sets the ak of this DisContentRsp.
租户的AK
:param ak: The ak of this DisContentRsp.
:type ak: str
"""
self._ak = ak
@property
def sk(self):
"""Gets the sk of this DisContentRsp.
租户的SK
:return: The sk of this DisContentRsp.
:rtype: str
"""
return self._sk
@sk.setter
def sk(self, sk):
"""Sets the sk of this DisContentRsp.
租户的SK
:param sk: The sk of this DisContentRsp.
:type sk: str
"""
self._sk = sk
@property
def project_id(self):
"""Gets the project_id of this DisContentRsp.
项目id
:return: The project_id of this DisContentRsp.
:rtype: str
"""
return self._project_id
@project_id.setter
def project_id(self, project_id):
"""Sets the project_id of this DisContentRsp.
项目id
:param project_id: The project_id of this DisContentRsp.
:type project_id: str
"""
self._project_id = project_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DisContentRsp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
ff4c97e9a54013b3ff6342b1b25da663fd7d7cf0 | 6f097812440f1cf728d9a0c2706b66e706de0824 | /uclptb/models.py | 8a9993cf6b152f7ab43b7dc96673b9822736a3a6 | [] | no_license | medical-projects/uclp-tb | 105f915c3042c53b769681fb30d7f06fb21fd60a | ef9dbdb22846be1a0d38e63b34532f7ff414762d | refs/heads/master | 2021-06-22T01:30:55.287491 | 2016-07-05T16:45:20 | 2016-07-05T16:45:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | """
uclptb models.
"""
from django.db.models import fields
from opal import models
class Demographics(models.Demographics): pass
class Location(models.Location): pass
class Allergies(models.Allergies): pass
class Diagnosis(models.Diagnosis): pass
class PastMedicalHistory(models.PastMedicalHistory): pass
class Treatment(models.Treatment): pass
class Investigation(models.Investigation): pass
class ReferralRoute(models.ReferralRoute): pass
class SymptomComplex(models.SymptomComplex): pass
class PatientConsultation(models.PatientConsultation): pass
| [
"fredkingham@gmail.com"
] | fredkingham@gmail.com |
cf86932c5694e36b1c1333e4fd4e94fd12b2bb41 | 7876e76aa397b7c2dfae6fa9dbdeb9bd2c3e678a | /plugins/xafs/feffdat.py | f4045f5c9a23179651ca34e657a96122dc4b9261 | [
"BSD-2-Clause"
] | permissive | astrojuan/xraylarch | 5f8facebd22482b2218d320fe45757d4f7243579 | e094f42057b2c6f0f3aac3e46a43f75935f8b81b | refs/heads/master | 2020-03-19T02:46:48.485894 | 2018-05-30T04:41:07 | 2018-05-30T04:41:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,122 | py | #!/usr/bin/env python
"""
feffdat provides the following function related to
reading and dealing with Feff.data files in larch:
path1 = read_feffdat('feffNNNN.dat')
returns a Feff Group -- a special variation of a Group -- for
the path represented by the feffNNNN.dat
group = ff2chi(pathlist)
creates a group that contains the chi(k) for the sum of paths.
"""
import six
import numpy as np
from scipy.interpolate import UnivariateSpline
from lmfit import Parameters
from larch import (Group, Parameter, isParameter,
ValidateLarchPlugin,
param_value, isNamedClass)
from larch.utils.strutils import fix_varname, b32hash
from larch_plugins.xafs import ETOK, set_xafsGroup
from larch_plugins.xray import atomic_mass, atomic_symbol
from larch.fitting import group2params
SMALL = 1.e-6
class FeffDatFile(Group):
    """Container for the contents of a single *feffNNNN.dat* file.

    Parses the header (potentials, exchange model, Fermi-level data),
    the path geometry, and the scattering arrays (k grid, phases,
    amplitudes, mean free path, ...) into attributes of a larch Group.
    """
    def __init__(self, filename=None, _larch=None, **kws):
        self._larch = _larch
        kwargs = dict(name='feff.dat: %s' % filename)
        kwargs.update(kws)
        Group.__init__(self, **kwargs)
        if filename is not None:
            self.__read(filename)
    def __repr__(self):
        if self.filename is not None:
            return '<Feff.dat File Group: %s>' % self.filename
        return '<Feff.dat File Group (empty)>'
    def __copy__(self):
        # copies simply re-read the file rather than duplicating parsed state
        return FeffDatFile(filename=self.filename, _larch=self._larch)
    def __deepcopy__(self, memo):
        return FeffDatFile(filename=self.filename, _larch=self._larch)
    # reff, nleg and rmass are exposed as effectively read-only properties:
    # the setters deliberately discard any assignment.
    @property
    def reff(self): return self.__reff__
    @reff.setter
    def reff(self, val): pass
    @property
    def nleg(self): return self.__nleg__
    @nleg.setter
    def nleg(self, val): pass
    @property
    def rmass(self):
        """reduced mass for a path, computed lazily from the geometry"""
        if self.__rmass is None:
            rmass = 0
            for atsym, iz, ipot, amass, x, y, z in self.geom:
                rmass += 1.0/max(1., amass)
            self.__rmass = 1./rmass
        return self.__rmass
    @rmass.setter
    def rmass(self, val): pass
    def __read(self, filename):
        """parse a feffNNNN.dat file into this group's attributes"""
        try:
            lines = open(filename, 'r').readlines()
        except:
            print( 'Error reading file %s ' % filename)
            return
        self.filename = filename
        mode = 'header'
        self.potentials, self.geom = [], []
        data = []
        pcounter = 0
        iline = 0
        for line in lines:
            iline += 1
            line = line[:-1]
            if line.startswith('#'): line = line[1:]
            line = line.strip()
            if iline == 1:
                # first line holds the title (cols 0-63) and Feff version
                self.title = line[:64].strip()
                self.version = line[64:].strip()
                continue
            if line.startswith('k') and line.endswith('real[p]@#'):
                # column-header line announcing the numerical data section
                mode = 'arrays'
                continue
            elif '----' in line[2:10]:
                # separator line announcing the path-geometry section
                mode = 'path'
                continue
            #
            if (mode == 'header' and
                line.startswith('Abs') or line.startswith('Pot')):
                # NOTE(review): `and` binds tighter than `or`, so 'Pot' lines
                # are matched regardless of mode -- confirm this is intended
                words = line.replace('=', ' ').split()
                ipot, z, rmt, rnm = (0, 0, 0, 0)
                words.pop(0)
                if line.startswith('Pot'):
                    ipot = int(words.pop(0))
                iz = int(words[1])
                rmt = float(words[3])
                rnm = float(words[5])
                self.potentials.append((ipot, iz, rmt, rnm))
            elif mode == 'header' and line.startswith('Gam_ch'):
                words = line.replace('=', ' ').split(' ', 2)
                self.gam_ch = float(words[1])
                self.exch = words[2]
            elif mode == 'header' and line.startswith('Mu'):
                words = line.replace('=', ' ').split()
                self.mu = float(words[1])
                self.kf = float(words[3])
                self.vint = float(words[5])
                self.rs_int= float(words[7])
            elif mode == 'path':
                pcounter += 1
                if pcounter == 1:
                    # path summary line: nleg, degeneracy, reff, rnorman, edge
                    w = [float(x) for x in line.split()[:5]]
                    self.__nleg__ = int(w.pop(0))
                    self.degen, self.__reff__, self.rnorman, self.edge = w
                elif pcounter > 2:
                    # one atom of the path: x, y, z, ipot, iz[, label]
                    words = line.split()
                    xyz = [float(x) for x in words[:3]]
                    ipot = int(words[3])
                    iz = int(words[4])
                    if len(words) > 5:
                        lab = words[5]
                    else:
                        lab = atomic_symbol(iz, _larch=self._larch)
                    amass = atomic_mass(iz, _larch=self._larch)
                    geom = [lab, iz, ipot, amass] + xyz
                    self.geom.append(tuple(geom))
            elif mode == 'arrays':
                # numerical rows must have exactly 7 columns to be kept
                d = np.array([float(x) for x in line.split()])
                if len(d) == 7:
                    data.append(d)
        data = np.array(data).transpose()
        self.k = data[0]
        self.real_phc = data[1]
        self.mag_feff = data[2]
        self.pha_feff = data[3]
        self.red_fact = data[4]
        self.lam = data[5]
        self.rep = data[6]
        # combined total phase and effective amplitude used by FeffPathGroup
        self.pha = data[1] + data[3]
        self.amp = data[2] * data[4]
        self.__rmass = None  # reduced mass of path
PATH_PARS = ('degen', 's02', 'e0', 'ei', 'deltar', 'sigma2', 'third', 'fourth')
PATHPAR_FMT = "%s__%s"
class FeffPathGroup(Group):
    """A single scattering path built from a *feffNNNN.dat* file, with
    adjustable path parameters (degen, s02, e0, ei, deltar, sigma2,
    third, fourth).

    Each path parameter may be a number or a string, in which case it is
    treated as a constraint expression evaluated by the larch fit
    evaluator (with `reff` and `feffpath` available in its symbol table).
    """
    def __init__(self, filename=None, _larch=None,
                 label=None, s02=None, degen=None, e0=None,
                 ei=None, deltar=None, sigma2=None,
                 third=None, fourth=None, **kws):
        kwargs = dict(name='FeffPath: %s' % filename)
        kwargs.update(kws)
        Group.__init__(self, **kwargs)
        self._larch = _larch
        self.filename = filename
        self.params = None
        self.label = label
        self.spline_coefs = None
        def_degen = 1
        self._feffdat = None
        if filename is not None:
            self._feffdat = FeffDatFile(filename=filename, _larch=_larch)
            self.geom = self._feffdat.geom
            def_degen = self._feffdat.degen
            if self.label is None:
                self.label = self.__geom2label()
        # unset path parameters fall back to conventional defaults
        self.degen = def_degen if degen is None else degen
        self.s02 = 1.0 if s02 is None else s02
        self.e0 = 0.0 if e0 is None else e0
        self.ei = 0.0 if ei is None else ei
        self.deltar = 0.0 if deltar is None else deltar
        self.sigma2 = 0.0 if sigma2 is None else sigma2
        self.third = 0.0 if third is None else third
        self.fourth = 0.0 if fourth is None else fourth
        self.k = None
        self.chi = None
        if self._feffdat is not None:
            self.create_spline_coefs()
    def __geom2label(self):
        """generate label by hashing path geometry"""
        rep = []
        if self.geom is not None:
            for atom in self.geom:
                rep.extend(atom)
        if self._feffdat is not None:
            rep.append(self._feffdat.degen)
            rep.append(self._feffdat.reff)
        for attr in ('s02', 'e0', 'ei', 'deltar', 'sigma2', 'third', 'fourth'):
            rep.append(getattr(self, attr, '_'))
        s = "|".join([str(i) for i in rep])
        return "p%s" % (b32hash(s)[:8].lower())
    def __copy__(self):
        # note: the label is regenerated by the copy, not carried over
        return FeffPathGroup(filename=self.filename, _larch=self._larch,
                             s02=self.s02, degen=self.degen, e0=self.e0,
                             ei=self.ei, deltar=self.deltar, sigma2=self.sigma2,
                             third=self.third, fourth=self.fourth)
    def __deepcopy__(self, memo):
        return FeffPathGroup(filename=self.filename, _larch=self._larch,
                             s02=self.s02, degen=self.degen, e0=self.e0,
                             ei=self.ei, deltar=self.deltar, sigma2=self.sigma2,
                             third=self.third, fourth=self.fourth)
    # reff, nleg and rmass are read-only views onto the parsed feff.dat
    @property
    def reff(self): return self._feffdat.reff
    @reff.setter
    def reff(self, val): pass
    @property
    def nleg(self): return self._feffdat.nleg
    @nleg.setter
    def nleg(self, val): pass
    @property
    def rmass(self): return self._feffdat.rmass
    @rmass.setter
    def rmass(self, val): pass
    def __repr__(self):
        if self.filename is not None:
            return '<FeffPath Group %s>' % self.filename
        return '<FeffPath Group (empty)>'
    def create_path_params(self):
        """
        create Path Parameters within the current fiteval
        """
        self.params = Parameters(asteval=self._larch.symtable._sys.fiteval)
        if self.label is None:
            self.label = self.__geom2label()
        self.store_feffdat()
        for pname in PATH_PARS:
            val = getattr(self, pname)
            attr = 'value'
            if isinstance(val, six.string_types):
                # strings are installed as constraint expressions
                attr = 'expr'
            kws = {'vary': False, attr: val}
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            self.params.add(parname, **kws)
    def create_spline_coefs(self):
        """pre-calculate spline coefficients for feff data"""
        self.spline_coefs = {}
        fdat = self._feffdat
        self.spline_coefs['pha'] = UnivariateSpline(fdat.k, fdat.pha, s=0)
        self.spline_coefs['amp'] = UnivariateSpline(fdat.k, fdat.amp, s=0)
        self.spline_coefs['rep'] = UnivariateSpline(fdat.k, fdat.rep, s=0)
        self.spline_coefs['lam'] = UnivariateSpline(fdat.k, fdat.lam, s=0)
    def store_feffdat(self):
        """stores data about this Feff path in the fiteval
        symbol table for use as `reff` and in sigma2 calcs
        """
        fiteval = self._larch.symtable._sys.fiteval
        fdat = self._feffdat
        fiteval.symtable['feffpath'] = fdat
        fiteval.symtable['reff'] = fdat.reff
        return fiteval
    def __path_params(self, **kws):
        """evaluate path parameter value. Returns
        (degen, s02, e0, ei, deltar, sigma2, third, fourth)
        """
        # put 'reff' and '_feffdat' into the symboltable so that
        # they can be used in constraint expressions, and get
        # fiteval evaluator
        self.store_feffdat()
        if self.params is None:
            self.create_path_params()
        out = []
        for pname in PATH_PARS:
            # an explicit keyword overrides the stored parameter value
            val = kws.get(pname, None)
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            if val is None:
                val = self.params[parname]._getval()
            out.append(val)
        return out
    def path_paramvals(self, **kws):
        """return current path parameter values as a dict"""
        # NOTE(review): **kws is accepted but not forwarded to
        # __path_params() -- confirm whether overrides were intended here
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self.__path_params()
        return dict(degen=deg, s02=s02, e0=e0, ei=ei, deltar=delr,
                    sigma2=ss2, third=c3, fourth=c4)
    def report(self):
        "return text report of parameters"
        (deg, s02, e0, ei, delr, ss2, c3, c4) = self.__path_params()
        geomlabel = ' atom x y z ipot'
        geomformat = ' %4s % .4f, % .4f, % .4f %i'
        out = [' Path %s, Feff.dat file = %s' % (self.label, self.filename)]
        out.append(geomlabel)
        for atsym, iz, ipot, amass, x, y, z in self.geom:
            s = geomformat % (atsym, x, y, z, ipot)
            if ipot == 0: s = "%s (absorber)" % s
            out.append(s)
        stderrs = {}
        out.append(' {:7s}= {:.5f}'.format('reff', self._feffdat.reff))
        for pname in ('degen', 's02', 'e0', 'r',
                      'deltar', 'sigma2', 'third', 'fourth', 'ei'):
            val = strval = getattr(self, pname, 0)
            parname = fix_varname(PATHPAR_FMT % (pname, self.label))
            std = None
            if pname == 'r':
                # 'r' is a derived quantity: reff + deltar
                parname = fix_varname(PATHPAR_FMT % ('deltar', self.label))
                par = self.params.get(parname, None)
                val = par.value + self._feffdat.reff
                # NOTE(review): if self.deltar is numeric this concatenation
                # raises TypeError -- probably needs str(getattr(...))
                strval = 'reff + ' + getattr(self, 'deltar', 0)
                std = par.stderr
            else:
                par = self.params.get(parname, None)
                if par is not None:
                    val = par.value
                    std = par.stderr
            if std is None or std <= 0:
                svalue = "{: 5f}".format(val)
            else:
                svalue = "{: 5f} +/- {:5f}".format(val, std)
            if pname == 's02': pname = 'n*s02'
            svalue = " {:7s}= {:s}".format(pname, svalue)
            if isinstance(strval, six.string_types):
                # show the underlying constraint expression alongside the value
                svalue = "{:s} '{:s}'".format(svalue, strval)
            if val == 0 and pname in ('third', 'fourth', 'ei'):
                # suppress zero-valued optional parameters from the report
                continue
            out.append(svalue)
        return '\n'.join(out)
    def _calc_chi(self, k=None, kmax=None, kstep=None, degen=None, s02=None,
                  e0=None, ei=None, deltar=None, sigma2=None,
                  third=None, fourth=None, debug=False, interp='cubic', **kws):
        """calculate chi(k) with the provided parameters"""
        fdat = self._feffdat
        if fdat.reff < 0.05:
            self._larch.writer.write('reff is too small to calculate chi(k)')
            return
        # make sure we have a k array
        if k is None:
            if kmax is None:
                kmax = 30.0
            kmax = min(max(fdat.k), kmax)
            if kstep is None: kstep = 0.05
            k = kstep * np.arange(int(1.01 + kmax/kstep), dtype='float64')
        reff = fdat.reff
        # get values for all the path parameters
        (degen, s02, e0, ei, deltar, sigma2, third, fourth) = \
            self.__path_params(degen=degen, s02=s02, e0=e0, ei=ei,
                               deltar=deltar, sigma2=sigma2,
                               third=third, fourth=fourth)
        # create e0-shifted energy and k, careful to look for |e0| ~= 0.
        en = k*k - e0*ETOK
        if min(abs(en)) < SMALL:
            try:
                en[np.where(abs(en) < 2*SMALL)] = SMALL
            except ValueError:
                pass
        # q is the e0-shifted wavenumber
        q = np.sign(en)*np.sqrt(abs(en))
        # lookup Feff.dat values (pha, amp, rep, lam)
        if interp.startswith('lin'):
            pha = np.interp(q, fdat.k, fdat.pha)
            amp = np.interp(q, fdat.k, fdat.amp)
            rep = np.interp(q, fdat.k, fdat.rep)
            lam = np.interp(q, fdat.k, fdat.lam)
        else:
            # default: cubic splines pre-computed in create_spline_coefs()
            pha = self.spline_coefs['pha'](q)
            amp = self.spline_coefs['amp'](q)
            rep = self.spline_coefs['rep'](q)
            lam = self.spline_coefs['lam'](q)
        if debug:
            self.debug_k = q
            self.debug_pha = pha
            self.debug_amp = amp
            self.debug_rep = rep
            self.debug_lam = lam
        # p = complex wavenumber, and its square:
        pp = (rep + 1j/lam)**2 + 1j * ei * ETOK
        p = np.sqrt(pp)
        # the xafs equation:
        cchi = np.exp(-2*reff*p.imag - 2*pp*(sigma2 - pp*fourth/3) +
                      1j*(2*q*reff + pha +
                          2*p*(deltar - 2*sigma2/reff - 2*pp*third/3) ))
        cchi = degen * s02 * amp * cchi / (q*(reff + deltar)**2)
        # q[0] is (near) zero, so extrapolate that point from its neighbors
        cchi[0] = 2*cchi[1] - cchi[2]
        # outputs:
        self.k = k
        self.p = p
        self.chi = cchi.imag
        self.chi_imag = -cchi.real
@ValidateLarchPlugin
def _path2chi(path, paramgroup=None, _larch=None, **kws):
    """calculate chi(k) for a Feff Path,
    optionally setting path parameter values
    output chi array will be written to path group

    Parameters:
    ------------
      path:        a FeffPath Group
      paramgroup:  a Parameter Group for calculating Path Parameters [None]
      kmax:        maximum k value for chi calculation [20].
      kstep:       step in k value for chi calculation [0.05].
      k:           explicit array of k values to calculate chi.

    Returns:
    ---------
      None - outputs are written to path group
    """
    # register any parameter group so constraint expressions can resolve
    group2params(paramgroup, _larch=_larch)
    if not isNamedClass(path, FeffPathGroup):
        # bug fix: `msg` was used here without ever being defined, so an
        # invalid path raised NameError instead of reporting the problem
        _larch.writer.write('%s is not a valid Feff Path' % path)
        return
    path.create_path_params()
    path._calc_chi(**kws)
@ValidateLarchPlugin
def _ff2chi(pathlist, group=None, paramgroup=None, _larch=None,
            k=None, kmax=None, kstep=0.05, **kws):
    """sum chi(k) over a list of FeffPath Groups.

    Parameters:
    ------------
      pathlist:    list of FeffPath Groups
      group:       output Group; a fresh Group is created when None
      paramgroup:  Parameter Group for evaluating Path Parameters [None]
      kmax:        maximum k value for the chi calculation [20].
      kstep:       k-grid spacing for the chi calculation [0.05].
      k:           explicit array of k values to calculate chi on.

    Returns:
    ---------
      group holding arrays `k` and `chi` (chi summed over all paths)

    Equivalent to calling path2chi() on every path and accumulating
    the resulting chi arrays.
    """
    group2params(paramgroup, _larch=_larch)
    write = _larch.writer.write
    # evaluate each path's chi(k); all share the same k grid settings
    for p in pathlist:
        if not isNamedClass(p, FeffPathGroup):
            write('%s is not a valid Feff Path' % p)
            return
        p.create_path_params()
        p._calc_chi(k=k, kstep=kstep, kmax=kmax)
    kgrid = pathlist[0].k[:]
    total = np.zeros_like(kgrid)
    for p in pathlist:
        total += p.chi
    group = Group() if group is None else set_xafsGroup(group, _larch=_larch)
    group.k = kgrid
    group.chi = total
    return group
def feffpath(filename=None, _larch=None, label=None, s02=None,
             degen=None, e0=None, ei=None, deltar=None, sigma2=None,
             third=None, fourth=None, **kws):
    """create a Feff Path Group from a *feffNNNN.dat* file.

    Parameters:
    -----------
      filename:  name (full path of) *feffNNNN.dat* file
      label:     label for path [file name]
      degen:     path degeneracy, N [taken from file]
      s02:       S_0^2    value or parameter [1.0]
      e0:        E_0      value or parameter [0.0]
      deltar:    delta_R  value or parameter [0.0]
      sigma2:    sigma^2  value or parameter [0.0]
      third:     c_3      value or parameter [0.0]
      fourth:    c_4      value or parameter [0.0]
      ei:        E_i      value or parameter [0.0]

    Every "value or parameter" option accepts either a plain number or a
    Parameter created with param().

    Returns:
    ---------
      a FeffPath Group.
    """
    # thin factory: all the work happens in the FeffPathGroup constructor
    return FeffPathGroup(filename=filename, _larch=_larch, label=label,
                         s02=s02, degen=degen, e0=e0, ei=ei,
                         deltar=deltar, sigma2=sigma2,
                         third=third, fourth=fourth)
def registerLarchGroups():
    """larch plugin hook: Group subclasses provided by this module"""
    return (FeffDatFile, FeffPathGroup)
def registerLarchPlugin():
    """larch plugin hook: functions exported into the _xafs namespace"""
    return ('_xafs', {'feffpath': feffpath,
                      'path2chi': _path2chi,
                      'ff2chi': _ff2chi})
| [
"newville@cars.uchicago.edu"
] | newville@cars.uchicago.edu |
a962541a52c1468a9fc1c8e4406db08c41303414 | dae212cb615e5eba3fe8108799a39bc09d7bddb6 | /grokking-coding/cyclic_sort/problem_challenge_1.py | 572ef55c35c78732d5581d7b37aa4e9dcc615fb7 | [] | no_license | cs-cordero/interview-prep | a291b5ce2fb8461449e6e27a1f23e12b54223540 | c3b5b4612f3641572d2237e36aa23019c680c799 | refs/heads/master | 2022-05-23T10:39:59.817378 | 2020-04-29T12:57:12 | 2020-04-29T12:57:12 | 76,767,250 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | from typing import List
def find_corrupt_numbers(nums: List[int]) -> List[int]:
    """Find the [duplicate, missing] pair in a corrupted 1..n sequence.

    Cyclic-sorts each value toward its natural slot (value v belongs at
    index v - 1), marking the starting slot with None.  A value whose
    target slot already holds it is the duplicate; the slot left holding
    None corresponds to the missing number.  Runs in O(n) time, in place.
    """
    corrupt = []
    for idx in range(len(nums)):
        if nums[idx] == idx + 1:
            continue
        carried = nums[idx]
        nums[idx] = None
        # follow the displacement chain until it closes or leaves range
        while carried and 0 < carried <= len(nums):
            if carried == nums[carried - 1]:
                corrupt.append(carried)
                break
            nums[carried - 1], carried = carried, nums[carried - 1]
    for idx, value in enumerate(nums):
        if value is None:
            corrupt.append(idx + 1)
            break
    return corrupt
| [
"ccordero@protonmail.com"
] | ccordero@protonmail.com |
184619b837b7e49365075a3d962d2bbd1c417295 | 8256963b73a829ec5054b8c3cb707250a8c6054a | /scooter/models/__models.py | f5946eb13876de57266266c8cd2a86b855a4396b | [
"MIT"
] | permissive | vyahello/rent-electro-scooter | bbd2d8c51536a832baeadbcd2a328de2174638ac | 34b85b0538d61315e325842f4c1b5094a94d2c0d | refs/heads/master | 2021-07-06T11:48:20.303858 | 2021-04-23T16:06:33 | 2021-04-23T16:06:33 | 236,315,479 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # pylint: disable=unused-import
# noinspection PyUnresolvedReferences
from scooter.models import rentals # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import locations # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import scooters # noqa: F401
# noinspection PyUnresolvedReferences
from scooter.models import users # noqa: F401
| [
"vyahello@gmail.com"
] | vyahello@gmail.com |
a5e12c032d5bc0f2f18a84268727ab3ea96e0593 | ddb3656fbacef606ac3cfa53eb74a99be90202cd | /selfdrive/hardware/eon/androidd.py | b836eb01294dc6258395bbe29ba7b767d3aca242 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | ErichMoraga/openpilot | f70b353099d3643c9f8d16fb8003811418c95656 | 2f73be29651e34e62eaf18472f9219cea57c177a | refs/heads/812 | 2023-08-02T16:58:57.870050 | 2023-07-20T17:33:41 | 2023-07-20T17:33:41 | 140,953,335 | 58 | 77 | MIT | 2023-07-30T15:33:18 | 2018-07-14T14:41:16 | C | UTF-8 | Python | false | false | 2,295 | py | #!/usr/bin/env python3
import os
import time
import psutil
from typing import Optional
from common.realtime import set_core_affinity, set_realtime_priority
from selfdrive.swaglog import cloudlog
MAX_MODEM_CRASHES = 3
MODEM_PATH = "/sys/devices/soc/2080000.qcom,mss/subsys5"
WATCHED_PROCS = ["zygote", "zygote64", "/system/bin/servicemanager", "/system/bin/surfaceflinger"]
def get_modem_crash_count() -> Optional[int]:
  """Read the modem subsystem crash counter; None if it cannot be read."""
  crash_file = os.path.join(MODEM_PATH, "crash_count")
  try:
    with open(crash_file) as f:
      return int(f.read())
  except Exception:
    cloudlog.exception("Error reading modem crash count")
    return None
def get_modem_state() -> str:
  """Read the modem subsystem state string; empty string on failure."""
  state_file = os.path.join(MODEM_PATH, "state")
  try:
    with open(state_file) as f:
      return f.read().strip()
  except Exception:
    cloudlog.exception("Error reading modem state")
    return ""
def main():
  """Watch critical Android services and the Qualcomm modem subsystem.

  Logs whenever a watched service pid changes or the modem changes
  state / crashes, and force-kills the modem after repeated crashes.
  Loops forever at roughly 1 Hz.
  """
  set_core_affinity(1)
  set_realtime_priority(1)
  procs = {}
  crash_count = 0
  modem_killed = False
  modem_state = "ONLINE"
  while True:
    # check critical android services
    if any(p is None or not p.is_running() for p in procs.values()) or not len(procs):
      # (re)scan the process table for the watched service names
      cur = {p: None for p in WATCHED_PROCS}
      for p in psutil.process_iter(attrs=['cmdline']):
        cmdline = None if not len(p.info['cmdline']) else p.info['cmdline'][0]
        if cmdline in WATCHED_PROCS:
          cur[cmdline] = p
      if len(procs):
        # skip logging on the very first scan, when procs is still empty
        for p in WATCHED_PROCS:
          if cur[p] != procs[p]:
            cloudlog.event("android service pid changed", proc=p, cur=cur[p], prev=procs[p])
      procs.update(cur)
    if os.path.exists(MODEM_PATH):
      # check modem state
      state = get_modem_state()
      if state != modem_state and not modem_killed:
        cloudlog.event("modem state changed", state=state)
      modem_state = state
      # check modem crashes
      cnt = get_modem_crash_count()
      if cnt is not None:
        if cnt > crash_count:
          cloudlog.event("modem crash", count=cnt)
        crash_count = cnt
      # handle excessive modem crashes
      if crash_count > MAX_MODEM_CRASHES and not modem_killed:
        cloudlog.event("killing modem")
        with open("/sys/kernel/debug/msm_subsys/modem", "w") as f:
          f.write("put")
        modem_killed = True
    time.sleep(1)
main()
| [
"user@comma.ai"
] | user@comma.ai |
0f213d8dd1ec7a658623f0215997a3592e0df9ed | de707c94c91f554d549e604737b72e6c86eb0755 | /math/0x02-calculus/10-matisse.py | 2580a5ebc2eeb7f237af29cff5d2d583248ae911 | [] | no_license | ejonakodra/holbertonschool-machine_learning-1 | 885cf89c1737573228071e4dc8e26304f393bc30 | 8834b201ca84937365e4dcc0fac978656cdf5293 | refs/heads/main | 2023-07-10T09:11:01.298863 | 2021-08-11T03:43:59 | 2021-08-11T03:43:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | #!/usr/bin/env python3
""" defines a function that calculates the derivative of a polynomial """
def poly_derivative(poly):
    """
    calculates the derivative of the given polynomial

    Parameters:
        poly (list): list of coefficients representing a polynomial
            the index of the list represents the power of x
            the coefficient belongs to

    Returns:
        a new list of coefficients representing the derivative
        [0], if the derivative is 0
        None, if poly is not valid
    """
    # reject non-lists, empty lists, and non-numeric coefficients
    if type(poly) is not list or len(poly) < 1:
        return None
    for coefficient in poly:
        if type(coefficient) is not int and type(coefficient) is not float:
            return None
    # the derivative of a constant is 0
    if len(poly) == 1:
        return [0]
    # power rule: d/dx(c * x^p) = p * c * x^(p - 1)
    derivative = [power * coefficient
                  for power, coefficient in enumerate(poly)][1:]
    # drop trailing zero terms; use == (not `is`, which is an identity
    # check and silently fails for float zeros) so [3, 0.0] trims correctly
    while len(derivative) > 1 and derivative[-1] == 0:
        derivative = derivative[:-1]
    return derivative
| [
"eislek02@gmail.com"
] | eislek02@gmail.com |
299b7afae0c73e134909d4228f2ad18889254403 | 3bf73a5ac2c8dbcee802a742ee31834c2bbfda4e | /viewer/converter.py | d4e98e5ba9d2b73fa658263024cc81b9108103e8 | [
"Unlicense",
"LicenseRef-scancode-public-domain"
] | permissive | pawlosck/epistolaire | c8708df67e324abce31bff5519967a2ba6ffcd31 | 56c3d8665e492e649c631953baadebc70404303d | refs/heads/master | 2021-05-17T16:19:37.762930 | 2020-03-25T22:29:57 | 2020-03-25T22:32:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,391 | py | #!/usr/bin/env python3
# This is free and unencumbered software released into the public domain.
# See LICENSE file for details.
import locale
import datetime
from pathlib import Path
import sys
import json
import xml.etree.ElementTree as ET
class Converter:
    """Render an epistolaire JSON backup of SMS/MMS conversations into one
    static HTML file per conversation, written to the current directory."""
    def import_data(self, path):
        """load the JSON backup file produced by the epistolaire app"""
        with open(path) as fd:
            self.jfile = json.load(fd)
    def convert(self):
        """write one <address>.html file per conversation"""
        seen = set()
        for conversation in self.jfile['conversations']:
            try:
                addr = conversation[0]['address'].replace(' ', '')
            except KeyError:
                # group conversations carry 'addresses' instead of 'address'
                addr = ','.join(conversation[0]['addresses']).replace(' ', '')
            # truncate very long address lists to keep filenames reasonable
            outfile = Path(f"{addr[:200]}{addr[200:] and '...'}.html")
            if outfile in seen:
                raise FileExistsError(f"oops, {outfile} has already been used")
            seen.add(outfile)
            hconv = self.build_conversation(conversation)
            html = ET.Element('html')
            hhead = ET.SubElement(html, 'head')
            ET.SubElement(hhead, 'link', rel='stylesheet', href='https://cdn.jsdelivr.net/gh/kognise/water.css@latest/dist/dark.css')
            ET.SubElement(hhead, 'link', rel='stylesheet', href='style.css')
            hbody = ET.SubElement(html, 'body')
            hbody.append(hconv)
            with outfile.open('wb') as fd:
                fd.write(ET.tostring(html, method='html'))
    def build_conversation(self, jconv):
        """return a <div> holding every message of one conversation,
        sorted chronologically by the 'date' field"""
        hconv = ET.Element('div', **{
            'itemscope': 'itemscope',
            'itemtype': 'http://schema.org/Message',
        })
        for jmsg in sorted(jconv, key=lambda jmsg: jmsg['date']):
            if 'parts' in jmsg:
                # only MMS messages carry a 'parts' list
                self.build_mms(jmsg, hconv)
            else:
                self.build_sms(jmsg, hconv)
        return hconv
    def build_mms(self, jmsg, hconv):
        """append one MMS (its text part and/or first image part) to hconv"""
        parts = jmsg['parts']
        text_part = next((part for part in parts if part['ct'] == 'text/plain'), None)
        img_part = next((part for part in parts if part['ct'].startswith('image/')), None)
        # msg_box == 1 is treated as "received" -- TODO confirm against the
        # Android MMS provider constants
        is_received = jmsg['msg_box'] == 1
        # 'date' appears to be epoch milliseconds (divided by 1000 here)
        dt = datetime.datetime.fromtimestamp(jmsg['date'] / 1000)
        hmsg = ET.SubElement(
            hconv, 'div', id=str(jmsg['_id']),
            **{
                'class': f'message message-{"received" if is_received else "sent"}',
                'itemscope': 'itemscope',
                'itemprop': 'hasPart',
                'itemtype': 'http://schema.org/Message',
            },
        )
        htime = ET.SubElement(
            hmsg, 'time', **{
                'class': 'message-date',
                'itemprop': 'dateReceived',
                'datetime': dt.isoformat(),
            })
        htime.text = dt.strftime('%Y-%m-%d %H:%M:%S')
        if img_part:
            # embed the image inline as a base64 data URI
            hdimg = ET.SubElement(hmsg, 'div')
            ET.SubElement(
                hdimg, 'img', **{
                    'class': 'message-photo',
                    'src': f'data:{img_part["ct"]};base64,{img_part["my_content"]}',
                })
        if text_part:
            hbody = ET.SubElement(hmsg, 'div', **{'class': 'message-body'})
            hbody.text = text_part['text']
    def build_sms(self, jmsg, hconv):
        """append one SMS message to hconv"""
        # for SMS, type == 1 is treated as "received"
        is_received = jmsg['type'] == 1
        dt = datetime.datetime.fromtimestamp(jmsg['date'] / 1000)
        hmsg = ET.SubElement(
            hconv, 'div', id=str(jmsg['_id']),
            **{
                'class': f'message message-{"received" if is_received else "sent"}',
                'itemscope': 'itemscope',
                'itemprop': 'hasPart',
                'itemtype': 'http://schema.org/Message',
            },
        )
        # haddr = ET.SubElement(
        #     hmsg, 'div', **{
        #         'class': 'message-address',
        #         'itemprop': 'sender' if is_received else 'recipient',
        #     })
        # haddr.text = jmsg['address']
        htime = ET.SubElement(
            hmsg, 'time', **{
                'class': 'message-date',
                'itemprop': 'dateReceived',
                'datetime': dt.isoformat(),
            })
        htime.text = dt.strftime('%Y-%m-%d %H:%M:%S')
        hbody = ET.SubElement(hmsg, 'div', **{'class': 'message-body'})
        hbody.text = jmsg['body']
# CLI entry point: ./converter.py <backup.json>
locale.setlocale(locale.LC_ALL, '')
c = Converter()
c.import_data(sys.argv[1])
c.convert()
| [
"dev@indigo.re"
] | dev@indigo.re |
f978c6e0a9bfde2190c40eb828a863cba0d926f4 | f93ea26173e6b72ff46b3abb2a5250bfb0636cdd | /eqsig/sdof.py | 503ab7098abb75b698e0fb48941656348ebe783d | [
"MIT"
] | permissive | eng-tools/eqsig | 53d1dc695ffbe132a7fef871d825d9b7011f821c | 8a70f4c7152bc0f0901d457b6acbca256d1a6473 | refs/heads/master | 2023-02-26T06:58:43.243878 | 2022-08-16T03:23:04 | 2022-08-16T03:23:04 | 125,842,866 | 22 | 10 | MIT | 2023-02-08T00:41:12 | 2018-03-19T10:46:43 | Python | UTF-8 | Python | false | false | 9,476 | py | import numpy as np
def single_elastic_response(motion, step, period, xi):
    """Compute the SDOF displacement response via Duhamel's integral.

    Each acceleration sample is treated as an impulse whose decaying
    sinusoidal response is superposed onto all later time steps.
    http://www.civil.utah.edu/~bartlett/CVEEN7330/Duhamel%27s_integral.pdf

    :param motion: acceleration record in m/s2
    :param step: time step of the record
    :param period: natural period of the SDOF oscillator
    :param xi: fraction of critical damping (e.g. 0.05)
    :return: displacement response array (same length as motion)
    """
    omega = (2.0 * np.pi) / period
    omega_d = omega * np.sqrt(1 - xi ** 2)  # damped angular frequency
    decay = xi * omega
    n = len(motion)
    times = step * np.arange(n + 1)
    impulse = motion * step / omega_d
    resp = np.zeros(n)
    for j in range(n):
        elapsed = times[:n - j]
        resp[j:] += impulse[j] * np.exp(-decay * elapsed) * np.sin(omega_d * elapsed)
    return resp
def slow_response_spectra(motion, step, periods, xis):
    """Displacement, pseudo-velocity and pseudo-acceleration spectra by
    direct Duhamel integration (slow reference implementation).

    Only the first damping ratio in *xis* is used.

    :param motion: acceleration record in m/s2
    :param step: time step of the record
    :param periods: array of SDOF oscillator periods
    :param xis: sequence of critical damping fractions (first one used)
    :return: (s_d, s_v, s_a) spectral arrays
    """
    xi = xis[0]
    # peak absolute displacement response for each oscillator period
    s_d = np.array([max(abs(single_elastic_response(motion, step, t, xi)))
                    for t in periods])
    s_v = s_d * 2 * np.pi / periods
    s_a = s_d * (2 * np.pi / periods) ** 2
    return s_d, s_v, s_a
def compute_a_and_b(xi, w, dt):
    """
    From the paper by Nigam and Jennings (1968), computes the two matrices.

    The recurrence x_{i+1} = A . (u_i, v_i) + B . (acc_i, acc_{i+1})
    advances the SDOF state assuming linear interpolation of the input
    between samples; A and B are closed-form coefficient matrices, each
    entry vectorized over the array of angular frequencies.

    :param xi: critical damping ratio
    :param w: angular frequencies
    :param dt: time step
    :return: matrices A and B
    """
    # Reduce the terms since all is matrix multiplication.
    xi2 = xi * xi  # D2
    w2 = w ** 2  # W2
    one_ov_w2 = 1. / w2  # A7
    sqrt_b2 = np.sqrt(1. - xi2)
    w_sqrt_b2 = w * sqrt_b2  # A1 (damped angular frequency)
    exp_b = np.exp(-xi * w * dt)  # A0
    two_b_ov_w2 = (2 * xi ** 2 - 1) / (w ** 2 * dt)
    two_b_ov_w3 = 2 * xi / (w ** 3 * dt)
    sin_wsqrt = np.sin(w_sqrt_b2 * dt)  # A2
    cos_wsqrt = np.cos(w_sqrt_b2 * dt)  # A3
    # A matrix
    a_11 = exp_b * (xi / sqrt_b2 * sin_wsqrt + cos_wsqrt)  # Eq 2.7d(1)
    a_12 = exp_b / (w * sqrt_b2) * sin_wsqrt  # Eq 2.7d(2)
    a_21 = -w / sqrt_b2 * exp_b * sin_wsqrt  # Eq 2.7d(3)
    a_22 = exp_b * (cos_wsqrt - xi / sqrt_b2 * sin_wsqrt)  # Eq 2.7d(4)
    a = np.array([[a_11, a_12], [a_21, a_22]])
    # B matrix
    bsqrd_ov_w2_p_xi_ov_w = two_b_ov_w2 + xi / w
    sin_ov_wsqrt = sin_wsqrt / w_sqrt_b2
    xwcos = xi * w * cos_wsqrt
    wsqrtsin = w_sqrt_b2 * sin_wsqrt
    # Eq 2.7e
    b_11 = exp_b * (bsqrd_ov_w2_p_xi_ov_w * sin_ov_wsqrt + (two_b_ov_w3 + one_ov_w2) * cos_wsqrt) - two_b_ov_w3
    b_12 = -exp_b * (two_b_ov_w2 * sin_ov_wsqrt + two_b_ov_w3 * cos_wsqrt) - one_ov_w2 + two_b_ov_w3
    b_21 = exp_b * (bsqrd_ov_w2_p_xi_ov_w * (cos_wsqrt - xi / sqrt_b2 * sin_wsqrt)
                    - (two_b_ov_w3 + one_ov_w2) * (wsqrtsin + xwcos)) + one_ov_w2 / dt
    b_22 = -exp_b * (two_b_ov_w2 * (cos_wsqrt - xi / sqrt_b2 * sin_wsqrt) - two_b_ov_w3 * (wsqrtsin + xwcos)) - one_ov_w2 / dt
    b = np.array([[b_11, b_12], [b_21, b_22]])
    return a, b
def nigam_and_jennings_response(acc, dt, periods, xi):
    """
    Implementation of the response spectrum calculation from Nigam and Jennings (1968).

    Ref: Nigam, N. C., Jennings, P. C. (1968) Digital calculation of response spectra from strong-motion earthquake
    records. National Science Foundation.

    :param acc: acceleration in m/s2
    :param periods: response periods of interest
    :param dt: time step of the acceleration time series
    :param xi: critical damping factor
    :return: response displacement, response velocity, response acceleration
    """
    acc = -np.array(acc, dtype=float)
    periods = np.array(periods, dtype=float)
    # a leading zero period is handled specially: its "oscillator" is
    # infinitely stiff, so its acceleration response is the input itself
    if periods[0] == 0:
        s = 1
    else:
        s = 0
    w = 6.2831853 / periods[s:]
    dt = float(dt)
    xi = float(xi)
    # implement: delta_t should be less than period / 20
    a, b = compute_a_and_b(xi, w, dt)
    resp_u = np.zeros([len(periods), len(acc)], dtype=float)
    resp_v = np.zeros([len(periods), len(acc)], dtype=float)
    for i in range(len(acc) - 1):  # possibly speed up using scipy.signal.lfilter
        # x_i+1 = A cross (u, v) + B cross (acc_i, acc_i+1)  # Eq 2.7a
        resp_u[s:, i + 1] = (a[0][0] * resp_u[s:, i] + a[0][1] * resp_v[s:, i] + b[0][0] * acc[i] + b[0][1] * acc[i + 1])
        resp_v[s:, i + 1] = (a[1][0] * resp_u[s:, i] + a[1][1] * resp_v[s:, i] + b[1][0] * acc[i] + b[1][1] * acc[i + 1])
    # recover relative acceleration from the equation of motion
    w2 = w ** 2
    if s:
        sdof_acc = np.zeros_like(resp_u, dtype=float)
        sdof_acc[s:] = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
        sdof_acc[0] = acc
    else:
        sdof_acc = -2 * xi * w[:, np.newaxis] * resp_v[s:] - w2[:, np.newaxis] * resp_u[s:]
    return resp_u, resp_v, sdof_acc
def absmax(a, axis=None):
    """Return the largest absolute value of *a* along *axis*.

    Equivalent to abs(a).max(axis) but reuses the signed extrema.
    """
    lo = a.min(axis)
    hi = a.max(axis)
    # pick whichever extreme is larger in magnitude, then drop the sign
    return abs(np.where(hi >= -lo, hi, lo))
def pseudo_response_spectra(motion, dt, periods, xi):
    """Maximum response displacement plus pseudo velocity and acceleration.

    :param motion: array floats, acceleration in m/s2
    :param dt: float, the time step
    :param periods: array floats, periods of the SDOF oscillators
    :param xi: float, fraction of critical damping (e.g. 0.05)
    :return: tuple, (spectral displacement, pseudo spectral velocity,
             pseudo spectral acceleration)
    """
    periods = np.array(periods, dtype=float)
    if periods[0] == 0:
        # zero period has no finite angular frequency; use 1.0 placeholder
        w = np.ones_like(periods)
        w[1:] = 2 * np.pi / periods[1:]
    else:
        w = 2 * np.pi / periods
    resp_u, _resp_v, _resp_a = nigam_and_jennings_response(motion, dt, periods, xi)
    sds = absmax(resp_u, axis=1)
    svs = w * sds
    sas = w ** 2 * sds
    # periods too short for the time step just report the peak ground motion
    sas = np.where(periods < dt * 6, absmax(motion), sas)
    return sds, svs, sas
def response_series(motion, dt, periods, xi):
    """
    Computes the elastic response to the acceleration time series

    Thin alias of nigam_and_jennings_response().

    :param motion: array floats, acceleration in m/s2
    :param dt: float, the time step
    :param periods: array floats, The period of SDOF oscillator
    :param xi: float, fraction of critical damping (e.g. 0.05)
    :return: tuple of float arrays, (response displacements, response velocities, response accelerations)
    """
    return nigam_and_jennings_response(motion, dt, periods, xi)
def true_response_spectra(motion, dt, periods, xi):
    """
    Computes the actual maximum response values, not the pseudo values

    :param motion: array floats, acceleration in m/s2
    :param dt: float, the time step
    :param periods: array floats, The period of SDOF oscilator
    :param xi: float, fraction of critical damping (e.g. 0.05)
    :return: tuple floats, (spectral displacement, spectral velocity, spectral acceleration)
    """
    # bug fix / consistency with pseudo_response_spectra: convert periods
    # up front, otherwise a plain list breaks the `periods < dt * 6`
    # comparison below
    periods = np.array(periods, dtype=float)
    resp_u, resp_v, resp_a = nigam_and_jennings_response(motion, dt, periods, xi)
    sas = absmax(resp_a, axis=1)
    svs = absmax(resp_v, axis=1)
    sds = absmax(resp_u, axis=1)
    # periods too short for the time step just report the peak ground motion
    sas = np.where(periods < dt * 6, absmax(motion), sas)
    return sds, svs, sas
# def plot_response_spectra():
# import matplotlib.pyplot as plt
# step = 0.01
# xis = [0.05]
# periods = np.arange(1, 5, 0.5)
# motion = np.sin(0.1 * np.arange(1000)) * 0.01
# s_d, s_v, s_a = response_spectra(motion, step, periods, xis)
#
# plt.plot(periods, s_a)
# plt.show()
#
#
def time_the_generation_of_response_spectra():
    """ad-hoc benchmark: 500 periods x 100k samples through the fast solver"""
    step = 0.01
    xi = 0.05
    periods = np.linspace(1, 5, 500)
    # periods = np.array([0.01])
    motion = np.sin(0.1 * np.arange(100000)) * 0.01
    # s_d, s_v, s_a = all_at_once_response_spectra(values, step, periods, xis)
    s_d, s_v, s_a = pseudo_response_spectra(motion, step, periods, xi)
def calc_resp_uke_spectrum(acc_signal, periods=None, xi=None):
    """
    Calculates the sdof response (kinematic + stored) energy spectrum

    :param acc_signal: eqsig AccSignal-like object (exposes .values, .dt
        and .response_times) -- TODO confirm expected type
    :param periods: response periods; defaults to acc_signal.response_times
    :param xi: fraction of critical damping [0.05]
    :return: array, cumulative absolute energy change per unit mass,
        one value per period
    """
    if periods is None:
        periods = acc_signal.response_times
    else:
        periods = np.array(periods)
    if xi is None:
        xi = 0.05
    resp_u, resp_v, resp_a = response_series(acc_signal.values, acc_signal.dt, periods, xi)
    mass = 1
    # kinetic energy of a unit mass at each time step
    kin_energy = 0.5 * resp_v ** 2 * mass
    delta_energy = np.diff(kin_energy)
    # double for strain then half since only positive increasing
    cum_delta_energy = np.sum(abs(delta_energy), axis=1)
    return cum_delta_energy
def calc_input_energy_spectrum(acc_signal, periods=None, xi=None, series=False):
    """
    Calculates the input energy spectrum (work done by the ground motion on the SDOF).
    :param acc_signal: signal object with .values, .dt and .response_times
    :param periods: array-like of SDOF oscillator periods (defaults to signal's)
    :param xi: float, fraction of critical damping (defaults to 0.05)
    :param series: bool, if True return the cumulative time series per period
    :return: array, input energy (per period), or cumulative series if series=True
    """
    if periods is None:
        periods = acc_signal.response_times
    else:
        # Consistency fix: convert explicit periods to an ndarray, matching
        # calc_resp_uke_spectrum (a plain list would break array broadcasting
        # in response_series).
        periods = np.array(periods)
    if xi is None:
        xi = 0.05
    resp_u, resp_v, resp_a = response_series(acc_signal.values, acc_signal.dt, periods, xi)
    # Input energy increment = ground acceleration * relative velocity * dt.
    if series:
        return np.cumsum(acc_signal.values * resp_v * acc_signal.dt, axis=1)
    else:
        return np.sum(acc_signal.values * resp_v * acc_signal.dt, axis=1)
if __name__ == '__main__':
    # time_response_spectra()
    # Benchmark the response-spectra generation when run as a script.
    time_the_generation_of_response_spectra()
    # To profile instead of just running, uncomment:
    # import cProfile
    # cProfile.run('time_the_generation_of_response_spectra()')
| [
"maxim.millen@gmail.com"
] | maxim.millen@gmail.com |
252968fc95b8ee95bcdff316f26b7222dc1805b1 | cba0f1286e4271ac35101a25d5040b2e4f405bde | /cgi-bin/admin/severe2/advanced/answerKey/edit.py.cln | ba798fa61b4625939db956fa6a9d944db2d181ef | [] | no_license | akrherz/pals | 271c92d098909abb5b912db4ae08f0c3589e5ec7 | adc213333fb23dc52d6784ce160c4ff8a8f193e3 | refs/heads/master | 2021-01-10T15:01:59.570168 | 2019-12-18T16:59:08 | 2019-12-18T16:59:08 | 45,484,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | cln | #!/usr/local/bin/python
# This program changes db stuff
# Daryl Herzmann 8-16-99
import cgi, pg, style, time
mydb = pg.connect('severe2_adv', 'localhost', 5555)
def get_question(question_num):
    """Return the question row(s) for the given question id as a list of dicts."""
    # SECURITY: question_num comes straight from CGI form input. Doubling
    # single quotes blocks the basic quote-breakout SQL injection; the old
    # pg API used here only accepts raw SQL strings, so this is the minimal
    # in-place hardening (a parameterised query would be preferable).
    safe_id = question_num.replace("'", "''")
    entry = mydb.query("SELECT * from questions WHERE q_id = '" + safe_id + "' ").dictresult()
    return entry
def get_old_answer(caseNum, q_id):
    """Return (answer, correct_comments, wrong_comments) for a case/question,
    or three empty strings when no answer row exists yet."""
    # SECURITY: both values come straight from CGI form input. Doubling
    # single quotes blocks the basic quote-breakout SQL injection; the old
    # pg API used here only accepts raw SQL strings, so this is the minimal
    # in-place hardening (a parameterised query would be preferable).
    safe_case = caseNum.replace("'", "''")
    safe_q = q_id.replace("'", "''")
    select = mydb.query("SELECT answer, correct, wrong from answers WHERE casenum = '" + safe_case + "' and q_id = '" + safe_q + "' ").getresult()
    if len(select) > 0:
        ans = select[0][0]
        cor_comments = select[0][1]
        wro_comments = select[0][2]
        return ans, cor_comments, wro_comments
    else:
        return "", "", ""
def mk_option(ans, letter, optionval):
if letter == ans and optionval != 'N':
print '<option value="'+letter+'" SELECTED>'+letter+'. '+optionval[:80]+' ...'
elif optionval != 'N':
print '<option value="'+letter+'">'+letter+'. '+optionval[:80]+' ...'
def Main():
    """Render the answer-editing form for one generic question.

    Reads caseNum and question_num from the CGI request, looks up the
    question and any previously saved answer, and prints an HTML form
    that posts to change.py.
    """
    form = cgi.FormContent()
    caseNum = form["caseNum"][0]
    question_num = form["question_num"][0]
    style.header("Edit answer for Generic Question", "white")
    quest = get_question(question_num)
    print '<H3>This is Question number '+question_num+' from caseNum '+caseNum+' </H3>'
    # Unpack the question text and its eight answer options ('N' marks an
    # unused option slot; mk_option skips those).
    question = quest[0]["question"]
    optiona = quest[0]["optiona"]
    optionb = quest[0]["optionb"]
    optionc = quest[0]["optionc"]
    optiond = quest[0]["optiond"]
    optione = quest[0]["optione"]
    optionf = quest[0]["optionf"]
    optiong = quest[0]["optiong"]
    optionh = quest[0]["optionh"]
    # Previously saved answer/comments pre-populate the form ("" if none yet).
    ans, cor_comments, wro_comments = get_old_answer(caseNum, question_num)
    print '<form method="POST" action="change.py">'
    # Carry the identifiers through as hidden fields so change.py can update
    # the right row.
    print '<input type="hidden" value="'+question_num+'" name="question_num">'
    print '<input type="hidden" value="'+caseNum+'" name="caseNum">'
    print '<B>Edit the answer for this question:</B><BR>'
    print '<dd>'+question+'</dd><BR>'
    print '<B>Select the correct answer:</B><BR>'
    print '<SELECT name="answer">'
    mk_option(ans, "A", optiona)
    mk_option(ans, "B", optionb)
    mk_option(ans, "C", optionc)
    mk_option(ans, "D", optiond)
    mk_option(ans, "E", optione)
    mk_option(ans, "F", optionf)
    mk_option(ans, "G", optiong)
    mk_option(ans, "H", optionh)
    print '</SELECT>'
    print '<BR><B>Input the correct comments</B>'
    print '<textarea name="cor_comments" cols="70" rows="10" WRAP>'+cor_comments+'</textarea>'
    print '<BR><B>Input the wrong comments</B>'
    print '<textarea name="wro_comments" cols="70" rows="10" WRAP>'+wro_comments+'</textarea>'
    print '<BR><BR>'
    print '<input type="submit" value="SUBMIT ANSWER">'
    print '</form>'
    style.std_bot()
# CGI entry point: the script runs Main() on every request.
Main()
| [
"akrherz@iastate.edu"
] | akrherz@iastate.edu |
e19d8f8f88840156c1eeb8d48d212e59b617dba8 | 34ec93dd1846270d7999e03db4f2f877ea1af005 | /nfldb/__init__.py | b2fdac632ab95a72060604da2e99ceda8b7bbc64 | [
"Unlicense"
] | permissive | micahstone20/nfldb | 4469fc466d3e8b065cf669362b0d13d6033bae2d | 61a5ae56be627a1ad5be93ea25ac494ee0ff292d | refs/heads/master | 2017-12-02T12:33:48.929627 | 2014-05-10T16:13:11 | 2014-05-10T16:13:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,759 | py | """
Module nfldb provides command line tools and a library for maintaining
and querying a relational database of play-by-play NFL data. The data
is imported from [nflgame](https://github.com/BurntSushi/nflgame),
which in turn gets its data from a JSON feed on NFL.com's live
GameCenter pages. This data includes, but is not limited to, game
schedules, scores, rosters and play-by-play data for every preseason,
regular season and postseason game dating back to 2009.
Here is a small teaser that shows how to use nfldb to find the top five
passers in the 2012 regular season:
#!python
import nfldb
db = nfldb.connect()
q = nfldb.Query(db)
q.game(season_year=2012, season_type='Regular')
for pp in q.sort('passing_yds').limit(5).as_aggregate():
print pp.player, pp.passing_yds
And the output is:
[andrew@Liger ~] python2 top-five.py
Drew Brees (NO, QB) 5177
Matthew Stafford (DET, QB) 4965
Tony Romo (DAL, QB) 4903
Tom Brady (NE, QB) 4799
Matt Ryan (ATL, QB) 4719
In theory, both `nfldb` and `nflgame` provide access to the same data.
The difference is in the execution. In order to search data in nflgame,
a large JSON file needs to be read from disk and loaded into Python
data structures for each game. Conversely, nfldb's data is stored in
a relational database, which can be searched and retrieved faster
than nflgame by a few orders of magnitude. Moreover, the relational
organization of data in nfldb allows for a convenient
[query interface](http://goo.gl/Sd6MN2) to search NFL play data.
The database can be updated with real time data from active games by
running the `nfldb-update` script included with this module as often
as you're comfortable pinging NFL.com. (N.B. The JSON data itself only
updates every 15 seconds, so running `nfldb-update` faster than that
would be wasteful.) Roster updates are done automatically at a minimum
interval of 12 hours.
nfldb has [comprehensive API documentation](http://pdoc.burntsushi.net/nfldb)
and a [wiki with examples](https://github.com/BurntSushi/nfldb/wiki).
nfldb can be used in conjunction with
[nflvid](https://pypi.python.org/pypi/nflvid)
to
[search and watch NFL game footage](http://goo.gl/Mckaf0).
If you need help, please join us at our IRC channel `#nflgame` on
FreeNode.
"""
from __future__ import absolute_import, division, print_function
from nfldb.db import __pdoc__ as __db_pdoc__
from nfldb.db import api_version, connect, now, set_timezone, schema_version
from nfldb.db import Tx
from nfldb.query import __pdoc__ as __query_pdoc__
from nfldb.query import aggregate, current, guess_position, player_search
from nfldb.query import Query, QueryOR
from nfldb.team import standard_team
from nfldb.types import __pdoc__ as __types_pdoc__
from nfldb.types import select_columns, stat_categories
from nfldb.types import Category, Clock, Enums, Drive, FieldPosition, Game
from nfldb.types import Play, Player, PlayPlayer, PossessionTime, Team
from nfldb.version import __pdoc__ as __version_pdoc__
from nfldb.version import __version__
# Merge the per-submodule pdoc override maps into one module-level __pdoc__
# so the documentation generator sees a single combined mapping.
__pdoc__ = __db_pdoc__
__pdoc__ = dict(__pdoc__, **__query_pdoc__)
__pdoc__ = dict(__pdoc__, **__types_pdoc__)
__pdoc__ = dict(__pdoc__, **__version_pdoc__)
# Export selected identifiers from sub-modules.
__all__ = [
    # nfldb.db
    'api_version', 'connect', 'now', 'set_timezone', 'schema_version',
    'Tx',
    # nfldb.query
    'aggregate', 'current', 'guess_position', 'player_search',
    'Query', 'QueryOR',
    # nfldb.team
    'standard_team',
    # nfldb.types
    'select_columns', 'stat_categories',
    'Category', 'Clock', 'Enums', 'Drive', 'FieldPosition', 'Game',
    'Play', 'Player', 'PlayPlayer', 'PossessionTime', 'Team',
    # nfldb.version
    '__version__',
]
| [
"jamslam@gmail.com"
] | jamslam@gmail.com |
10a63c1f20bce5638d2acc7a6327beab0a37f250 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/galex_j23054-4046/sdB_GALEX_J23054-4046_coadd.py | 2c395340b31353193613a52ea32cda925e2a3290 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from gPhoton.gMap import gMap
def main():
    """Run gPhoton gMap for sdB GALEX J23054-4046: a 30 s-cadence NUV count
    movie plus a coadded count image over a 2x2 arcmin sky range."""
    # NOTE(review): cntfile writes under .../sdBs/... while cntcoaddfile
    # writes under .../sdB/... -- confirm the differing directory names are
    # intentional and both exist.
    gMap(band="NUV", skypos=[346.356125,-40.776181], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_GALEX_J23054-4046/sdB_GALEX_J23054-4046_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_GALEX_J23054-4046/sdB_GALEX_J23054-4046_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
| [
"thomas@boudreauxmail.com"
] | thomas@boudreauxmail.com |
7088e6f502c1cdeacd741bb0c4bd166fe030c4ad | 364d77b02d62d45ea588dbada7da16540e6a1f0c | /PyQt5/_table.py | 526ddea155e5a727ca8028812b9d53c62c2ffefe | [] | no_license | BaranAkcakaya/PythonProgramming | 3021f5b3452495fcc34ab9bbfce441976bb63456 | a0cc0f60dce3d50fe9bcf68a7255a71b3e81351d | refs/heads/main | 2023-01-07T09:20:33.695241 | 2020-11-02T07:25:49 | 2020-11-02T07:25:49 | 309,286,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,077 | py | from PyQt5 import QtWidgets
from PyQt5.QtWidgets import QTableWidgetItem
from _tableForm import Ui_MainWindow
import sys
class Window(QtWidgets.QMainWindow):
    """Main window: shows a product table and lets the user append rows."""

    def __init__(self):
        super(Window, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.loadProducts()
        # Wire the save button and table double-click to their handlers.
        self.ui.btnSave.clicked.connect(self.saveProduct)
        self.ui.tableProducts.doubleClicked.connect(self.doubleClick)

    def doubleClick(self):
        """Print row/column/text for every selected cell on double-click."""
        for item in self.ui.tableProducts.selectedItems():
            print(item.row(), item.column(), item.text())

    def saveProduct(self):
        """Append a row built from the name/price line edits.

        BUG FIX: the original condition ``name and price is not None`` parsed
        as ``name and (price is not None)``; QLineEdit.text() returns a string
        and never None, so a row was inserted even with an empty price field.
        Require both fields to be non-empty instead.
        """
        name = self.ui.txtName.text()
        price = self.ui.txtPrice.text()
        if name and price:
            rowCount = self.ui.tableProducts.rowCount()
            self.ui.tableProducts.insertRow(rowCount)
            self.ui.tableProducts.setItem(rowCount, 0, QTableWidgetItem(name))
            self.ui.tableProducts.setItem(rowCount, 1, QTableWidgetItem(price))

    def loadProducts(self):
        """Populate the table with a fixed demo product list."""
        products = [
            {'name': 'Samsung S5', 'price': 2000},
            {'name': 'Samsung S6', 'price': 3000},
            {'name': 'Samsung S7', 'price': 4000},
            {'name': 'Samsung S8', 'price': 5000},
        ]
        self.ui.tableProducts.setRowCount(len(products))
        self.ui.tableProducts.setColumnCount(2)
        self.ui.tableProducts.setHorizontalHeaderLabels(('Name', 'Price'))
        self.ui.tableProducts.setColumnWidth(0, 200)
        self.ui.tableProducts.setColumnWidth(1, 100)
        # enumerate replaces the manual rowIndex counter.
        for rowIndex, product in enumerate(products):
            self.ui.tableProducts.setItem(rowIndex, 0, QTableWidgetItem(product['name']))
            self.ui.tableProducts.setItem(rowIndex, 1, QTableWidgetItem(str(product['price'])))
def app():
    """Create the Qt application, show the main window and run the event loop."""
    application = QtWidgets.QApplication(sys.argv)
    window = Window()
    window.show()
    # exec_() blocks until the window closes; its exit code becomes ours.
    sys.exit(application.exec_())

app()
| [
"noreply@github.com"
] | BaranAkcakaya.noreply@github.com |
999e154742b6bdc53d8b6a9fa2225b844a90b729 | 7d27c71588c08e2a56807d5e670ef48e1985b3b5 | /Python/kraken/core/__init__.py | 1f2bf6959ded98791ac377e9905699510cf005f1 | [
"BSD-3-Clause"
] | permissive | BigRoy/Kraken | 6fcc5cf55c412751180d930c2c56a37084f5c5a3 | 8744f9ef3eec4f7d94f28a1433c6e89ca9cd0f6b | refs/heads/develop2.X | 2021-01-18T00:01:42.721175 | 2016-02-11T03:34:26 | 2016-02-11T03:34:26 | 51,552,149 | 1 | 0 | null | 2016-02-11T22:34:36 | 2016-02-11T22:34:36 | null | UTF-8 | Python | false | false | 468 | py | """Kraken Core."""
VERSION_MAJOR = 1
VERSION_MINOR = 0
VERSION_BUILD = 0
VERSION_SUFFIX = ""


def getVersion():
    """Concatenate the version globals and return the current Kraken version.

    Returns:
        str: Current version of Kraken, e.g. ``"1.0.0"`` or ``"1.0.0-beta"``.

    """
    versionString = ".".join(str(part) for part in (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD))
    if VERSION_SUFFIX:
        versionString = versionString + "-" + VERSION_SUFFIX
    return versionString
| [
"ethivierge@gmail.com"
] | ethivierge@gmail.com |
5b035d24a819e715e34b2f925f799a3b6312348f | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_network_profiles_operations.py | c964535bc8b442e57c7fe86c57a22be226ab4306 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 23,938 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
# Generic return type for the optional `cls` response-transform callback,
# shared by every operation in this module.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NetworkProfilesOperations:
    """NetworkProfilesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2020_08_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): this class is AutoRest-generated (see the file header);
    # prefer regenerating from the REST API spec over hand-editing bodies.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Issues the raw DELETE request once; the public entry point is
    # begin_delete, which wraps this coroutine in an LRO poller.
    async def _delete_initial(
        self,
        resource_group_name: str,
        network_profile_name: str,
        **kwargs
    ) -> None:
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 = delete accepted (async), 204 = resource already gone.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        network_profile_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified network profile.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_profile_name: The name of the NetworkProfile.
        :type network_profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # When resuming from a continuation token the initial DELETE call is
        # skipped and raw_result is intentionally left unset.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                network_profile_name=network_profile_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        network_profile_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "_models.NetworkProfile":
        """Gets the specified network profile in a specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_profile_name: The name of the public IP prefix.
        :type network_profile_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.NetworkProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is optional and only sent when the caller supplied it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkProfile', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
    async def create_or_update(
        self,
        resource_group_name: str,
        network_profile_name: str,
        parameters: "_models.NetworkProfile",
        **kwargs
    ) -> "_models.NetworkProfile":
        """Creates or updates a network profile.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_profile_name: The name of the network profile.
        :type network_profile_name: str
        :param parameters: Parameters supplied to the create or update network profile operation.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.NetworkProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.NetworkProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing profile, 201 = created new one; both carry
        # the resource body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkProfile', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkProfile', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
    async def update_tags(
        self,
        resource_group_name: str,
        network_profile_name: str,
        parameters: "_models.TagsObject",
        **kwargs
    ) -> "_models.NetworkProfile":
        """Updates network profile tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_profile_name: The name of the network profile.
        :type network_profile_name: str
        :param parameters: Parameters supplied to update network profile tags.
        :type parameters: ~azure.mgmt.network.v2020_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_08_01.models.NetworkProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.update_tags.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkProfileName': self._serialize.url("network_profile_name", network_profile_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkProfile', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles/{networkProfileName}'} # type: ignore
    def list_all(
        self,
        **kwargs
    ) -> AsyncIterable["_models.NetworkProfileListResult"]:
        """Gets all the network profiles in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.NetworkProfileListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"
        # Closure used by AsyncItemPaged: builds the first request, or follows
        # the server-supplied next_link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkProfiles'} # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.NetworkProfileListResult"]:
        """Gets all network profiles in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkProfileListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.NetworkProfileListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkProfileListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-08-01"
        accept = "application/json"
        # Closure used by AsyncItemPaged: builds the first request, or follows
        # the server-supplied next_link for subsequent pages.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already embeds the query string.
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkProfileListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkProfiles'} # type: ignore
"noreply@github.com"
] | scbedd.noreply@github.com |
863ceb86e30e5bcaec6018ee17468974dbc00861 | 6448cd8b6fc0104362924fe1aa788cbd58abe17d | /ABCNN/test_abcnn.py | b2575e8f945de2bcfcdcaad1429d5fe680eac788 | [
"Apache-2.0"
] | permissive | RandolphVI/Text-Pairs-Relation-Classification | 8e54c21fcc97be81c0c797a83d3212c1a854a318 | 25a746ac9e72efdc79c9d90af9769e02587cf650 | refs/heads/master | 2021-06-05T21:58:11.686850 | 2020-11-18T02:24:55 | 2020-11-18T02:24:55 | 83,399,665 | 156 | 52 | null | null | null | null | UTF-8 | Python | false | false | 6,218 | py | # -*- coding:utf-8 -*-
__author__ = 'Randolph'
import os
import sys
import time
import logging
import numpy as np
sys.path.append('../')
logging.getLogger('tensorflow').disabled = True
import tensorflow as tf
from utils import checkmate as cm
from utils import data_helpers as dh
from utils import param_parser as parser
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
args = parser.parameter_parser()
MODEL = dh.get_model_name()
logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime()))
CPT_DIR = 'runs/' + MODEL + '/checkpoints/'
BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/'
SAVE_DIR = 'output/' + MODEL
def create_input_data(data: dict):
    """Pair each front padded sequence with its behind sequence and one-hot label.

    Returns an iterator of (front, behind, label) triples suitable for
    batching.
    """
    fronts = data['f_pad_seqs']
    behinds = data['b_pad_seqs']
    labels = data['onehot_labels']
    return zip(fronts, behinds, labels)
def test_abcnn():
"""Test ABCNN model."""
# Print parameters used for the model
dh.tab_printer(args, logger)
# Load word2vec model
word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file)
# Load data
logger.info("Loading data...")
logger.info("Data processing...")
test_data = dh.load_data_and_labels(args, args.test_file, word2idx)
# Load abcnn model
OPTION = dh._option(pattern=1)
if OPTION == 'B':
logger.info("Loading best model...")
checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True)
else:
logger.info("Loading latest model...")
checkpoint_file = tf.train.latest_checkpoint(CPT_DIR)
logger.info(checkpoint_file)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x_front = graph.get_operation_by_name("input_x_front").outputs[0]
input_x_behind = graph.get_operation_by_name("input_x_behind").outputs[0]
input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
is_training = graph.get_operation_by_name("is_training").outputs[0]
# Tensors we want to evaluate
scores = graph.get_operation_by_name("output/topKPreds").outputs[0]
predictions = graph.get_operation_by_name("output/topKPreds").outputs[1]
loss = graph.get_operation_by_name("loss/loss").outputs[0]
# Split the output nodes name by '|' if you have several output nodes
output_node_names = "output/topKPreds"
# Save the .pb model file
output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def,
output_node_names.split("|"))
tf.train.write_graph(output_graph_def, "graph", "graph-abcnn-{0}.pb".format(MODEL), as_text=False)
# Generate batches for one epoch
batches_test = dh.batch_iter(list(create_input_data(test_data)), args.batch_size, 1, shuffle=False)
# Collect the predictions here
test_counter, test_loss = 0, 0.0
true_labels = []
predicted_labels = []
predicted_scores = []
for batch_test in batches_test:
x_f, x_b, y_onehot = zip(*batch_test)
feed_dict = {
input_x_front: x_f,
input_x_behind: x_b,
input_y: y_onehot,
dropout_keep_prob: 1.0,
is_training: False
}
batch_predicted_scores, batch_predicted_labels, batch_loss \
= sess.run([scores, predictions, loss], feed_dict)
for i in y_onehot:
true_labels.append(np.argmax(i))
for j in batch_predicted_scores:
predicted_scores.append(j[0])
for k in batch_predicted_labels:
predicted_labels.append(k[0])
test_loss = test_loss + batch_loss
test_counter = test_counter + 1
test_loss = float(test_loss / test_counter)
# Calculate Precision & Recall & F1
test_acc = accuracy_score(y_true=np.array(true_labels), y_pred=np.array(predicted_labels))
test_pre = precision_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_rec = recall_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
test_F1 = f1_score(y_true=np.array(true_labels),
y_pred=np.array(predicted_labels), average='micro')
# Calculate the average AUC
test_auc = roc_auc_score(y_true=np.array(true_labels),
y_score=np.array(predicted_scores), average='micro')
logger.info("All Test Dataset: Loss {0:g} | Acc {1:g} | Precision {2:g} | "
"Recall {3:g} | F1 {4:g} | AUC {5:g}"
.format(test_loss, test_acc, test_pre, test_rec, test_F1, test_auc))
# Save the prediction result
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", front_data_id=test_data['f_id'],
behind_data_id=test_data['b_id'], true_labels=true_labels,
predict_labels=predicted_labels, predict_scores=predicted_scores)
logger.info("All Done.")
if __name__ == '__main__':
test_abcnn()
| [
"chinawolfman@hotmail.com"
] | chinawolfman@hotmail.com |
7b8e14dedc35d80a37f531e52050c5e7631b4e23 | 03e115c1937ec7bd1e249f82db0225828eaaa186 | /2-GUI (tkinter)/5marcos2.py | ba25a0d2f06d0cb9bab02c46d760c7a49c2eaa32 | [] | no_license | mivargas/Master-python | 236c04205637ddd44d1cc879de2b7c48418153f9 | 9d1c04a8d658aa0dd8620ed792fa2133adfa57e7 | refs/heads/master | 2023-03-06T13:35:58.177058 | 2021-02-16T00:06:00 | 2021-02-16T00:06:00 | 321,731,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,866 | py | from tkinter import *
ventana = Tk()
ventana.title("Marcos | Master en python")
ventana.geometry("700x700")
marco_padre = Frame(ventana, width=250, height=250)
marco_padre.config(
bg="lightblue"
)
marco_padre.pack(side=TOP, anchor=N, fill=X, expand=YES) #el anchor es para que se apegue lo mas posible al borde superor, np basta el top es igual en el caso del de abajo seria N con bottom
marco = Frame(marco_padre, width=250, height=250) #este y el de abajo estan contendios en el marco padre
marco.config(
bg="blue",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=RIGHT, anchor=SE)
marco = Frame(marco_padre, width=250, height=250)
marco.config(
bg="yellow",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=LEFT, anchor=SW)
marco.pack_propagate(False) #sin esto al incluir el label el marco se contrae (se hace pequeño y pierde estilo)
texto = Label(marco, text="primer marco")
texto.config(
bg="red",
fg="white",
font=("Arial", 20),
#height=10, usamos fill x y expand yes para lograr esto
#width=10,
bd=3,
relief=SOLID,
anchor=CENTER
)
texto.pack(fill=Y, expand=YES)
marco_padre = Frame(ventana, width=250, height=250)
marco_padre.config(
bg="lightblue"
)
marco_padre.pack(side=BOTTOM, anchor=S, fill=X, expand=YES)
marco = Frame(marco_padre, width=250, height=250) #este y el de abajo estan contendios en el marco padre
marco.config(
bg="red",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=RIGHT, anchor=SE)
marco = Frame(marco_padre, width=250, height=250)
marco.config(
bg="green",
bd=5, #borde (tamaño)
relief="solid" #relieve del borde
#relief="raised"
)
marco.pack(side=LEFT, anchor=SW)
ventana.mainloop() | [
"miguelvargas619@gmail.com"
] | miguelvargas619@gmail.com |
bd6ef2fdadfa54e915b11813bf6ee532622609f2 | b0814b43440a36c9998924c9fe05f335302a2717 | /venv/lib/python2.7/site-packages/nipype/interfaces/semtools/registration/tests/test_auto_BRAINSResize.py | 7b3f8c8ee38a2a381f8e16950d90eff5ec613387 | [
"MIT"
] | permissive | nagyistge/electrode-gui | 0b47324ce8c61ffb54c24c400aee85f16fd79c7a | 6d89c78ea61935042ead5df5e1474101df3557eb | refs/heads/master | 2021-06-03T22:47:30.329355 | 2016-09-13T19:43:31 | 2016-09-13T19:43:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.semtools.registration.brainsresize import BRAINSResize
def test_BRAINSResize_inputs():
    """Yield one equality check per expected metadata entry of each
    declared input trait of BRAINSResize (nose-style generator test)."""
    expected_traits = {
        'args': {'argstr': '%s'},
        'environ': {'nohash': True, 'usedefault': True},
        'ignore_exception': {'nohash': True, 'usedefault': True},
        'inputVolume': {'argstr': '--inputVolume %s'},
        'outputVolume': {'argstr': '--outputVolume %s', 'hash_files': False},
        'pixelType': {'argstr': '--pixelType %s'},
        'scaleFactor': {'argstr': '--scaleFactor %f'},
        'terminal_output': {'nohash': True},
    }
    spec = BRAINSResize.input_spec()
    for trait_name, metadata in expected_traits.items():
        for meta_key, expected_value in metadata.items():
            yield assert_equal, getattr(spec.traits()[trait_name], meta_key), expected_value
def test_BRAINSResize_outputs():
    """Yield one equality check per expected metadata entry of each
    declared output trait of BRAINSResize (nose-style generator test)."""
    expected_traits = {
        'outputVolume': {},
    }
    spec = BRAINSResize.output_spec()
    for trait_name, metadata in expected_traits.items():
        for meta_key, expected_value in metadata.items():
            yield assert_equal, getattr(spec.traits()[trait_name], meta_key), expected_value
| [
"xavierislam@gmail.com"
] | xavierislam@gmail.com |
3d5c505cb30f8c8837d93f222fe86e1aeb19d869 | 62e58c051128baef9452e7e0eb0b5a83367add26 | /edifact/D01C/DOCADVD01CUN.py | 316d74fc708591564f4d9989068543c3bfebce05 | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 2,529 | py | #Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD01CUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'RFF', MIN: 1, MAX: 1},
{ID: 'BUS', MIN: 1, MAX: 1},
{ID: 'INP', MIN: 1, MAX: 10},
{ID: 'FCA', MIN: 1, MAX: 3},
{ID: 'DTM', MIN: 1, MAX: 3},
{ID: 'FTX', MIN: 0, MAX: 20},
{ID: 'FII', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 2},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'NAD', MIN: 1, MAX: 9, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 1},
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
{ID: 'DTM', MIN: 1, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 1, MAX: 1},
]},
{ID: 'MOA', MIN: 1, MAX: 5, LEVEL: [
{ID: 'ALC', MIN: 0, MAX: 1, LEVEL: [
{ID: 'PCD', MIN: 0, MAX: 2},
]},
]},
{ID: 'LOC', MIN: 1, MAX: 3, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'PAI', MIN: 1, MAX: 1, LEVEL: [
{ID: 'FII', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'PAT', MIN: 1, MAX: 5, LEVEL: [
{ID: 'FII', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
]},
{ID: 'TOD', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 1},
]},
{ID: 'TSR', MIN: 0, MAX: 1, LEVEL: [
{ID: 'LOC', MIN: 0, MAX: 5},
]},
{ID: 'INP', MIN: 0, MAX: 5, LEVEL: [
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 2},
]},
{ID: 'RFF', MIN: 1, MAX: 9, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 2},
]},
{ID: 'DOC', MIN: 1, MAX: 20, LEVEL: [
{ID: 'MOA', MIN: 0, MAX: 1},
{ID: 'PCD', MIN: 0, MAX: 1},
{ID: 'LOC', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 1},
{ID: 'ICD', MIN: 0, MAX: 20, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'ALI', MIN: 0, MAX: 9, LEVEL: [
{ID: 'NAD', MIN: 0, MAX: 3, LEVEL: [
{ID: 'CTA', MIN: 0, MAX: 1},
{ID: 'COM', MIN: 0, MAX: 5},
]},
]},
]},
{ID: 'AUT', MIN: 0, MAX: 1, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 1},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
| [
"jason.capriotti@gmail.com"
] | jason.capriotti@gmail.com |
ac63ea0619ec21a0f49f2fc1b0976f2fb087d8aa | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03567/s276827154.py | a80ce1471f8b7edb52e8a4923ffd580a19f23626 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | n=input()
c=0
for i in range(1,len(n)):
if n[i-1:i+1]=="AC":
c=1
print("Yes"if c!=0else"No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c4c1858df652ab42311df23401c4eac2e1bf7dcb | 7fc26de436ad958fc02e11fc7f7486f9ac775d0b | /services/url_lookup/project/tests/test_url.py | 717ba731587a4df19ed77f4afbd8868a2d611887 | [] | no_license | chenjienan/url_lookup_service | 633071d78598b2ee248b6a6fc3ceee2bf4ccca9b | ef10d58450af97221697ac0fa26cfb9e5a43415e | refs/heads/master | 2023-05-12T00:09:36.278356 | 2019-08-06T16:45:05 | 2019-08-06T16:45:05 | 199,910,038 | 0 | 0 | null | 2023-05-01T21:14:08 | 2019-07-31T18:36:20 | Python | UTF-8 | Python | false | false | 4,009 | py | import json
import unittest
from project.tests.base import BaseTestCase
from project import db
from project.api.models import Url
class TestUrlService(BaseTestCase):
"""Tests for the URL Lookup Service."""
def test_urls(self):
"""Ensure the /ping route behaves correctly."""
# Action
response = self.client.get('/ping')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('pong!', data['message'])
self.assertIn('success', data['status'])
def test_add_url(self):
"""Ensure a new url can be added to the database."""
# Arrange
with self.client:
# Action
response = self.client.post(
'/urls',
data=json.dumps({
'url': 'google.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 201)
self.assertIn('google.com was added!', data['message'])
self.assertIn('success', data['status'])
def test_add_url_invalid_json(self):
"""Ensure error is thrown if the JSON object is empty."""
# Arrange
with self.client:
# Action
response = self.client.post(
'/urls',
data=json.dumps({}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 400)
self.assertIn('Invalid payload.', data['message'])
self.assertIn('fail', data['status'])
def test_add_duplicate_url(self):
"""Ensure error is thrown if the url already exists."""
# Arrange
with self.client:
self.client.post(
'/urls',
data=json.dumps({
'url': 'amazon.com'
}),
content_type='application/json',
)
# Action
response = self.client.post(
'/urls',
data=json.dumps({
'url': 'amazon.com'
}),
content_type='application/json',
)
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 400)
self.assertIn('That url already exists.', data['message'])
self.assertIn('fail', data['status'])
def test_get_urlinfo_url_not_exist(self):
"""Ensure get URL info behaves correctly."""
# Arrange
with self.client:
# Action
response = self.client.get(f'/urlinfo/google.com:443/something.html%3Fq%3Dgo%2Blang')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('success', data['status'])
self.assertIn('false', data['isMalware'])
def test_get_urlinfo_url_exists(self):
"""Ensure get URL info behaves correctly when url is empty."""
# Arrange
url = Url(url='abc.com')
db.session.add(url)
db.session.commit()
with self.client:
# Action
response = self.client.get(f'/urlinfo/abc.com/somepath?q=abc')
data = json.loads(response.data.decode())
# Assert
self.assertEqual(response.status_code, 200)
self.assertIn('success', data['status'])
self.assertIn('true', data['isMalware'])
def test_get_urlinfo_url_empty(self):
# Arrange
with self.client:
# Action
response = self.client.get(f'/urlinfo/')
# Assert
self.assertEqual(response.status_code, 404)
if __name__ == '__main__':
unittest.main()
| [
"chenjienan2009@gmail.com"
] | chenjienan2009@gmail.com |
9ab6311a01d824701beb7379e05276521f44673f | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/cyphon/cyphon/aggregator/filters/tests/test_services.py | ea76a8d0f809dacc5a2677be2ac5ed231cb91e30 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"GPL-3.0-only",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-other-copyleft"
] | permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 2,269 | py | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests Filter services.
"""
# standard library
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
# third party
from django.test import TestCase
# local
from aggregator.filters.models import Filter
from aggregator.filters.services import execute_filter_queries
from aggregator.reservoirs.models import Reservoir
from tests.fixture_manager import get_fixtures
class ExecuteFilterQueriesTestCase(TestCase):
    """
    Tests the execute_filter_queries function.
    """
    # Test fixtures resolved by the project's get_fixtures helper
    # (an empty list here -- no model fixtures are needed for this test).
    fixtures = get_fixtures([])
    def test_execute_filter_queries(self):
        """
        Tests the execute_filter_queries function.

        execute_filter_queries should feed the query built by the Filter
        manager to a PumpRoom created for the 'BKGD_SRCH' task with the
        enabled reservoirs, and return whatever get_results() yields.
        """
        query = 'mock_query'
        stream_task = 'BKGD_SRCH'  # task name the service is expected to pass
        doc_ids = [3, 4, 5]        # stand-in for Reservoir.objects.find_enabled()
        mock_results = Mock()
        mock_pumproom = Mock()
        mock_pumproom.get_results = Mock(return_value=mock_results)
        # Patch PumpRoom (and the Reservoir manager) inside the service
        # module so no real search backends are touched.
        with patch('aggregator.filters.services.PumpRoom',
                   return_value=mock_pumproom) as new_pumproom:
            with patch('aggregator.filters.services.Reservoir.objects'):
                Filter.objects.create_reservoir_query = Mock(return_value=query)
                Reservoir.objects.find_enabled = Mock(return_value=doc_ids)
                results = execute_filter_queries()
                # The PumpRoom must be constructed with the enabled
                # reservoirs and the background-search task name.
                new_pumproom.assert_called_once_with(reservoirs=doc_ids,
                                                     task=stream_task)
                mock_pumproom.get_results.assert_called_once_with(query)
                self.assertEqual(results, mock_results)
| [
"a.songer@protonmail.com"
] | a.songer@protonmail.com |
f696eeeb42b422e8eabc13eea85b6dc8b527d15b | f388385e4a2eb63dda0ac2697f9065efd2bcac7e | /test/test4.py | c68535a0d167e72494184404ac28a57152c985c1 | [] | no_license | supercp3/MasterResearch | 03ebae46f8e151b919b46a862bbf132174d30db2 | a15e3c489a21ecdff77e0adb2683c7b95fca3842 | refs/heads/master | 2020-04-13T09:50:54.029493 | 2019-01-18T09:49:07 | 2019-01-18T09:49:07 | 163,122,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | import numpy as np
# Print every value produced by the arange.
# NOTE(review): with step 100 > stop 0.2, np.arange(0, 0.2, 100) yields only
# [0.0], so this prints a single 0.0 — confirm whether np.arange(0, 100, 0.2)
# or np.linspace(0, 0.2, 100) was intended.
for value in np.arange(0, 0.2, 100):
    print(value)
"13281099@bjtu.edu.cn"
] | 13281099@bjtu.edu.cn |
fbfb59163e735907eafbee626470acc4c0e48d44 | 37c3b81ad127c9e3cc26fa9168fda82460ca9bda | /SW_expert/sw_3752_가능한시험점수.py | 1d952fc7daa9160bad7302dde04267851a61397f | [] | no_license | potomatoo/TIL | 5d85b69fdaed68966db7cfe2a565b7c64ed3e816 | 395dc190fa13e5ed036e1e3c7d9e0bc2e1ee4d6c | refs/heads/master | 2021-07-08T16:19:40.410097 | 2021-04-19T02:33:40 | 2021-04-19T02:33:40 | 238,872,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import sys
sys.stdin = open('./input/input_3752.txt','r')
T = int(input())
for t in range(1, T+1):
N = int(input())
test = list(map(int, input().split()))
arr = []
for i in range(N):
arr = list(set(arr))
if not arr:
arr.append(test[i])
continue
x = len(arr)
for a in range(x):
s = arr[a] + test[i]
arr.append(s)
arr.append(test[i])
ans = len(list(set(arr)))+1
print('#{} {}'.format(t, ans)) | [
"duseh73@gmail.com"
] | duseh73@gmail.com |
b53bd82679e0afced3b977fbf6b6929fcff84246 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/databoxedge/azure-mgmt-databoxedge/azure/mgmt/databoxedge/v2020_09_01/operations/_triggers_operations.py | f6b42b941e51c27903c2638705efe229843bc299 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 21,148 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class TriggersOperations(object):
"""TriggersOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.databoxedge.v2020_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to build and send the HTTP requests.
        self._client = client
        # msrest serializer/deserializer for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service-client configuration (subscription id, polling interval, ...).
        self._config = config
def list_by_data_box_edge_device(
self,
device_name, # type: str
resource_group_name, # type: str
filter=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.TriggerList"]
"""Lists all the triggers configured in the device.
:param device_name: The device name.
:type device_name: str
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param filter: Specify $filter='CustomContextTag eq :code:`<tag>`' to filter on custom context
tag property.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TriggerList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.databoxedge.v2020_09_01.models.TriggerList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.TriggerList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_data_box_edge_device.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('TriggerList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers'} # type: ignore
    def get(
        self,
        device_name,  # type: str
        name,  # type: str
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Trigger"
        """Get a specific trigger by name.

        Issues a single GET against the Data Box Edge triggers endpoint and
        deserializes the response body into a Trigger model.

        :param device_name: The device name.
        :type device_name: str
        :param name: The trigger name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Trigger, or the result of cls(response)
        :rtype: ~azure.mgmt.databoxedge.v2020_09_01.models.Trigger
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Trigger"]
        # Map auth / not-found / conflict status codes onto azure-core
        # exception types; callers may extend this via the error_map kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-09-01"
        accept = "application/json"
        # Construct URL (validate and URL-encode each path parameter).
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Send the request through the client pipeline synchronously.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Trigger', pipeline_response)
        # Let a caller-supplied cls hook transform the raw response if given.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers/{name}'}  # type: ignore
def _create_or_update_initial(
self,
device_name, # type: str
name, # type: str
resource_group_name, # type: str
trigger, # type: "_models.Trigger"
**kwargs # type: Any
):
# type: (...) -> Optional["_models.Trigger"]
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.Trigger"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'deviceName': self._serialize.url("device_name", device_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(trigger, 'Trigger')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('Trigger', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers/{name}'} # type: ignore
def begin_create_or_update(
    self,
    device_name, # type: str
    name, # type: str
    resource_group_name, # type: str
    trigger, # type: "_models.Trigger"
    **kwargs # type: Any
):
    # type: (...) -> LROPoller["_models.Trigger"]
    """Creates or updates a trigger.

    :param device_name: The device name.
    :type device_name: str
    :param name: The trigger name.
    :type name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param trigger: The trigger.
    :type trigger: ~azure.mgmt.databoxedge.v2020_09_01.models.Trigger
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Trigger or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.databoxedge.v2020_09_01.models.Trigger]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType["_models.Trigger"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    # Only issue the initial PUT when not resuming from a saved poller state;
    # cls=lambda keeps the raw pipeline response for the poller to inspect.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            device_name=device_name,
            name=name,
            resource_group_name=resource_group_name,
            trigger=trigger,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not leak into
    # the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the final LRO response into a Trigger model (or a
        # caller-supplied custom type via ``cls``).
        deserialized = self._deserialize('Trigger', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers/{name}'} # type: ignore
def _delete_initial(
    self,
    device_name, # type: str
    name, # type: str
    resource_group_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> None
    """Issue the initial DELETE request for the trigger LRO.

    Returns None; raises HttpResponseError on any status other than
    200/202/204. Used internally by :meth:`begin_delete`.
    """
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-09-01"
    accept = "application/json"

    # Construct URL
    url = self._delete_initial.metadata['url'] # type: ignore
    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {} # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {} # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # 200/202: accepted for deletion; 204: already gone.
    if response.status_code not in [200, 202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers/{name}'} # type: ignore
def begin_delete(
    self,
    device_name, # type: str
    name, # type: str
    resource_group_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the trigger on the gateway device.

    :param device_name: The device name.
    :type device_name: str
    :param name: The trigger name.
    :type name: str
    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: Pass in True if you'd like the ARMPolling polling method,
     False for no polling, or your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    # Only issue the initial DELETE when not resuming from a saved poller state.
    if cont_token is None:
        raw_result = self._delete_initial(
            device_name=device_name,
            name=name,
            resource_group_name=resource_group_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    # These kwargs were consumed by the initial call and must not leak into
    # the polling method below.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # DELETE has no body to deserialize; only honour a custom cls hook.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'deviceName': self._serialize.url("device_name", device_name, 'str'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/triggers/{name}'} # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
a44003bb5206592292825279248f7d3fb178359c | 1d7bb0175edf39a04ca665c46e80fc6da8085747 | /trash/IdealGasLaw.py | 8a3b41b715b6959d03f06c8a3bfec6f7fc89ac70 | [] | no_license | ElenaGramellini/PlayingWithCEvNS | 211d54514c0fab2358ea8bc1058fe093303c366f | fb3500c2b25bdbc3d81b12d19da8d1750989f412 | refs/heads/master | 2020-07-31T16:56:53.649085 | 2019-09-24T19:46:44 | 2019-09-24T19:46:44 | 210,683,533 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | ###############################
### Important notes ###
### the v is in mm/microsec ###
### the E is in V/cm ###
###############################
import argparse
import math
# Physical constants for the ideal gas law, PV = (m/M) R T.
R = 8.314            # universal gas constant [J / (mol K)] == [m^3 Pa / (mol K)]
M = 20.1797          # molar mass of neon [g / mol]
pa2Atm = 9.86923e-6  # conversion factor: pascal -> atmosphere


def V2P(V, T, m):
    """Return the pressure in atmospheres of neon gas.

    :param V: volume in m^3
    :param T: temperature in K
    :param m: mass of gas in grams
    """
    return pa2Atm * m * R * T / (V * M)
import matplotlib.pyplot as plt
import numpy as np

# New figure with a white background.
fig1 = plt.figure(facecolor='white')
# Volumes from 0.1 to 10 m^3 in 0.1 m^3 steps.
t1 = np.arange(0.1, 10.0, 0.1)
# Vectorize V2P so it can be applied element-wise to the volume array.
f2 = np.vectorize(V2P)

# Pressure-vs-volume isotherms at T = 100 K for two neon masses.
line1 = plt.plot(t1, f2(t1,100.,1000),label="T = 100 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
line2 = plt.plot(t1, f2(t1,100.,10000),label="T = 100 K, m = 10 Kg, 495 mols",linewidth=2.0)
#line3 = plt.plot(t1, f2(t1,100.,100000),label="T = 100 K, m = 100 Kg, 4950 mols",linewidth=2.0)
#line2 = plt.plot(t1, f2(t1,200.,1),label="T = 200 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
#line3 = plt.plot(t1, f2(t1,300.),label="T = 300 K, m = 1 Kg, 49.5 mols",linewidth=2.0)
#line4 = plt.plot(t1, f2(t1,93.0),label="T = 93.0 K",linewidth=2.0)

# Place the legend using figure (not axes) coordinates.
plt.legend(bbox_to_anchor=(0.8, 0.5),
           bbox_transform=plt.gcf().transFigure)
plt.grid(True)
plt.title('Ideal Gas Law Neon, molar Mass 20.2 g/mol')
font = {'family': 'serif',
        'color': 'black',
        'weight': 'normal',
        'size': 30,
        }
# Annotate the plot with the ideal gas law formula in LaTeX.
plt.text(1, 12, r'$PV = \frac{m}{M} RT$', fontdict=font)
plt.xlabel('Volume [m^3]')
plt.ylabel('Pressure [atm] ')
plt.show()
#plt.plot(t1, E2v(t1,87), 'bo')
| [
"elena.gramellini@yale.edu"
] | elena.gramellini@yale.edu |
ff701be8781c6fbba6a1c24f8f2dbb0e157d6411 | 455a501b6e7579a8d150d40645311433bf22d3c4 | /Day 17/q3.py | 20189d7217d9c34eb7311662bc29ede4156da973 | [] | no_license | Infinidrix/competitive-programming | e77e442b73590b9bf42a40832323d87f57bbbdf4 | 6cf7a9de7d076405990d497871bb2ccfe04fc6f3 | refs/heads/master | 2023-02-09T04:02:31.389806 | 2023-02-02T11:10:10 | 2023-02-02T11:10:10 | 222,917,959 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | def substring_adder(string, lookup):
index = 0
subsum = 0
for i in range(len(string)):
if string[i] in lookup:
index += 1
else:
subsum += (index)*(index+1)/2
index = 0
return int(subsum + (index) * (index + 1) / 2)
no_uses = input()
string = input()
lookup = input().split()
print(substring_adder(string, lookup)) | [
"biruksolomon11@gmail.com"
] | biruksolomon11@gmail.com |
71c0a2e9e86e5b8aff5a4085668128ef7b76a6eb | d64ff38360527cb1a1aa45ba2869a95cdf33ea52 | /src/vumi/webapp/api/urls.py | 69cb428ce821bf2cda3b388b61e7e337c4f7b611 | [] | no_license | smn/richmond | 9d3d8b3e52d89a71181300149f15116e0eec7e64 | 2593293ef5b8fbd659da12ff46c5b6aad1764add | refs/heads/master | 2020-05-20T12:36:59.670573 | 2010-11-15T20:45:26 | 2010-11-15T20:45:26 | 629,376 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from django.conf.urls.defaults import *
from piston.resource import Resource
from piston.authentication import HttpBasicAuthentication
from vumi.webapp.api import handlers
from vumi.webapp.api import views
ad = {'authentication': HttpBasicAuthentication(realm="Vumi")}
url_callback_resource = Resource(handler=handlers.URLCallbackHandler, **ad)
conversation_resource = Resource(handler=handlers.ConversationHandler, **ad)
urlpatterns = patterns('',
(r'^conversation\.yaml$', conversation_resource, {
'emitter_format': 'yaml'
}, 'conversation'),
(r'^account/callbacks\.json$', url_callback_resource, {}, 'url-callbacks-list'),
(r'^account/callbacks/(?P<callback_id>\d+)\.json$', url_callback_resource, {}, 'url-callback'),
(r'^callback\.html$', views.example_sms_callback, {}, 'sms-example-callback'),
)
# gateways
urlpatterns += patterns('',
(r'^sms/clickatell/',
include('vumi.webapp.api.gateways.clickatell.urls',
namespace='clickatell')),
(r'^sms/opera/',
include('vumi.webapp.api.gateways.opera.urls',
namespace='opera')),
(r'^sms/e-scape/',
include('vumi.webapp.api.gateways.e_scape.urls',
namespace='e-scape')),
(r'^sms/techsys/',
include('vumi.webapp.api.gateways.techsys.urls',
namespace='techsys')),
) | [
"simon@soocial.com"
] | simon@soocial.com |
422244505be179d682f30089b16d093e458be9c7 | 06e897ed3b6effc280eca3409907acc174cce0f5 | /plugins/filetime_from_git/content_adapter.py | e3a951272c66b56efff2754d9c4969e311d3d9ae | [
"AGPL-3.0-only",
"MIT"
] | permissive | JackMcKew/jackmckew.dev | ae5a32da4f1b818333ae15c6380bca1329d38f1e | b5d68070b6f15677a183424c84e30440e128e1ea | refs/heads/main | 2023-09-02T14:42:19.010294 | 2023-08-15T22:08:19 | 2023-08-15T22:08:19 | 213,264,451 | 15 | 8 | MIT | 2023-02-14T21:50:28 | 2019-10-07T00:18:15 | JavaScript | UTF-8 | Python | false | false | 2,755 | py | # -*- coding: utf-8 -*-
"""
Wraps a content object to provide some git information
"""
import logging
from pelican.utils import memoized
from .git_wrapper import git_wrapper
DEV_LOGGER = logging.getLogger(__name__)
class GitContentAdapter(object):
    """
    Wraps a content object to provide some git information.

    All query methods are memoized, so each piece of git metadata is
    computed at most once per adapter instance.
    """

    def __init__(self, content):
        self.content = content
        # Git wrapper rooted at the current working directory.
        self.git = git_wrapper(".")
        # Timezone name used when localising commit dates (may be None).
        self.tz_name = content.settings.get("TIMEZONE", None)
        # Whether history lookups should follow file renames.
        self.follow = content.settings["GIT_HISTORY_FOLLOWS_RENAME"]

    @memoized
    def is_committed(self):
        """
        Is committed: True if the source file has at least one commit.
        """
        return len(self.get_commits()) > 0

    @memoized
    def is_modified(self):
        """
        Has content been modified since last commit
        """
        return self.git.is_file_modified(self.content.source_path)

    @memoized
    def is_managed_by_git(self):
        """
        Is content stored in a file managed by git
        """
        return self.git.is_file_managed_by_git(self.content.source_path)

    @memoized
    def get_commits(self):
        """
        Get all commits involving this filename

        :returns: List of commits newest to oldest; empty list if the
            file is not tracked by git.
        """
        if not self.is_managed_by_git():
            return []
        return self.git.get_commits(self.content.source_path, self.follow)

    @memoized
    def get_oldest_commit(self):
        """
        Get oldest commit involving this file

        :returns: Oldest commit
        """
        # Commits are returned newest-first, so the oldest is the last entry.
        return self.git.get_commits(self.content.source_path, self.follow)[-1]

    @memoized
    def get_newest_commit(self):
        """
        Get newest commit involving this file

        :returns: Newest commit
        """
        # follow=False: rename tracking is irrelevant for the latest commit.
        return self.git.get_commits(self.content.source_path, follow=False)[0]

    @memoized
    def get_oldest_filename(self):
        """
        Get the original filename of this content. Implies follow
        """
        # The iterator yields (commit, name) pairs; the first yielded name
        # is the file's earliest known name.
        commit_and_name_iter = self.git.get_commits_and_names_iter(
            self.content.source_path
        )
        _commit, name = next(commit_and_name_iter)
        return name

    @memoized
    def get_oldest_commit_date(self):
        """
        Get datetime of oldest commit involving this file

        :returns: Datetime of oldest commit
        """
        oldest_commit = self.get_oldest_commit()
        return self.git.get_commit_date(oldest_commit, self.tz_name)

    @memoized
    def get_newest_commit_date(self):
        """
        Get datetime of newest commit involving this file

        :returns: Datetime of newest commit
        """
        newest_commit = self.get_newest_commit()
        return self.git.get_commit_date(newest_commit, self.tz_name)
| [
"jackmckew2@gmail.com"
] | jackmckew2@gmail.com |
f82fb02818c9fd23a4cf44fa31f43ad48cd5a419 | d3e6d6555b0314936902727af36de2f1b7432bf8 | /h-index/h-index.py | 96658ad52e376ae31f028b62e5323dcc366f65b1 | [] | no_license | fly2rain/LeetCode | 624b1e06e1aa3174dfb5c81834b58cc8fd7ad073 | 4ddb5a051c6e2051f016a675fd2f5d566c800c2a | refs/heads/master | 2021-01-18T03:12:22.402044 | 2015-12-28T04:31:19 | 2015-12-28T04:31:19 | 85,842,050 | 0 | 1 | null | 2017-03-22T15:05:20 | 2017-03-22T15:05:19 | null | UTF-8 | Python | false | false | 600 | py |
class Solution(object):
    def hIndex(self, citations):
        """Return the h-index of a researcher given their citation counts.

        The h-index is the largest h such that at least h papers have h or
        more citations each.

        :type citations: List[int]
        :rtype: int
        """
        # Walk the counts from largest to smallest. sorted() makes a copy,
        # so the caller's list is no longer mutated in place (the original
        # called citations.sort(), a hidden side effect).
        h_index = 0
        for count in sorted(citations, reverse=True):
            # Once a count is too small, all later (smaller) counts are too.
            if count < h_index + 1:
                break
            h_index += 1
        return h_index
if __name__ == '__main__':
    # Ad-hoc smoke tests (Python 2 print statements).
    # Expected h-indexes: 3, 0, 2, 1, 1, 0.
    print Solution().hIndex([3,0,6,1,5])
    print Solution().hIndex([0,0,0])
    print Solution().hIndex([0,6,5])
    print Solution().hIndex([1])
    print Solution().hIndex([1, 1])
    print Solution().hIndex([])
| [
"xuzheng1111@gmail.com"
] | xuzheng1111@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.