Dataset schema (one row per source file; "nullable" marks columns shown with ⌀):

| column | type | values / range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
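A minimal sketch of how rows with this schema could be streamed using the Hugging Face `datasets` library; the dataset identifier below is a hypothetical placeholder, since this dump does not name its source dataset.

from datasets import load_dataset

# "<dataset-id>" is a placeholder, not a name given anywhere in this document.
ds = load_dataset("<dataset-id>", split="train", streaming=True)
for row in ds:
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    break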
| hexsha: d5ab4988e7871ee69682e36554918210cbdcc91a | size: 37,491 | ext: py | lang: Python |
| repo: parquette/ParFrame | path: oss_src/unity/python/sframe/util/cloudpickle.py | head_hexsha: 0522aa6afdf529b3e91505b70e918f1500aae886 | licenses: ["BSD-3-Clause"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
"""
This class is defined to override standard pickle functionality.
Its goals are the following:
-Serialize lambdas and nested functions to compiled byte code
-Deal with main module correctly
-Deal with other non-serializable objects
It does not include an unpickler, as standard python unpickling suffices.
This module was extracted from the `cloud` package, developed by `PiCloud, Inc.
<http://www.picloud.com>`_.
Copyright (c) 2012, Regents of the University of California.
Copyright (c) 2009 `PiCloud, Inc. <http://www.picloud.com>`_.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the University of California, Berkeley nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import operator
import os
import pickle
import struct
import sys
import types
from functools import partial
import itertools
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import new
import dis
import traceback
#relevant opcodes
STORE_GLOBAL = chr(dis.opname.index('STORE_GLOBAL'))
DELETE_GLOBAL = chr(dis.opname.index('DELETE_GLOBAL'))
LOAD_GLOBAL = chr(dis.opname.index('LOAD_GLOBAL'))
GLOBAL_OPS = [STORE_GLOBAL, DELETE_GLOBAL, LOAD_GLOBAL]
HAVE_ARGUMENT = chr(dis.HAVE_ARGUMENT)
EXTENDED_ARG = chr(dis.EXTENDED_ARG)
import logging
cloudLog = logging.getLogger("Cloud.Transport")
try:
import ctypes
except (MemoryError, ImportError):
logging.warning('Exception raised on importing ctypes. Likely python bug.. some functionality will be disabled', exc_info = True)
ctypes = None
PyObject_HEAD = None
else:
# for reading internal structures
PyObject_HEAD = [
('ob_refcnt', ctypes.c_size_t),
('ob_type', ctypes.c_void_p),
]
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# These helper functions were copied from PiCloud's util module.
def islambda(func):
return getattr(func,'func_name') == '<lambda>'
def xrange_params(xrangeobj):
"""Returns a 3 element tuple describing the xrange start, step, and len
respectively
Note: Only guarentees that elements of xrange are the same. parameters may
be different.
e.g. xrange(1,1) is interpretted as xrange(0,0); both behave the same
though w/ iteration
"""
xrange_len = len(xrangeobj)
if not xrange_len: #empty
return (0,1,0)
start = xrangeobj[0]
if xrange_len == 1: #one element
return start, 1, 1
return (start, xrangeobj[1] - xrangeobj[0], xrange_len)
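# Illustration (added): xrange_params only promises an equivalent xrange, e.g.
#   xrange_params(xrange(1, 1))    -> (0, 1, 0)   # same (empty) elements as xrange(1, 1)
#   xrange_params(xrange(3, 9, 2)) -> (3, 2, 3)   # rebuilds as xrange(3, 9, 2)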
#debug variables intended for developer use:
printSerialization = False
printMemoization = False
useForcedImports = True #Should I use forced imports for tracking?
class CloudPickler(pickle.Pickler):
dispatch = pickle.Pickler.dispatch.copy()
savedForceImports = False
    savedDjangoEnv = False  #hack to transport the django environment
def __init__(self, file, protocol=None, min_size_to_save= 0):
pickle.Pickler.__init__(self,file,protocol)
self.modules = set() #set of modules needed to depickle
self.globals_ref = {} # map ids to dictionary. used to ensure that functions can share global env
def dump(self, obj):
# note: not thread safe
# minimal side-effects, so not fixing
recurse_limit = 3000
base_recurse = sys.getrecursionlimit()
if base_recurse < recurse_limit:
sys.setrecursionlimit(recurse_limit)
self.inject_addons()
try:
return pickle.Pickler.dump(self, obj)
except RuntimeError, e:
if 'recursion' in e.args[0]:
msg = """Could not pickle object as excessively deep recursion required.
Try _fast_serialization=2 or contact PiCloud support"""
raise pickle.PicklingError(msg)
finally:
new_recurse = sys.getrecursionlimit()
if new_recurse == recurse_limit:
sys.setrecursionlimit(base_recurse)
def save_buffer(self, obj):
"""Fallback to save_string"""
pickle.Pickler.save_string(self,str(obj))
dispatch[buffer] = save_buffer
#block broken objects
def save_unsupported(self, obj, pack=None):
raise pickle.PicklingError("Cannot pickle objects of type %s" % type(obj))
dispatch[types.GeneratorType] = save_unsupported
#python2.6+ supports slice pickling. some py2.5 extensions might as well. We just test it
try:
slice(0,1).__reduce__()
except TypeError: #can't pickle -
dispatch[slice] = save_unsupported
#itertools objects do not pickle!
for v in itertools.__dict__.values():
if type(v) is type:
dispatch[v] = save_unsupported
def save_dict(self, obj):
"""hack fix
If the dict is a global, deal with it in a special way
"""
#print 'saving', obj
if obj is __builtins__:
self.save_reduce(_get_module_builtins, (), obj=obj)
else:
pickle.Pickler.save_dict(self, obj)
dispatch[pickle.DictionaryType] = save_dict
def save_module(self, obj, pack=struct.pack):
"""
Save a module as an import
"""
#print 'try save import', obj.__name__
self.modules.add(obj)
self.save_reduce(subimport,(obj.__name__,), obj=obj)
dispatch[types.ModuleType] = save_module #new type
def save_codeobject(self, obj, pack=struct.pack):
"""
Save a code object
"""
#print 'try to save codeobj: ', obj
args = (
obj.co_argcount, obj.co_nlocals, obj.co_stacksize, obj.co_flags, obj.co_code,
obj.co_consts, obj.co_names, obj.co_varnames, obj.co_filename, obj.co_name,
obj.co_firstlineno, obj.co_lnotab, obj.co_freevars, obj.co_cellvars
)
self.save_reduce(types.CodeType, args, obj=obj)
dispatch[types.CodeType] = save_codeobject #new type
def save_function(self, obj, name=None, pack=struct.pack):
""" Registered with the dispatch to handle all function types.
Determines what kind of function obj is (e.g. lambda, defined at
interactive prompt, etc) and handles the pickling appropriately.
"""
write = self.write
name = obj.__name__
try:
modname = pickle.whichmodule(obj, name)
#print 'which gives %s %s %s' % (modname, obj, name)
themodule = sys.modules[modname]
except ImportError as e:
            # package 'six' puts arbitrary MovedModules in sys.modules, which
            # could cause ImportError; we just ignore that for now
logging.warning('Ignored import exception while resolving object "%s", exception message: %s' % (name, e.message))
modname = '__main__'
except KeyError: # eval'd items such as namedtuple give invalid items for their function __module__
modname = '__main__'
if modname == '__main__':
themodule = None
if themodule:
self.modules.add(themodule)
if not self.savedDjangoEnv:
#hack for django - if we detect the settings module, we transport it
django_settings = os.environ.get('DJANGO_SETTINGS_MODULE', '')
if django_settings:
django_mod = sys.modules.get(django_settings)
if django_mod:
cloudLog.debug('Transporting django settings %s during save of %s', django_mod, name)
self.savedDjangoEnv = True
self.modules.add(django_mod)
write(pickle.MARK)
self.save_reduce(django_settings_load, (django_mod.__name__,), obj=django_mod)
write(pickle.POP_MARK)
# if func is lambda, def'ed at prompt, is in main, or is nested, then
# we'll pickle the actual function object rather than simply saving a
# reference (as is done in default pickler), via save_function_tuple.
if islambda(obj) or obj.func_code.co_filename == '<stdin>' or themodule == None:
#Force server to import modules that have been imported in main
modList = None
if themodule == None and not self.savedForceImports:
mainmod = sys.modules['__main__']
if useForcedImports and hasattr(mainmod,'___pyc_forcedImports__'):
modList = list(mainmod.___pyc_forcedImports__)
self.savedForceImports = True
self.save_function_tuple(obj, modList)
return
else: # func is nested
klass = getattr(themodule, name, None)
if klass is None or klass is not obj:
self.save_function_tuple(obj, [themodule])
return
if obj.__dict__:
# essentially save_reduce, but workaround needed to avoid recursion
self.save(_restore_attr)
write(pickle.MARK + pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
self.save(obj.__dict__)
write(pickle.TUPLE + pickle.REDUCE)
else:
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
dispatch[types.FunctionType] = save_function
def save_function_tuple(self, func, forced_imports):
""" Pickles an actual func object.
A func comprises: code, globals, defaults, closure, and dict. We
extract and save these, injecting reducing functions at certain points
to recreate the func object. Keep in mind that some of these pieces
can contain a ref to the func itself. Thus, a naive save on these
pieces could trigger an infinite loop of save's. To get around that,
we first create a skeleton func object using just the code (this is
safe, since this won't contain a ref to the func), and memoize it as
soon as it's created. The other stuff can then be filled in later.
"""
save = self.save
write = self.write
# save the modules (if any)
if forced_imports:
write(pickle.MARK)
save(_modules_to_main)
#print 'forced imports are', forced_imports
forced_names = map(lambda m: m.__name__, forced_imports)
save((forced_names,))
#save((forced_imports,))
write(pickle.REDUCE)
write(pickle.POP_MARK)
code, f_globals, defaults, closure, dct, base_globals = self.extract_func_data(func)
save(_fill_function) # skeleton function updater
write(pickle.MARK) # beginning of tuple that _fill_function expects
# create a skeleton function object and memoize it
save(_make_skel_func)
save((code, len(closure), base_globals))
write(pickle.REDUCE)
self.memoize(func)
# save the rest of the func data needed by _fill_function
save(f_globals)
save(defaults)
save(closure)
save(dct)
write(pickle.TUPLE)
write(pickle.REDUCE) # applies _fill_function on the tuple
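    # Illustration (added, hedged): the skeleton-then-memoize order above is what
    # lets self-referential closures pickle without infinite recursion, e.g.
    #   def make_fact():
    #       def fact(n): return 1 if n < 2 else n * fact(n - 1)
    #       return fact   # fact's closure cell refers back to fact itself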
@staticmethod
def extract_code_globals(co):
"""
Find all globals names read or written to by codeblock co
"""
code = co.co_code
names = co.co_names
out_names = set()
n = len(code)
i = 0
extended_arg = 0
while i < n:
op = code[i]
i = i+1
if op >= HAVE_ARGUMENT:
oparg = ord(code[i]) + ord(code[i+1])*256 + extended_arg
extended_arg = 0
i = i+2
if op == EXTENDED_ARG:
extended_arg = oparg*65536L
if op in GLOBAL_OPS:
out_names.add(names[oparg])
#print 'extracted', out_names, ' from ', names
return out_names
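    # Illustration (added): for   def f(): return len(data)
    # extract_code_globals(f.func_code) yields set(['len', 'data']), the names
    # reached through LOAD_GLOBAL in f's bytecode.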
def extract_func_data(self, func):
"""
Turn the function into a tuple of data necessary to recreate it:
code, globals, defaults, closure, dict
"""
code = func.func_code
# extract all global ref's
func_global_refs = CloudPickler.extract_code_globals(code)
if code.co_consts: # see if nested function have any global refs
for const in code.co_consts:
if type(const) is types.CodeType and const.co_names:
func_global_refs = func_global_refs.union( CloudPickler.extract_code_globals(const))
# process all variables referenced by global environment
f_globals = {}
for var in func_global_refs:
#Some names, such as class functions are not global - we don't need them
if func.func_globals.has_key(var):
f_globals[var] = func.func_globals[var]
# defaults requires no processing
defaults = func.func_defaults
def get_contents(cell):
try:
return cell.cell_contents
except ValueError, e: #cell is empty error on not yet assigned
raise pickle.PicklingError('Function to be pickled has free variables that are referenced before assignment in enclosing scope')
# process closure
if func.func_closure:
closure = map(get_contents, func.func_closure)
else:
closure = []
# save the dict
dct = func.func_dict
if printSerialization:
outvars = ['code: ' + str(code) ]
outvars.append('globals: ' + str(f_globals))
outvars.append('defaults: ' + str(defaults))
outvars.append('closure: ' + str(closure))
print 'function ', func, 'is extracted to: ', ', '.join(outvars)
base_globals = self.globals_ref.get(id(func.func_globals), {})
self.globals_ref[id(func.func_globals)] = base_globals
return (code, f_globals, defaults, closure, dct, base_globals)
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
modname = getattr(obj, "__module__", None)
if modname is None:
modname = pickle.whichmodule(obj, name)
try:
__import__(modname)
themodule = sys.modules[modname]
except (ImportError, KeyError, AttributeError): #should never occur
raise pickle.PicklingError(
"Can't pickle %r: Module %s cannot be found" %
(obj, modname))
if modname == '__main__':
themodule = None
if themodule:
self.modules.add(themodule)
sendRef = True
typ = type(obj)
#print 'saving', obj, typ
try:
try: #Deal with case when getattribute fails with exceptions
klass = getattr(themodule, name)
except (AttributeError):
                if modname == '__builtin__': #new.* are misreported
modname = 'new'
__import__(modname)
themodule = sys.modules[modname]
try:
klass = getattr(themodule, name)
except AttributeError, a:
#print themodule, name, obj, type(obj)
raise pickle.PicklingError("Can't pickle builtin %s" % obj)
else:
raise
except (ImportError, KeyError, AttributeError):
if typ == types.TypeType or typ == types.ClassType:
sendRef = False
else: #we can't deal with this
raise
else:
if klass is not obj and (typ == types.TypeType or typ == types.ClassType):
sendRef = False
if not sendRef:
#note: Third party types might crash this - add better checks!
d = dict(obj.__dict__) #copy dict proxy to a dict
if not isinstance(d.get('__dict__', None), property): # don't extract dict that are properties
d.pop('__dict__',None)
d.pop('__weakref__',None)
# hack as __new__ is stored differently in the __dict__
new_override = d.get('__new__', None)
if new_override:
d['__new__'] = obj.__new__
self.save_reduce(type(obj),(obj.__name__,obj.__bases__,
d),obj=obj)
#print 'internal reduce dask %s %s' % (obj, d)
return
if self.proto >= 2:
code = _extension_registry.get((modname, name))
if code:
assert code > 0
if code <= 0xff:
write(pickle.EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (pickle.EXT2, code&0xff, code>>8))
else:
write(pickle.EXT4 + pack("<i", code))
return
write(pickle.GLOBAL + modname + '\n' + name + '\n')
self.memoize(obj)
dispatch[types.ClassType] = save_global
dispatch[types.BuiltinFunctionType] = save_global
dispatch[types.TypeType] = save_global
def save_instancemethod(self, obj):
#Memoization rarely is ever useful due to python bounding
self.save_reduce(types.MethodType, (obj.im_func, obj.im_self,obj.im_class), obj=obj)
dispatch[types.MethodType] = save_instancemethod
def save_inst_logic(self, obj):
"""Inner logic to save instance. Based off pickle.save_inst
Supports __transient__"""
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
pickle._keep_alive(args, memo)
else:
args = ()
write(pickle.MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(pickle.OBJ)
else:
for arg in args:
save(arg)
write(pickle.INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
#remove items if transient
if hasattr(obj, '__transient__'):
transient = obj.__transient__
stuff = stuff.copy()
for k in list(stuff.keys()):
if k in transient:
del stuff[k]
else:
stuff = getstate()
pickle._keep_alive(stuff, memo)
save(stuff)
write(pickle.BUILD)
def save_inst(self, obj):
# Hack to detect PIL Image instances without importing Imaging
# PIL can be loaded with multiple names, so we don't check sys.modules for it
if hasattr(obj,'im') and hasattr(obj,'palette') and 'Image' in obj.__module__:
self.save_image(obj)
else:
self.save_inst_logic(obj)
dispatch[types.InstanceType] = save_inst
def save_property(self, obj):
# properties not correctly saved in python
self.save_reduce(property, (obj.fget, obj.fset, obj.fdel, obj.__doc__), obj=obj)
dispatch[property] = save_property
def save_itemgetter(self, obj):
"""itemgetter serializer (needed for namedtuple support)
a bit of a pain as we need to read ctypes internals"""
class ItemGetterType(ctypes.Structure):
_fields_ = PyObject_HEAD + [
('nitems', ctypes.c_size_t),
('item', ctypes.py_object)
]
itemgetter_obj = ctypes.cast(ctypes.c_void_p(id(obj)), ctypes.POINTER(ItemGetterType)).contents
return self.save_reduce(operator.itemgetter, (itemgetter_obj.item,))
if PyObject_HEAD:
dispatch[operator.itemgetter] = save_itemgetter
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
"""Modified to support __transient__ on new objects
        Change only affects protocol level 2 (which is always used by PiCloud)"""
# Assert that args is a tuple or None
if not isinstance(args, types.TupleType):
raise pickle.PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise pickle.PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
#Added fix to allow transient
cls = args[0]
if not hasattr(cls, "__new__"):
raise pickle.PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise pickle.PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
#Don't pickle transient entries
if hasattr(obj, '__transient__'):
transient = obj.__transient__
state = state.copy()
for k in list(state.keys()):
if k in transient:
del state[k]
save(args)
write(pickle.NEWOBJ)
else:
save(func)
save(args)
write(pickle.REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
#print 'obj %s has state %s' % (obj, state)
save(state)
write(pickle.BUILD)
def save_xrange(self, obj):
"""Save an xrange object in python 2.5
Python 2.6 supports this natively
"""
range_params = xrange_params(obj)
self.save_reduce(_build_xrange,range_params)
#python2.6+ supports xrange pickling. some py2.5 extensions might as well. We just test it
try:
xrange(0).__reduce__()
except TypeError: #can't pickle -- use PiCloud pickler
dispatch[xrange] = save_xrange
def save_partial(self, obj):
"""Partial objects do not serialize correctly in python2.x -- this fixes the bugs"""
self.save_reduce(_genpartial, (obj.func, obj.args, obj.keywords))
if sys.version_info < (2,7): #2.7 supports partial pickling
dispatch[partial] = save_partial
def save_file(self, obj):
"""Save a file"""
import StringIO as pystringIO #we can't use cStringIO as it lacks the name attribute
from ..transport.adapter import SerializingAdapter
if not hasattr(obj, 'name') or not hasattr(obj, 'mode'):
raise pickle.PicklingError("Cannot pickle files that do not map to an actual file")
if obj.name == '<stdout>':
return self.save_reduce(getattr, (sys,'stdout'), obj=obj)
if obj.name == '<stderr>':
return self.save_reduce(getattr, (sys,'stderr'), obj=obj)
if obj.name == '<stdin>':
raise pickle.PicklingError("Cannot pickle standard input")
if hasattr(obj, 'isatty') and obj.isatty():
raise pickle.PicklingError("Cannot pickle files that map to tty objects")
if 'r' not in obj.mode:
raise pickle.PicklingError("Cannot pickle files that are not opened for reading")
name = obj.name
try:
fsize = os.stat(name).st_size
except OSError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be stat" % name)
if obj.closed:
#create an empty closed string io
retval = pystringIO.StringIO("")
retval.close()
elif not fsize: #empty file
retval = pystringIO.StringIO("")
try:
tmpfile = file(name)
tst = tmpfile.read(1)
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
tmpfile.close()
if tst != '':
raise pickle.PicklingError("Cannot pickle file %s as it does not appear to map to a physical, real file" % name)
elif fsize > SerializingAdapter.max_transmit_data:
raise pickle.PicklingError("Cannot pickle file %s as it exceeds cloudconf.py's max_transmit_data of %d" %
(name,SerializingAdapter.max_transmit_data))
else:
try:
tmpfile = file(name)
contents = tmpfile.read(SerializingAdapter.max_transmit_data)
tmpfile.close()
except IOError:
raise pickle.PicklingError("Cannot pickle file %s as it cannot be read" % name)
retval = pystringIO.StringIO(contents)
curloc = obj.tell()
retval.seek(curloc)
retval.name = name
self.save(retval) #save stringIO
self.memoize(obj)
dispatch[file] = save_file
"""Special functions for Add-on libraries"""
def inject_numpy(self):
numpy = sys.modules.get('numpy')
if not numpy or not hasattr(numpy, 'ufunc'):
return
self.dispatch[numpy.ufunc] = self.__class__.save_ufunc
numpy_tst_mods = ['numpy', 'scipy.special']
def save_ufunc(self, obj):
"""Hack function for saving numpy ufunc objects"""
name = obj.__name__
for tst_mod_name in self.numpy_tst_mods:
tst_mod = sys.modules.get(tst_mod_name, None)
if tst_mod:
if name in tst_mod.__dict__:
self.save_reduce(_getobject, (tst_mod_name, name))
return
raise pickle.PicklingError('cannot save %s. Cannot resolve what module it is defined in' % str(obj))
def inject_timeseries(self):
"""Handle bugs with pickling scikits timeseries"""
tseries = sys.modules.get('scikits.timeseries.tseries')
if not tseries or not hasattr(tseries, 'Timeseries'):
return
self.dispatch[tseries.Timeseries] = self.__class__.save_timeseries
def save_timeseries(self, obj):
import scikits.timeseries.tseries as ts
func, reduce_args, state = obj.__reduce__()
if func != ts._tsreconstruct:
raise pickle.PicklingError('timeseries using unexpected reconstruction function %s' % str(func))
state = (1,
obj.shape,
obj.dtype,
obj.flags.fnc,
obj._data.tostring(),
ts.getmaskarray(obj).tostring(),
obj._fill_value,
obj._dates.shape,
obj._dates.__array__().tostring(),
obj._dates.dtype, #added -- preserve type
obj.freq,
obj._optinfo,
)
return self.save_reduce(_genTimeSeries, (reduce_args, state))
def inject_email(self):
"""Block email LazyImporters from being saved"""
email = sys.modules.get('email')
if not email:
return
self.dispatch[email.LazyImporter] = self.__class__.save_unsupported
def inject_unity_proxy(self):
# get the top level module
gl = __import__(__name__.split('.')[0])
if not gl:
return
## Make sure the unity objects are not picklable ##
self.dispatch[gl.SArray] = self.__class__.save_unsupported
self.dispatch[gl.SFrame] = self.__class__.save_unsupported
self.dispatch[gl.SGraph] = self.__class__.save_unsupported
self.dispatch[gl.Graph] = self.__class__.save_unsupported
self.dispatch[gl.Sketch] = self.__class__.save_unsupported
self.dispatch[gl.Model] = self.__class__.save_unsupported
## Make sure the underlying cython objects are not picklable ##
self.dispatch[gl.cython.cy_sarray.UnitySArrayProxy] = self.__class__.save_unsupported
self.dispatch[gl.cython.cy_sframe.UnitySFrameProxy] = self.__class__.save_unsupported
self.dispatch[gl.cython.cy_sketch.UnitySketchProxy] = self.__class__.save_unsupported
self.dispatch[gl.cython.cy_graph.UnityGraphProxy] = self.__class__.save_unsupported
self.dispatch[gl.cython.cy_model.UnityModel] = self.__class__.save_unsupported
self.dispatch[gl.cython.cy_ipc.PyCommClient] = self.__class__.save_unsupported
def inject_addons(self):
"""Plug in system. Register additional pickling functions if modules already loaded"""
self.inject_numpy()
self.inject_timeseries()
self.inject_email()
self.inject_unity_proxy()
"""Python Imaging Library"""
def save_image(self, obj):
if not obj.im and obj.fp and 'r' in obj.fp.mode and obj.fp.name \
and not obj.fp.closed and (not hasattr(obj, 'isatty') or not obj.isatty()):
#if image not loaded yet -- lazy load
self.save_reduce(_lazyloadImage,(obj.fp,), obj=obj)
else:
#image is loaded - just transmit it over
self.save_reduce(_generateImage, (obj.size, obj.mode, obj.tostring()), obj=obj)
def memoize(self, obj):
if not id(obj) in self.memo:
pickle.Pickler.memoize(self, obj)
if printMemoization:
print 'memoizing ' + str(obj)
# Shorthands for legacy support
def dump(obj, file, protocol=2):
CloudPickler(file, protocol).dump(obj)
def dumps(obj, protocol=2):
file = StringIO()
cp = CloudPickler(file,protocol)
cp.dump(obj)
#print 'cloud dumped', str(obj), str(cp.modules)
return file.getvalue()
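# Hedged usage sketch (added for illustration; not part of the original module):
# the shorthands above emit ordinary pickle byte streams, so the standard
# unpickler restores them, as the module docstring notes.
def _example_roundtrip():
    """Illustrative only: serialize a lambda with dumps() and restore it."""
    payload = dumps(lambda x: x * 2)   # lambdas are handled by CloudPickler
    restored = pickle.loads(payload)   # plain pickle.loads is enough
    assert restored(21) == 42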
#hack for __import__ not working as desired
def subimport(name):
__import__(name)
return sys.modules[name]
#hack to load django settings:
def django_settings_load(name):
modified_env = False
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
os.environ['DJANGO_SETTINGS_MODULE'] = name # must set name first due to circular deps
modified_env = True
try:
module = subimport(name)
except Exception, i:
        print >> sys.stderr, 'Could not import django settings %s:' % (name)
print_exec(sys.stderr)
if modified_env:
del os.environ['DJANGO_SETTINGS_MODULE']
else:
        #add project directory to sys.path:
if hasattr(module,'__file__'):
dirname = os.path.split(module.__file__)[0] + '/'
sys.path.append(dirname)
# restores function attributes
def _restore_attr(obj, attr):
for key, val in attr.items():
setattr(obj, key, val)
return obj
def _get_module_builtins():
return pickle.__builtins__
def print_exec(stream):
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
def _modules_to_main(modList):
"""Force every module in modList to be placed into main"""
if not modList:
return
main = sys.modules['__main__']
for modname in modList:
if type(modname) is str:
try:
mod = __import__(modname)
except Exception, i: #catch all...
sys.stderr.write('warning: could not import %s\n. Your function may unexpectedly error due to this import failing; \
A version mismatch is likely. Specific error was:\n' % modname)
print_exec(sys.stderr)
else:
setattr(main,mod.__name__, mod)
else:
#REVERSE COMPATIBILITY FOR CLOUD CLIENT 1.5 (WITH EPD)
#In old version actual module was sent
setattr(main,modname.__name__, modname)
#object generators:
def _build_xrange(start, step, len):
"""Built xrange explicitly"""
return xrange(start, start + step*len, step)
def _genpartial(func, args, kwds):
if not args:
args = ()
if not kwds:
kwds = {}
return partial(func, *args, **kwds)
def _fill_function(func, globals, defaults, closure, dict):
""" Fills in the rest of function data into the skeleton function object
    that was created via _make_skel_func().
"""
func.func_globals.update(globals)
func.func_defaults = defaults
func.func_dict = dict
if len(closure) != len(func.func_closure):
raise pickle.UnpicklingError("closure lengths don't match up")
for i in range(len(closure)):
_change_cell_value(func.func_closure[i], closure[i])
return func
def _make_skel_func(code, num_closures, base_globals = None):
""" Creates a skeleton function object that contains just the provided
code and the correct number of cells in func_closure. All other
func attributes (e.g. func_globals) are empty.
"""
#build closure (cells):
if not ctypes:
raise Exception('ctypes failed to import; cannot build function')
cellnew = ctypes.pythonapi.PyCell_New
cellnew.restype = ctypes.py_object
cellnew.argtypes = (ctypes.py_object,)
dummy_closure = tuple(map(lambda i: cellnew(None), range(num_closures)))
if base_globals is None:
base_globals = {}
base_globals['__builtins__'] = __builtins__
return types.FunctionType(code, base_globals,
None, None, dummy_closure)
# this piece of opaque code is needed below to modify 'cell' contents
cell_changer_code = new.code(
1, 1, 2, 0,
''.join([
chr(dis.opmap['LOAD_FAST']), '\x00\x00',
chr(dis.opmap['DUP_TOP']),
chr(dis.opmap['STORE_DEREF']), '\x00\x00',
chr(dis.opmap['RETURN_VALUE'])
]),
(), (), ('newval',), '<nowhere>', 'cell_changer', 1, '', ('c',), ()
)
def _change_cell_value(cell, newval):
""" Changes the contents of 'cell' object to newval """
return new.function(cell_changer_code, {}, None, (), (cell,))(newval)
"""Constructors for 3rd party libraries
Note: These can never be renamed due to client compatibility issues"""
def _getobject(modname, attribute):
mod = __import__(modname)
return mod.__dict__[attribute]
def _generateImage(size, mode, str_rep):
"""Generate image from string representation"""
import Image
i = Image.new(mode, size)
i.fromstring(str_rep)
return i
def _lazyloadImage(fp):
import Image
fp.seek(0) #works in almost any case
return Image.open(fp)
"""Timeseries"""
def _genTimeSeries(reduce_args, state):
import scikits.timeseries.tseries as ts
from numpy import ndarray
from numpy.ma import MaskedArray
time_series = ts._tsreconstruct(*reduce_args)
#from setstate modified
(ver, shp, typ, isf, raw, msk, flv, dsh, dtm, dtyp, frq, infodict) = state
#print 'regenerating %s' % dtyp
MaskedArray.__setstate__(time_series, (ver, shp, typ, isf, raw, msk, flv))
_dates = time_series._dates
#_dates.__setstate__((ver, dsh, typ, isf, dtm, frq)) #use remote typ
ndarray.__setstate__(_dates,(dsh,dtyp, isf, dtm))
_dates.freq = frq
_dates._cachedinfo.update(dict(full=None, hasdups=None, steps=None,
toobj=None, toord=None, tostr=None))
# Update the _optinfo dictionary
time_series._optinfo.update(infodict)
return time_series
| avg_line_length: 37.193452 | max_line_length: 144 | alphanum_fraction: 0.619055 |

| hexsha: 30e0368287de7918f586e68038890e4315f548b4 | size: 250 | ext: py | lang: Python |
| repo: agentofknowledge/ITMO_ICT_WebDevelopment_2020-2021 | path: students/K33402/Beresnev_Andrey/practical2.3/django_project_beresnev/project_first_app/forms.py | head_hexsha: 7d5eab0d68af378083f21473cbbd5e5def6aa60a | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: 4 (2020-09-03T15:41:42.000Z to 2021-12-24T15:28:20.000Z) | max_issues_count: 48 (2020-09-13T20:22:42.000Z to 2021-04-30T11:13:30.000Z) | max_forks_count: 69 (2020-09-06T10:32:37.000Z to 2021-11-28T18:13:17.000Z) |
from django import forms
from .models import Car, Owner
class AddCarForm(forms.ModelForm):
class Meta:
model = Car
fields = [
"brand",
"model",
"color",
"plate_number"
]
| avg_line_length: 15.625 | max_line_length: 34 | alphanum_fraction: 0.496 |

| hexsha: f307a8a9ba0dee1a00e2a362e176a0730d62ae4a | size: 955 | ext: py | lang: Python |
| repo: HankKung/DistNAS | path: architect.py | head_hexsha: 10eb040a54f5ee04b59a9293480890992923a4f6 | licenses: ["Apache-2.0"] (identical across the stars/issues/forks column groups) |
| max_stars_count: 11 (2019-08-12T07:47:03.000Z to 2021-06-07T06:14:49.000Z) | max_issues_count: 1 (2020-01-30T09:50:15.000Z to 2020-01-31T00:51:37.000Z) | max_forks_count: 2 (2019-08-12T08:54:36.000Z to 2020-02-29T11:47:44.000Z) |
import torch
import numpy as np
import torch.nn as nn
class Architect () :
def __init__(self, model, args):
self.network_momentum = args.momentum
self.network_weight_decay = args.weight_decay
self.model = model
self.optimizer = torch.optim.Adam(self.model.arch_parameters(),
lr=args.arch_lr, betas=(0.9, 0.999), weight_decay=args.arch_weight_decay)
def step (self, input_valid, target_valid) :
# self.model.soft_parameters()
# print(self.model.arch_parameters()[0][:2])
# print(self.model.arch_parameters()[1][:2])
# print(self.model.arch_parameters()[2])
self.optimizer.zero_grad ()
self._backward_step(input_valid, target_valid)
self.optimizer.step()
def _backward_step (self, input_valid, target_valid) :
        logit, device_logit, loss, _, _, _ = self.model._loss(input_valid, target_valid)
loss.backward ()
| avg_line_length: 31.833333 | max_line_length: 89 | alphanum_fraction: 0.651309 |

| hexsha: 1b31ded4c61f4c9c7ef4942289ff6ca6ecc766db | size: 8,183 | ext: py | lang: Python |
| repo: Maikor/ydk-py | path: cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_MPLS_LSR_EXT_STD_MIB.py | head_hexsha: b86c4a7c570ae3b2c5557d098420446df5de4929 | licenses: ["ECL-2.0", "Apache-2.0"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
""" CISCO_MPLS_LSR_EXT_STD_MIB
Copyright (c) 2012 IETF Trust and the persons identified
as the document authors. All rights reserved.
This MIB module contains generic object definitions for
MPLS LSR in transport networks.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class CISCOMPLSLSREXTSTDMIB(Entity):
"""
.. attribute:: cmplsxcexttable
This table sparse augments the mplsXCTable of MPLS\-LSR\-STD\-MIB [RFC3813] to provide MPLS\-TP specific information about associated tunnel information
**type**\: :py:class:`CmplsXCExtTable <ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB.CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable>`
"""
_prefix = 'CISCO-MPLS-LSR-EXT-STD-MIB'
_revision = '2012-04-30'
def __init__(self):
super(CISCOMPLSLSREXTSTDMIB, self).__init__()
self._top_entity = None
self.yang_name = "CISCO-MPLS-LSR-EXT-STD-MIB"
self.yang_parent_name = "CISCO-MPLS-LSR-EXT-STD-MIB"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cmplsXCExtTable", ("cmplsxcexttable", CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable))])
self._leafs = OrderedDict()
self.cmplsxcexttable = CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable()
self.cmplsxcexttable.parent = self
self._children_name_map["cmplsxcexttable"] = "cmplsXCExtTable"
self._segment_path = lambda: "CISCO-MPLS-LSR-EXT-STD-MIB:CISCO-MPLS-LSR-EXT-STD-MIB"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOMPLSLSREXTSTDMIB, [], name, value)
class CmplsXCExtTable(Entity):
"""
This table sparse augments the mplsXCTable of
MPLS\-LSR\-STD\-MIB [RFC3813] to provide MPLS\-TP specific
information about associated tunnel information
.. attribute:: cmplsxcextentry
An entry in this table extends the cross connect information represented by an entry in the mplsXCTable in MPLS\-LSR\-STD\-MIB [RFC3813] through a sparse augmentation. An entry can be created by a network administrator via SNMP SET commands, or in response to signaling protocol events
**type**\: list of :py:class:`CmplsXCExtEntry <ydk.models.cisco_ios_xe.CISCO_MPLS_LSR_EXT_STD_MIB.CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable.CmplsXCExtEntry>`
"""
_prefix = 'CISCO-MPLS-LSR-EXT-STD-MIB'
_revision = '2012-04-30'
def __init__(self):
super(CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable, self).__init__()
self.yang_name = "cmplsXCExtTable"
self.yang_parent_name = "CISCO-MPLS-LSR-EXT-STD-MIB"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_classes = OrderedDict([("cmplsXCExtEntry", ("cmplsxcextentry", CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable.CmplsXCExtEntry))])
self._leafs = OrderedDict()
self.cmplsxcextentry = YList(self)
self._segment_path = lambda: "cmplsXCExtTable"
self._absolute_path = lambda: "CISCO-MPLS-LSR-EXT-STD-MIB:CISCO-MPLS-LSR-EXT-STD-MIB/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable, [], name, value)
class CmplsXCExtEntry(Entity):
"""
An entry in this table extends the cross connect
information represented by an entry in
the mplsXCTable in MPLS\-LSR\-STD\-MIB [RFC3813] through
a sparse augmentation. An entry can be created by
a network administrator via SNMP SET commands, or in
response to signaling protocol events.
.. attribute:: mplsxcindex (key)
**type**\: str
**length:** 1..24
**refers to**\: :py:class:`mplsxcindex <ydk.models.cisco_ios_xe.MPLS_LSR_STD_MIB.MPLSLSRSTDMIB.MplsXCTable.MplsXCEntry>`
.. attribute:: mplsxcinsegmentindex (key)
**type**\: str
**length:** 1..24
**refers to**\: :py:class:`mplsxcinsegmentindex <ydk.models.cisco_ios_xe.MPLS_LSR_STD_MIB.MPLSLSRSTDMIB.MplsXCTable.MplsXCEntry>`
.. attribute:: mplsxcoutsegmentindex (key)
**type**\: str
**length:** 1..24
**refers to**\: :py:class:`mplsxcoutsegmentindex <ydk.models.cisco_ios_xe.MPLS_LSR_STD_MIB.MPLSLSRSTDMIB.MplsXCTable.MplsXCEntry>`
.. attribute:: cmplsxcexttunnelpointer
This object indicates the back pointer to the tunnel entry segment. This object cannot be modified if mplsXCRowStatus for the corresponding entry in the mplsXCTable is active(1)
**type**\: str
**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*
.. attribute:: cmplsxcoppositedirxcptr
This object indicates the pointer to the opposite direction XC entry. This object cannot be modified if mplsXCRowStatus for the corresponding entry in the mplsXCTable is active(1)
**type**\: str
**pattern:** (([0\-1](\\.[1\-3]?[0\-9]))\|(2\\.(0\|([1\-9]\\d\*))))(\\.(0\|([1\-9]\\d\*)))\*
"""
_prefix = 'CISCO-MPLS-LSR-EXT-STD-MIB'
_revision = '2012-04-30'
def __init__(self):
super(CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable.CmplsXCExtEntry, self).__init__()
self.yang_name = "cmplsXCExtEntry"
self.yang_parent_name = "cmplsXCExtTable"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['mplsxcindex','mplsxcinsegmentindex','mplsxcoutsegmentindex']
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('mplsxcindex', (YLeaf(YType.str, 'mplsXCIndex'), ['str'])),
('mplsxcinsegmentindex', (YLeaf(YType.str, 'mplsXCInSegmentIndex'), ['str'])),
('mplsxcoutsegmentindex', (YLeaf(YType.str, 'mplsXCOutSegmentIndex'), ['str'])),
('cmplsxcexttunnelpointer', (YLeaf(YType.str, 'cmplsXCExtTunnelPointer'), ['str'])),
('cmplsxcoppositedirxcptr', (YLeaf(YType.str, 'cmplsXCOppositeDirXCPtr'), ['str'])),
])
self.mplsxcindex = None
self.mplsxcinsegmentindex = None
self.mplsxcoutsegmentindex = None
self.cmplsxcexttunnelpointer = None
self.cmplsxcoppositedirxcptr = None
self._segment_path = lambda: "cmplsXCExtEntry" + "[mplsXCIndex='" + str(self.mplsxcindex) + "']" + "[mplsXCInSegmentIndex='" + str(self.mplsxcinsegmentindex) + "']" + "[mplsXCOutSegmentIndex='" + str(self.mplsxcoutsegmentindex) + "']"
self._absolute_path = lambda: "CISCO-MPLS-LSR-EXT-STD-MIB:CISCO-MPLS-LSR-EXT-STD-MIB/cmplsXCExtTable/%s" % self._segment_path()
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable.CmplsXCExtEntry, ['mplsxcindex', 'mplsxcinsegmentindex', 'mplsxcoutsegmentindex', 'cmplsxcexttunnelpointer', 'cmplsxcoppositedirxcptr'], name, value)
def clone_ptr(self):
self._top_entity = CISCOMPLSLSREXTSTDMIB()
return self._top_entity
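# Hedged usage sketch (added for illustration; not part of the generated module):
# populating the sparse-augmentation table described in the docstrings above.
def _example_build_entry():
    mib = CISCOMPLSLSREXTSTDMIB()
    entry = CISCOMPLSLSREXTSTDMIB.CmplsXCExtTable.CmplsXCExtEntry()
    entry.mplsxcindex = "1"
    entry.mplsxcinsegmentindex = "2"
    entry.mplsxcoutsegmentindex = "3"
    mib.cmplsxcexttable.cmplsxcextentry.append(entry)
    return mib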
| avg_line_length: 43.759358 | max_line_length: 295 | alphanum_fraction: 0.618111 |

| hexsha: d68bdec232d69055000ccd0752df89bb8016b49d | size: 284 | ext: py | lang: Python |
| repo: beatrizdaddea/Ex.Python | path: Lista_Introducao/Lista 1/Ex9.py | head_hexsha: 052cfa4f31c90d83d1904f8c63abfdbe8181177c | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
"""9.Escreva uma expressão para determinar se uma pessoa deve ou não pagar imposto. Considere que pagam imposto pessoas cujo salário é maior que R$ 1.200,00"""
salario = float(input("Digite seu salário: "))
pagar_imposto = salario > 1200
print("Vai pagar imporsto: ", pagar_imposto)
| avg_line_length: 47.333333 | max_line_length: 159 | alphanum_fraction: 0.757042 |

| hexsha: 268acb15a53985085e8d1c12f2c24bcc829820b3 | size: 1,373 | ext: py | lang: Python |
| repo: Soyvolon/CISS_380 | path: assignment_fourteen_2021_10_21/student.py | head_hexsha: 81ba41ef45ba8f4a4cfc55f9e20b87c5feddba08 | licenses: ["Unlicense"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
# 2. Write Python program that will allow a professor to maintain a list of
# students for a given course. The program should contain a basic Student
# class with attributes for the first name, last name and test score. Write
# the basic __init__, __str__ methods and get and set methods for the attributes.
class Student(object):
def __init__(self, firstName, lastName, score = None):
self.firstName = firstName
self.lastName = lastName
self.score = score
def get_first_name(self):
return self.firstName
def set_first_name(self, firstName):
        self.firstName = firstName
def get_last_name(self):
return self.lastName
def set_last_name(self, lastName):
        self.lastName = lastName
def get_score(self):
return self.score
def set_score(self, score):
self.score = score
def get_full_name(self):
return self.firstName + " " + self.lastName
def __str__(self):
return "{ \"first_name\": \"{}\", \"last_name\": \"{}\", \"score\": \"{}\" }"\
.format(self.firstName, self.lastName, self.score)
def __eq__(self, o: object) -> bool:
if not isinstance(o, Student):
return False
return o.firstName.lower() == self.firstName.lower() \
and o.lastName.lower() == self.lastName.lower()
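# Hedged usage sketch (added for illustration; not part of the assignment file):
# a professor's course roster built from the Student class above.
if __name__ == "__main__":
    roster = [Student("Ada", "Lovelace", 95), Student("Alan", "Turing")]
    roster[1].set_score(88)                          # fill in a missing test score
    print(roster[0].get_full_name())                 # -> Ada Lovelace
    print(Student("ADA", "LOVELACE") == roster[0])   # case-insensitive equality -> True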
| avg_line_length: 32.690476 | max_line_length: 86 | alphanum_fraction: 0.631464 |

| hexsha: f389570f741fd6171adb3290a92791527b196bef | size: 715 | ext: py | lang: Python |
| repo: sc458/uHunt-solutions | path: 10082/wertyu.py | head_hexsha: 37464e1db98c897995eab79caa6c70f379ad877a | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
correct = ['1','2','3','4','5','6','7','8','9','0','-','=','W','E',
'R','T','Y','U','I','O','P','[',']',chr(92),'S','D','F','G','H','J',
'K','L',';',"'",'X','C','V','B','N','M',',','.','/',' '];
new = ['`','1','2','3','4','5','6','7','8','9','0','-','Q','W','E',
'R','T','Y','U','I','O','P','[',']','A','S','D','F','G','H','J',
'K','L',';','Z','X','C','V','B','N','M',',','.',' '];
while(True):
try:
inp = input()
if not inp:
raise ValueError
except ValueError:
break
except EOFError:
break
res = ''
for i in range(0,len(inp)):
ind = -1
for j in range(0,len(correct)):
if(inp[i] == correct[j]):
ind = j
break
res = res + new[ind]
print(res)
| avg_line_length: 21.666667 | max_line_length: 69 | alphanum_fraction: 0.359441 |

| hexsha: a88e844197a3f2fab279a323db19e11eea7421f6 | size: 450 | ext: py | lang: Python |
| repo: sladinji/blousebrothers | path: blousebrothers/catalogue/migrations/0011_product_for_sale.py | head_hexsha: 461de3ba011c0aaed3f0014136c4497b6890d086 | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: 1 (2022-01-27T11:58:10.000Z to 2022-01-27T11:58:10.000Z) | max_issues_count: 5 (2021-03-19T00:01:54.000Z to 2022-03-11T23:46:21.000Z) | max_forks_count: null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-10-31 15:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0010_product_conf'),
]
operations = [
migrations.AddField(
model_name='product',
name='for_sale',
field=models.BooleanField(default=False),
),
]
| avg_line_length: 21.428571 | max_line_length: 53 | alphanum_fraction: 0.615556 |

| hexsha: d73bed8faa822a28420b4783c3b9f615c58a490c | size: 495 | ext: py | lang: Python |
| repo: radeinla/tarambay | path: tarambay/tarambay/users/migrations/0005_auto_20150816_0329.py | head_hexsha: 7146ce785a8844f3c2dc229c713722bb63d78200 | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import uuid
from django.db import models, migrations
def add_uuid(apps, schema_editor):
Invited = apps.get_model('users', 'Invited')
for invited in Invited.objects.all():
invited.uuid = u'' + str(uuid.uuid1().hex)
invited.save()
class Migration(migrations.Migration):
dependencies = [
('users', '0004_invited_uuid'),
]
operations = [
migrations.RunPython(add_uuid),
]
| avg_line_length: 20.625 | max_line_length: 50 | alphanum_fraction: 0.648485 |

| hexsha: 0c3652fcd12d22c721bd95a9a010be5065904305 | size: 6,706 | ext: py | lang: Python |
| repo: qai222/CompAugCycleGAN | path: scripts/eval_tuned.py | head_hexsha: 1a9d54237f4470a4fd5ab215993ed5b373a87e86 | licenses: ["CC-BY-4.0"] (identical across the stars/issues/forks column groups) |
| max_stars_count: null | max_issues_count: null | max_forks_count: null |
import glob
import logging
import os.path
import pprint
from collections import Counter
from cacgan.data import FormulaDataset, GroupAB
from cacgan.gans import Trainer
from cacgan.utils import *
"""
evaluate the tuned model
download the tuned model from https://doi.org/10.5281/zenodo.5721355
(you can also use `zenodo_get 10.5281/zenodo.5721355` in cmd to download)
unzip and place the folders at ../tuned/
"""
def plot_bar_element_ratio(real_ratios, saveas="cnratio_barplot.tiff"):
c = Counter(real_ratios)
p = {k: v / sum(c.values()) for k, v in c.items()}
pp = {}
for k in p:
near_ps = [kk for kk in pp if abs(kk - k) < 1e-3]
if len(near_ps) == 0:
pp[k] = p[k]
else:
pp[near_ps[0]] += p[k]
p = pp
x = [k for k in p]
y = [v for v in p.values()]
f, (ax, ax2) = plt.subplots(2, 1, sharex='all')
width = 0.05
ax.bar(x, y, width=width, facecolor="k", label=r"$C_B$")
ax2.bar(x, y, width=width, facecolor="k")
ax.set_ylim(.8, 0.86) # outliers only
ax2.set_ylim(0, .06) # most of the data
ax.spines['bottom'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
ax.tick_params(labeltop=False) # don't put tick labels at the top
ax2.xaxis.tick_bottom()
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax.transAxes, color='k', clip_on=False)
ax.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
kwargs.update(transform=ax2.transAxes) # switch to the bottom axes
ax2.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax2.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
plt.xlim([0., 4.1])
ax2.set_xlabel("C/N ratio")
ax2.set_ylabel("Probability", loc="top")
ax2.yaxis.set_label_coords(-0.12, 1.4)
ax.legend()
ax.tick_params(length=0.2, top="off", pad=8)
ax2.tick_params(length=0, pad=8)
plt.savefig(saveas, dpi=600, bbox_inches='tight', pil_kwargs={"compression": "tiff_lzw"})
plt.clf()
def plot_hist_element_ratio(reals, fakes, saveas="cnratio.tiff"):
from collections import Counter
plt.xlim([0, 4.1])
plt.hist(fakes, bins=400, label=r"$C'_B$", alpha=0.5, facecolor="purple", density=True)
pprint.pprint({k: v / sum(Counter(reals).values()) for k, v in Counter(reals).items()})
save_pkl(reals, "cnratio_real.pkl")
plt.xlabel("C/N ratio")
plt.ylabel("Probability density")
plt.legend()
plt.savefig(saveas, dpi=600, bbox_inches='tight', pil_kwargs={"compression": "tiff_lzw"})
plt.clf()
def eval_one_fold(dataset: FormulaDataset, cvfolder: typing.Union[str, pathlib.Path], nsamples: int = 50):
logging.warning("working on: {}".format(cvfolder))
trainer = Trainer.load(dataset, os.path.join(cvfolder, "Trainer.yml"), change_wdir=cvfolder)
trainer.load_model()
trainer.plot_loss()
prior_ds, prior_ds_names = trainer.eval_model("prior", eval_quantity="mindist", plot=False, ntrials=nsamples, std=1)
opt_ds, opt_ds_names = trainer.eval_model("opt", eval_quantity="mindist", plot=False, zlim=5, steps=100)
pd, bli, blr = prior_ds
od, bli, blr = opt_ds
prior_ds_ratio, prior_ds_names_ratio = trainer.eval_model("prior", eval_quantity="ratio", plot=False, ntrials=200,
std=1)
ratio_real, ratio_rand, ratio_fake = prior_ds_ratio
return pd, od, bli, blr, ratio_real, ratio_rand, ratio_fake
def eval_cv(dataset: FormulaDataset, cvfolders: [typing.Union[str, pathlib.Path]]):
prior_diff = []
opt_diff = []
identity_diff = []
random_diff = []
ratios_real = []
ratios_rand = []
ratios_fake = []
for cvfolder in cvfolders:
pd, od, bli, blr, ratio_real, ratio_rand, ratio_fake = eval_one_fold(dataset, cvfolder)
prior_diff += pd
opt_diff += od
identity_diff += bli
random_diff += blr
ratios_real += ratio_real.tolist()
ratios_rand += ratio_rand.tolist()
ratios_fake += ratio_fake.tolist()
plot_hist_element_ratio(ratios_real, ratios_fake, "cnratio.tiff")
plot_bar_element_ratio(ratios_real, "cnratio_barplot.tiff")
plot_violin([opt_diff, prior_diff, identity_diff], ["opt", "sample", "identity"], ["red", "purple", "gray"],
"best_eval_vio",
ylim=[-0.005, 0.125])
plot_cdf([opt_diff, prior_diff, identity_diff], ["opt", "sample", "identity", ], ["red", "purple", "gray"],
"best_eval_cdf",
)
def prior_mean_vs_nsample(dataset, cvfolders, saveas="prior_nsamples.tiff"):
def prior_mean(dataset, nsamples, cvfolders):
prior_data = []
bl_identity = []
bl_random = []
for cvfolder in cvfolders:
trainer = Trainer.load(dataset, os.path.join(cvfolder, "Trainer.yml"), change_wdir=cvfolder)
trainer.load_model()
prior_ds, prior_ds_names = trainer.eval_model("prior", plot=False, ntrials=nsamples, std=6)
pd, bli, blr = prior_ds
prior_data += pd
bl_identity += bli
bl_random += blr
return np.mean(prior_data), np.mean(bl_identity), np.mean(bl_random)
x = []
y = []
for nsamples in range(5, 51, 5):
print("sample", nsamples)
p, bli, blr = prior_mean(dataset, nsamples, cvfolders)
x.append(nsamples)
y.append(p)
hl_bli = bli
hl_opt = 0.011604
plt.plot(x, y, ":o", c="purple", label="sample")
plt.hlines(hl_bli, xmin=4, xmax=51, label="identity", ls="-", colors="gray", lw=4)
plt.hlines(hl_opt, xmin=4, xmax=51, label="opt", ls=":", colors="r", lw=4)
plt.legend(loc="upper right", bbox_to_anchor=(1.0, 0.8))
plt.ylabel("mean " + r"$\Delta (C, C')$")
plt.xlabel(r"$N_{\rm{Sample}}$")
plt.xlim([4, 51])
plt.savefig(saveas, dpi=600, bbox_inches='tight', pil_kwargs={"compression": "tiff_lzw"})
plt.clf()
if __name__ == '__main__':
dataset = load_pkl("../dataset/dataset_ab.pkl")
dataset.convertmno = False
dataset: FormulaDataset
dataset.gab: GroupAB
seed_rng(SEED)
sns.set_theme()
sns.set(font_scale=1.4)
sns.set_style("whitegrid")
tuned_model_path = os.path.abspath("../tuned")
result_path = os.path.abspath("./eval_tuned/")
cvfolders = sorted(glob.glob(os.path.join(tuned_model_path, "2021*")))
os.chdir(result_path)
eval_cv(dataset, cvfolders)
# prior_mean_vs_nsample(dataset, cvfolders)
| avg_line_length: 37.049724 | max_line_length: 120 | alphanum_fraction: 0.632717 |

| hexsha: 5038099c197ea2e6216b975ae00632e830277481 | size: 3,911 | ext: py | lang: Python |
| repo: b01lers/bootcamp-2020 | path: web/tron-sql/deploy/db_create.py | head_hexsha: 6246c6ecb7c7e2c3a2a275e367cc9bf8469e610c | licenses: ["MIT"] (identical across the stars/issues/forks column groups) |
| max_stars_count: 22 (2020-10-05T01:10:51.000Z to 2021-07-14T03:22:37.000Z) | max_issues_count: null | max_forks_count: 1 (2020-10-05T10:00:35.000Z to 2020-10-05T10:00:35.000Z) |
#!/usr/bin/env python3
import MySQLdb
import random
import csv
from time import sleep
import os
def get_name(name_list):
name = name_list[random.randint(0, len(name_list) - 1)]
name += "-" + str(round(random.random()*9999, 4))
return name
def get_loc(to_derezz=False):
value_list = ["unknown", "game-room-stands", "uplink", "building", "carrier-ship", "game-room"]
rand = random.randint(0, len(value_list) - 1)
if not to_derezz:
loc = value_list[rand]
else:
loc = "holding-cell"
if rand < 2 or to_derezz:
loc += "-" + str(round(random.random()*9999, 4))
return loc
def get_status():
value_list = ["derezzed", "unknown", "idle", "running", "suspended", "zombie", "orphan"]
return value_list[random.randint(0, len(value_list) - 1)]
# Populate name_list while mysql is loading
name_list = []
with open('data/names.csv', 'r', encoding='utf-8') as fd:
name_reader = csv.reader(fd, delimiter=',')
# Only 1 row
for row in name_reader:
name_list = row
root_pass = os.environ['MYSQL_ROOT_PASSWORD']
connected = False
while not connected:
try:
db = MySQLdb.connect(host='localhost', user='root', passwd=root_pass)
connected = True
except MySQLdb.OperationalError:
# Poll
print("Sleeping...")
sleep(5)
cur = db.cursor()
root_queries = [
"CREATE DATABASE grid",
"CREATE USER 'selection_program'@'localhost' IDENTIFIED BY 'designation2-503';",
"GRANT SELECT ON grid.* TO 'selection_program'@'localhost';",
"GRANT SELECT ON information_schema.* TO 'selection_program'@'localhost';",
]
for query in root_queries:
try:
cur.execute(query)
except MySQLdb.OperationalError:
print("Sleeping...")
sleep(2)
cur.close()
db.commit()
db.close()
# Connect to grid database
db_grid = MySQLdb.connect(host='localhost', user='root', passwd=root_pass, db='grid')
cur_grid = db_grid.cursor()
# Create tables in grid database
grid_tables = [
'programs (id VARCHAR(10) NOT NULL, name VARCHAR(50), status VARCHAR(10), location VARCHAR(50))',
'known_isomorphic_algorithms (id VARCHAR(10) NOT NULL, name VARCHAR(50), status VARCHAR(10), location VARCHAR(50))',
'to_derezz (id VARCHAR(10) NOT NULL, name VARCHAR(50), status VARCHAR(10), location VARCHAR(50))',
]
for query in grid_tables:
cur_grid.execute('CREATE TABLE ' + query + ';')
# Put names into programs table
for i in range(0xffff):
# Add tron at his 'compile date'
loc = None
if i == 1980:
name = "Tron-JA-307020"
status = "running"
loc = "flag{I_fight_for_the_users_and_yori}"
elif i == 1981:
name = "Clu"
status = "derezzed"
elif i == 1982:
name = "Ram"
status = "derezzed"
else:
name = get_name(name_list)
status = get_status()
if status == "derezzed":
loc = "NULL"
elif loc is None:
loc = get_loc()
cur_grid.execute(
'INSERT INTO programs (id, name, status, location) VALUES ("' + str(i) + '", "'
+ name + '", "' + status + '", "' + loc + '");')
for i in range(0x1000):
cur_grid.execute(
'INSERT INTO known_isomorphic_algorithms (id, name, status, location) VALUES("' + str(i) + '", "'
+ get_name(name_list) + '", "derezzed", "NULL");')
# Insert Quorra into the known_isomorphic_algorithms table
cur_grid.execute(
'INSERT INTO known_isomorphic_algorithms (id, name, status, location) VALUES("0x21f3", "Quorra",'
+ ' "unknown", "unknown");')
# Insert into to_derezz table
for i in range(0x100):
cur_grid.execute(
'INSERT INTO to_derezz (id, name, status, location) VALUES ("' + str(i) + '", "'
+ get_name(name_list) + '", "idle", "' + get_loc(to_derezz=True) + '");')
# Commit changes
cur_grid.close()
db_grid.commit()
db_grid.close()
| 28.34058
| 120
| 0.625671
|
872f3879c2ddc76ca0eca99df5eb48ed7953a325
| 17
|
py
|
Python
|
Python/IK.py
|
KobayashiRui/Robot_kinematics
|
0282cb4f5a2d65a2bd2db3a011c4a21562f1d5b7
|
[
"MIT"
] | 1
|
2021-02-26T04:39:57.000Z
|
2021-02-26T04:39:57.000Z
|
Python/IK.py
|
KobayashiRui/Robot_kinematics
|
0282cb4f5a2d65a2bd2db3a011c4a21562f1d5b7
|
[
"MIT"
] | null | null | null |
Python/IK.py
|
KobayashiRui/Robot_kinematics
|
0282cb4f5a2d65a2bd2db3a011c4a21562f1d5b7
|
[
"MIT"
] | null | null | null |
# Inverse kinematics implementation (numerical analysis)
| 5.666667
| 15
| 0.647059
|
2fe3f0314e88d229a584838c40b261dffeaf2749
| 109
|
py
|
Python
|
10. Inheritance - Exercise/players_and_monsters_03/project/blade_knight.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | 1
|
2021-03-27T16:56:30.000Z
|
2021-03-27T16:56:30.000Z
|
10. Inheritance - Exercise/players_and_monsters_03/project/blade_knight.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | null | null | null |
10. Inheritance - Exercise/players_and_monsters_03/project/blade_knight.py
|
elenaborisova/Python-OOP
|
584882c08f84045b12322917f0716c7c7bd9befc
|
[
"MIT"
] | 1
|
2021-03-15T14:50:39.000Z
|
2021-03-15T14:50:39.000Z
|
from players_and_monsters_03.project.dark_knight import DarkKnight
class BladeKnight(DarkKnight):
pass
| 18.166667
| 66
| 0.834862
|
b14465f083a0abe7f408a877b419561168781b54
| 4,140
|
py
|
Python
|
ccx_messaging/downloaders/http_downloader.py
|
tisnik/insights-ccx-messaging
|
4d601673079a9242cdb7572b165b8a5af6d64f7f
|
[
"Apache-2.0"
] | 1
|
2022-02-10T13:24:23.000Z
|
2022-02-10T13:24:23.000Z
|
ccx_messaging/downloaders/http_downloader.py
|
tisnik/insights-ccx-messaging
|
4d601673079a9242cdb7572b165b8a5af6d64f7f
|
[
"Apache-2.0"
] | 13
|
2022-02-10T07:46:34.000Z
|
2022-02-24T09:44:59.000Z
|
ccx_messaging/downloaders/http_downloader.py
|
tisnik/insights-ccx-messaging
|
4d601673079a9242cdb7572b165b8a5af6d64f7f
|
[
"Apache-2.0"
] | 3
|
2022-02-09T11:24:18.000Z
|
2022-03-07T10:41:54.000Z
|
# Copyright 2022 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module that defines a Downloader object to get HTTP urls."""
import logging
import re
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import requests
from ccx_messaging.error import CCXMessagingError
LOG = logging.getLogger(__name__)
def parse_human_input(file_size):
"""Parse an input in human-readable format and return a number of bytes."""
multipliers = {
"K": 10 ** 3,
"M": 10 ** 6,
"G": 10 ** 9,
"T": 10 ** 12,
"Ki": 2 ** 10,
"Mi": 2 ** 20,
"Gi": 2 ** 30,
"Ti": 2 ** 40,
}
match = re.match(
r"^(?P<quantity>\d+(\.\d+)?)\s*(?P<units>[KMGT]?i?B?)?$", file_size
)
if match is None:
raise ValueError(f"The file size cannot be parsed as a file size: {file_size}")
parsed = match.groupdict()
quantity = float(parsed.get("quantity"))
units = parsed.get("units")
units = units.rstrip("B") if units is not None else ""
if units != "" and units not in multipliers:
raise ValueError(
f"The file size cannot be parsed because its units: {parsed.get('units')}"
)
multiplier = multipliers.get(units, 1) # if multiplier == "", then 1
quantity = quantity * multiplier
return int(quantity)
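# Illustrative examples (not taken from this module) of how parse_human_input
# interprets plain numbers, decimal unit suffixes and binary unit suffixes:
#
#     parse_human_input("100")    -> 100
#     parse_human_input("1.5K")   -> 1500         (K  = 10**3)
#     parse_human_input("2MiB")   -> 2097152      (Mi = 2**20)
#     parse_human_input("10 GB")  -> 10000000000  (G  = 10**9)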
# pylint: disable=too-few-public-methods
class HTTPDownloader:
"""Downloader for HTTP uris."""
# https://<hostname>/service_id/file_id?<credentials and other params>
HTTP_RE = re.compile(
r"^(?:https://[^/]+\.s3\.amazonaws\.com/[0-9a-zA-Z/\-]+|"
r"https://s3\.[0-9a-zA-Z\-]+\.amazonaws\.com/[0-9a-zA-Z\-]+/[0-9a-zA-Z/\-]+|"
r"http://minio:9000/insights-upload-perma/[0-9a-zA-Z\.\-]+/[0-9a-zA-Z\-]+)\?"
r"X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=[^/]+$"
)
def __init__(self, max_archive_size=None, allow_unsafe_links=False):
"""`HTTPDownloader` initializer.
        This method accepts a `max_archive_size` argument that indicates the
maximum size allowed for the archives. If set, archives bigger than this
will be discarded.
"""
if max_archive_size is not None:
self.max_archive_size = parse_human_input(max_archive_size)
LOG.info("Configured max_archive_size to %s bytes", self.max_archive_size)
else:
self.max_archive_size = None
LOG.warning("No max_archive_size defined. Be careful")
self.allow_unsafe_links = allow_unsafe_links
@contextmanager
def get(self, src):
"""Download a file from HTTP server and store it in a temporary file."""
if not self.allow_unsafe_links:
if src is None or not HTTPDownloader.HTTP_RE.fullmatch(src):
raise CCXMessagingError(f"Invalid URL format: {src}")
try:
response = requests.get(src)
data = response.content
size = len(data)
if size == 0:
raise CCXMessagingError(f"Empty input archive from {src}")
if self.max_archive_size is not None and size > self.max_archive_size:
raise CCXMessagingError(
f"The archive is too big ({size} > {self.max_archive_size}). Skipping"
)
with NamedTemporaryFile() as file_data:
file_data.write(data)
file_data.flush()
yield file_data.name
response.close()
except requests.exceptions.ConnectionError as err:
raise CCXMessagingError(err) from err
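# Minimal usage sketch (illustrative only; the URL and the process() helper are
# placeholders, and allow_unsafe_links=True is used because the placeholder URL
# would not match HTTP_RE):
#
#     downloader = HTTPDownloader(max_archive_size="100 MiB", allow_unsafe_links=True)
#     with downloader.get("http://example.com/archive.tar.gz") as local_path:
#         process(local_path)  # the temporary file is removed when the block exits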
| 33.658537
| 90
| 0.624155
|
921291d658142cb6d4d16997cac1e7026acf4d4e
| 264
|
py
|
Python
|
src/main/resources/docs/tests/E1004.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1004.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
src/main/resources/docs/tests/E1004.py
|
h314to/codacy-pylint
|
9d31567db6188e1b31ce0e1567998f64946502df
|
[
"Apache-2.0"
] | null | null | null |
##Patterns: E1004
class OldStyleClass(object):
def __init__(self): pass
class AnotherOldStyleClass(OldStyleClass):
def __init__(self):
##Err: E1004
super().__init__()
# Marking this file as Python 2
raise Exception, "lala"
| 24
| 42
| 0.651515
|
db8267a7c40403d16bf95db2810b9e9a813c18d3
| 5,445
|
py
|
Python
|
metis_lib/kong.py
|
CS-METIS/minimetis
|
74c0bca0d12d7ba810095c26a45b4929cebb7541
|
[
"Apache-2.0"
] | 5
|
2021-08-24T08:31:39.000Z
|
2022-02-02T14:37:56.000Z
|
metis_lib/kong.py
|
CS-METIS/minimetis
|
74c0bca0d12d7ba810095c26a45b4929cebb7541
|
[
"Apache-2.0"
] | null | null | null |
metis_lib/kong.py
|
CS-METIS/minimetis
|
74c0bca0d12d7ba810095c26a45b4929cebb7541
|
[
"Apache-2.0"
] | null | null | null |
from typing import Any, Dict, List, Optional
import os
import requests
from metis_lib import service
class Kong:
def __init__(
self, kong_url: str = f"http://{os.environ.get('DOMAIN', 'localhost')}:8001"
) -> None:
self.kong_url = kong_url
def wait_ready(self, timeout: Optional[float] = None):
service.wait_respond(self.kong_url, timeout=timeout)
def create_service_subdomain(
self,
name: str,
target_host: str,
target_port: int,
target_protocol: str,
match_host: str,
match_path: str,
strip_path: bool = False,
):
payload = {
"name": name,
"host": target_host,
"port": target_port,
"protocol": target_protocol,
}
resp = requests.post(f"{self.kong_url}/services", json=payload)
service = resp.json()
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(service)
payload = {
"service": {"id": service["id"]},
"protocols": ["https"],
"strip_path": strip_path,
"preserve_host": True
}
methods = []
if match_host:
payload["hosts"] = [match_host]
methods.append("hosts")
if match_path:
payload["paths"] = [match_path]
methods.append("paths")
resp = requests.post(f"{self.kong_url}/routes", json=payload)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
def create_service(
self,
name: str,
host: str,
port: int,
path: Optional[str] = None,
strip_path: bool = False,
protocol: str = "http",
) -> None:
payload = {
"name": name,
"host": host,
"port": port,
"protocol": protocol,
}
resp = requests.post(f"{self.kong_url}/services", json=payload)
service = resp.json()
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(service)
if not path:
path = name
payload = {
"service": {"id": service["id"]},
"protocols": ["https"],
"paths": [f"/{path}"],
"strip_path": strip_path,
}
resp = requests.post(f"{self.kong_url}/routes", json=payload)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
def get_service_id(self, service_name: str) -> str:
resp = requests.get(f"{self.kong_url}/services/{service_name}")
service = resp.json()
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(service)
return service["id"]
def activate_oidc_plugin(
self,
service_name: str,
oidc_provider_url: str,
oidc_client_id: str,
oidc_client_secret: str,
redirect_uri_path: str = None,
) -> None:
payload = {
"name": "oidc",
"config.client_id": oidc_client_id,
"config.client_secret": oidc_client_secret,
"config.bearer_only": "no",
"config.realm": "metis",
"config.introspection_endpoint": f"{oidc_provider_url}/auth/realms/metis/protocol/openid-connect/token/introspect",
"config.discovery": f"{oidc_provider_url}/auth/realms/metis/.well-known/openid-configuration",
"config.session_secret": "bXkgc2Vzc2lvbiBzZWNyZXQ=",
}
if redirect_uri_path:
payload["config.redirect_uri_path"] = redirect_uri_path
service_id = self.get_service_id(service_name)
resp = requests.post(
f"{self.kong_url}/services/{service_id}/plugins", data=payload
)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
def activate_response_transformer_plugin(
self, service_name: str, headers_to_remove: List[str],
) -> None:
payload = [("name", "response-transformer")]
for header in headers_to_remove:
payload.append(("config.remove.headers", header))
service_id = self.get_service_id(service_name)
resp = requests.post(
f"{self.kong_url}/services/{service_id}/plugins", data=payload
)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
def add_certificate(
self, certificate_file: str, private_key_file: str, domains: List[str]
) -> str:
with open(certificate_file) as f:
cert = f.read()
with open(private_key_file) as f:
key = f.read()
payload: Dict[str, Any] = {"cert": cert, "key": key, "snis": ",".join(domains)}
resp = requests.post(f"{self.kong_url}/certificates", data=payload)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
cert_id = resp.json()["id"]
for domain in domains:
payload = {"name": domain, "certificate": {"id": cert_id}}
resp = requests.post(
f"{self.kong_url}/certificates/{cert_id}/snis", data=payload
)
if resp.status_code < 200 or resp.status_code >= 300:
raise RuntimeError(resp.text)
return cert_id
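# Minimal usage sketch (illustrative only; hostnames, ports and OIDC values below are
# placeholders rather than settings taken from this repository):
#
#     kong = Kong("http://localhost:8001")
#     kong.wait_ready(timeout=60)
#     kong.create_service("grafana", host="grafana", port=3000)
#     kong.activate_oidc_plugin(
#         service_name="grafana",
#         oidc_provider_url="https://keycloak.example.org",
#         oidc_client_id="grafana",
#         oidc_client_secret="change-me",
#     )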
| 35.588235
| 127
| 0.570983
|
6281313dd4936bdfc24c6b816134fbc582842a72
| 921
|
py
|
Python
|
setup.py
|
aryangulati/codemod
|
41d6eabad7b055a83923150efd5518813831c9a5
|
[
"Apache-2.0"
] | 3,811
|
2015-02-12T07:04:37.000Z
|
2021-06-09T13:13:37.000Z
|
setup.py
|
aryangulati/codemod
|
41d6eabad7b055a83923150efd5518813831c9a5
|
[
"Apache-2.0"
] | 86
|
2015-02-12T00:10:12.000Z
|
2021-04-22T22:30:40.000Z
|
setup.py
|
aryangulati/codemod
|
41d6eabad7b055a83923150efd5518813831c9a5
|
[
"Apache-2.0"
] | 175
|
2015-02-16T23:24:37.000Z
|
2021-05-27T09:22:19.000Z
|
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
description = (
'Codemod is a tool/library to assist you with large-scale codebase '
'refactors that can be partially automated but still require human '
'oversight and occasional intervention. Codemod was developed at '
'Facebook and released as open source.'
)
setup(
name='codemod',
version="1.0.0",
url='http://github.com/facebook/codemod',
license='Apache License 2.0',
author="Facebook",
author_email="facebook@facebook.com",
description=description,
long_description=description,
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
entry_points='''
[console_scripts]
codemod=codemod.base:main
''',
tests_require=['flake8', 'pytest'],
test_suite='py.test'
)
| 27.909091
| 72
| 0.693811
|
6d5156ad6720958a039d54b798d5d83dad1fb42c
| 256
|
py
|
Python
|
src/05_Numbers/02_int_method.py
|
UltiRequiem/W3Schools-Python-Exercises
|
34d10d48d7ce7b9e50466cd8537a8aece0426ce8
|
[
"MIT"
] | 5
|
2021-05-29T23:30:50.000Z
|
2022-01-15T16:35:20.000Z
|
src/05_Numbers/02_int_method.py
|
UltiRequiem/W3Schools-Python-Exercises
|
34d10d48d7ce7b9e50466cd8537a8aece0426ce8
|
[
"MIT"
] | null | null | null |
src/05_Numbers/02_int_method.py
|
UltiRequiem/W3Schools-Python-Exercises
|
34d10d48d7ce7b9e50466cd8537a8aece0426ce8
|
[
"MIT"
] | 4
|
2021-03-18T17:48:06.000Z
|
2021-07-22T19:32:36.000Z
|
# Instructions: Insert the correct syntax to convert x into an integer.
x = 5.5
# Solution: int()
x = int(x)
print(x)
'''
The int() function can be used to convert a float to an int.
Read more here: https://www.w3schools.com/python/python_numbers.asp
'''
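# For reference: int() truncates toward zero rather than rounding,
# e.g. int(5.5) == 5, int(5.9) == 5 and int(-5.9) == -5; use round() for rounding.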
| 21.333333
| 70
| 0.707031
|
96e95045edf2464764ac853feff246e386e19138
| 41,344
|
py
|
Python
|
xmpp_tg/xmpp.py
|
vitlav/tg4xmpp
|
97a65c2f49288504c88d079ba3585de98c77385f
|
[
"Apache-2.0"
] | 8
|
2017-12-25T09:11:30.000Z
|
2020-11-16T14:50:51.000Z
|
xmpp_tg/xmpp.py
|
vitlav/tg4xmpp
|
97a65c2f49288504c88d079ba3585de98c77385f
|
[
"Apache-2.0"
] | 5
|
2017-12-23T12:21:49.000Z
|
2021-11-20T19:30:19.000Z
|
xmpp_tg/xmpp.py
|
vitlav/tg4xmpp
|
97a65c2f49288504c88d079ba3585de98c77385f
|
[
"Apache-2.0"
] | 6
|
2018-01-05T12:00:58.000Z
|
2020-04-03T17:01:22.000Z
|
import re, sys, os, io, sqlite3, hashlib, time, datetime
import xml.etree.ElementTree as ET
from sleekxmpp.componentxmpp import ComponentXMPP
from sleekxmpp import Presence, Message
from telethon.tl.functions.messages import GetDialogsRequest, SendMessageRequest, SendMediaRequest, EditMessageRequest, DeleteMessagesRequest, ImportChatInviteRequest, GetFullChatRequest, AddChatUserRequest, DeleteChatUserRequest, CreateChatRequest, DeleteHistoryRequest
from telethon.tl.functions.account import UpdateStatusRequest, GetAuthorizationsRequest, UpdateProfileRequest, UpdateUsernameRequest
from telethon.tl.functions.contacts import DeleteContactRequest, BlockRequest, UnblockRequest, ImportContactsRequest
from telethon.tl.functions.channels import JoinChannelRequest, LeaveChannelRequest, InviteToChannelRequest, EditBannedRequest, CreateChannelRequest, DeleteMessagesRequest as DeleteMessagesChannel
from telethon.tl.types import InputPeerEmpty, InputPeerUser, InputPeerChat, InputPeerChannel, InputPhoneContact, InputMediaPhotoExternal
from telethon.tl.types import User, Chat, Channel
from telethon.tl.types import PeerChannel, PeerChat, PeerUser, Chat, ChatForbidden, Channel, ChannelForbidden, ChannelBannedRights
from telethon.tl.types import UserStatusOnline, UserStatusRecently, UserStatusOffline
from telethon.tl.types import Updates, UpdateShortSentMessage, UpdateMessageID
from telethon.tl.types.messages import Dialogs, DialogsSlice
from telethon.helpers import generate_random_long
from telethon.errors import SessionPasswordNeededError
from xmpp_tg.mtproto import TelegramGateClient
from xmpp_tg.utils import var_dump, display_tg_name, get_contact_jid, localtime
import xmpp_tg.monkey # monkeypatch
class XMPPTelegram(ComponentXMPP):
"""
Main XMPPTelegram class.
"""
def __init__(self, config_dict):
"""
Transport initialization
:param config_dict:
"""
ComponentXMPP.__init__(self, config_dict['jid'], config_dict['secret'], config_dict['server'],
config_dict['port'])
self.auto_authorize = True
# self.auto_subscribe = True
self.config = config_dict
self.accounts = dict() # personal configuration per JID
self.tg_connections = dict()
self.tg_phones = dict()
self.tg_dialogs = dict()
self.contact_list = dict()
self.db_connection = self.init_database()
self.register_plugin('xep_0030') # Service discovery
self.register_plugin('xep_0054') # VCard-temp
self.register_plugin('xep_0172') # NickNames
self.add_event_handler('message', self.message)
self.add_event_handler('presence_unsubscribe', self.event_presence_unsub)
self.add_event_handler('presence_unsubscribed', self.event_presence_unsub)
self.add_event_handler('presence', self.event_presence)
self.add_event_handler('got_online', self.handle_online)
self.add_event_handler('got_offline', self.handle_offline)
self.add_event_handler('session_start', self.handle_start)
self.plugin['xep_0030'].add_identity(
category='gateway',
itype='telegram',
name=self.config['title'],
node=self.boundjid.node,
jid=self.boundjid.bare,
lang='no'
)
vcard = self.plugin['xep_0054'].make_vcard()
vcard['FN'] = self.config['title']
vcard['DESC'] = 'Send !help for information'
self.plugin['xep_0054'].publish_vcard(jid=self.boundjid.bare, vcard=vcard)
def __del__(self):
"""
Destructor
:return:
"""
self.db_connection.close()
def handle_start(self, arg):
"""
Successful connection to Jabber server
:param arg:
:return:
"""
users = self.db_connection.execute("SELECT * FROM accounts").fetchall()
for usr in users:
self.accounts[usr['jid']] = usr
self.send_presence(pto=usr['jid'], pfrom=self.boundjid.bare, ptype='probe')
def message(self, iq):
"""
Message from XMPP
:param iq:
:return:
"""
jid = iq['from'].bare
if iq['to'] == self.config['jid'] and iq['type'] == 'chat': # message to gateway
if iq['body'].startswith('!'):
self.process_command(iq)
else:
self.gate_reply_message(iq, 'Only commands accepted. Try !help for more info.')
else: # --- outgoing message ---
if jid in self.tg_connections and self.tg_connections[jid].is_user_authorized():
if iq['body'].startswith('!'): # it is command!
if iq['to'].bare.startswith( ('u', 'b') ):
self.process_chat_user_command(iq)
elif iq['to'].bare.startswith('g') or iq['to'].bare.startswith('s') or iq['to'].bare.startswith('c'):
self.process_chat_group_command(iq)
else:
self.gate_reply_message(iq, 'Error.')
else: # -- normal message --
tg_id = int(iq['to'].node[1:])
tg_peer = None
msg = iq['body']
reply_mid = None
if msg.startswith('>'): # quoting check
msg_lines = msg.split('\n')
matched = re.match(r'>[ ]*(?P<mid>[\d]+)[ ]*', msg_lines[0])
matched = matched.groupdict() if matched else {}
if 'mid' in matched: # citation
reply_mid = int(matched['mid'])
msg = '\n'.join(msg_lines[1:])
if iq['to'].bare.startswith( ('u', 'b') ): # normal user
tg_peer = InputPeerUser(tg_id, self.tg_dialogs[jid]['users'][tg_id].access_hash)
elif iq['to'].bare.startswith('g'): # generic group
tg_peer = InputPeerChat(tg_id)
elif iq['to'].bare.startswith( ('s', 'c') ): # supergroup
tg_peer = InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash)
# peer OK.
if tg_peer:
result = None
# detect media
if msg.startswith('http') and re.match(r'(?:http\:|https\:)?\/\/.*\.(?:' + self.config['media_external_formats'] + ')', msg):
urls = re.findall(r'(?:http\:|https\:)?\/\/.*\.(?:' + self.config['media_external_formats'] + ')', msg)
message = msg.replace(urls[0], '')
media = InputMediaPhotoExternal(urls[0])
try:
result = self.tg_connections[jid].invoke(SendMediaRequest(tg_peer, media, message, random_id = generate_random_long(), reply_to_msg_id = reply_mid))
except Exception:
print('Media upload failed.')
# media send failed. #
if not result:
result = self.tg_connections[jid].invoke(SendMessageRequest(tg_peer, msg, generate_random_long(), reply_to_msg_id=reply_mid))
# find sent message id and save it
if result and hasattr(result, 'id'): # update id
msg_id = result.id
self.tg_dialogs[jid]['messages'][tg_id] = {'id': msg_id, 'body': msg}
#self.send_message(mto=iq['from'], mfrom=iq['to'], mtype='chat', mbody='[Your MID:{}]'.format(msg_id))
def event_presence_unsub(self, presence):
return
def event_presence(self, presence):
"""
Presence handler
:param presence:
:return:
"""
ptype = presence['type']
# handle "online" to transport:
if ptype == 'available' and presence['to'].bare == self.boundjid.bare:
self.handle_online(presence, False) # handle online
elif ptype == 'subscribe':
self.send_presence(pto=presence['from'].bare, pfrom=presence['to'].bare, ptype='subscribed')
elif ptype == 'subscribed':
pass
elif ptype == 'unsubscribe':
pass
elif ptype == 'unsubscribed':
pass
elif ptype == 'probe':
self.send_presence(pto=presence['from'], pfrom=presence['to'], ptype='available')
elif ptype == 'unavailable':
pass
else:
# self.send_presence(pto=presence['from'], pfrom=presence['to'])
pass
def handle_online(self, event, sync_roster = True):
"""
Gateway's subscriber comes online
:param event:
:return:
"""
jid = event['from'].bare
to = event['to'].bare
        # ignore presence stanzas that are not addressed to the transport itself
if to != self.boundjid.bare:
return
if jid not in self.tg_connections:
result = self.db_connection.execute("SELECT * FROM accounts WHERE jid = ?", (jid,)).fetchone()
if result is not None:
self.spawn_tg_client(jid, result['tg_phone'])
else:
if not (self.tg_connections[jid].is_connected()):
self.tg_connections[jid].connect()
self.tg_connections[jid].invoke(UpdateStatusRequest(offline=False))
self.send_presence(pto=jid, pfrom=self.boundjid.bare, ptype='online', pstatus='connected')
self.tg_process_dialogs(jid, sync_roster) # do not sync roster if we already have connection!
def handle_offline(self, event):
"""
Gateway's subscriber comes offline.
:param event:
:return:
"""
jid = event['from'].bare
# keep telegram online ?
if self.accounts[jid]['keep_online']:
return
if jid in self.tg_connections:
self.tg_connections[jid].invoke(UpdateStatusRequest(offline=True))
self.tg_connections[jid].disconnect()
def handle_interrupt(self, signal, frame):
"""
Interrupted (Ctrl+C).
:param event:
:return:
"""
for jid in self.tg_connections:
print('Disconnecting: %s' % jid)
self.tg_connections[jid].invoke(UpdateStatusRequest(offline=True))
self.tg_connections[jid].disconnect()
for contact_jid, contact_nickname in self.contact_list[jid].items():
self.send_presence(pto=jid, pfrom=contact_jid, ptype='unavailable')
self.send_presence(pto=jid, pfrom=self.boundjid.bare, ptype='unavailable')
sys.exit(0)
def process_command(self, iq):
"""
Commands to gateway, users or chats (starts with !)
:param iq:
:return:
"""
parsed = iq['body'].split(' ')
jid = iq['from'].bare
if parsed[0] == '!help':
self.gate_reply_message(iq, '=== Available gateway commands ===:\n\n'
'!help - Displays this text\n'
'!login +123456789 - Initiates Telegram session\n'
'!code 12345 - Entering one-time code during auth\n'
'!password abc123 - Entering password during two-factor auth\n'
'!configure - Configure transport settings\n'
#'!list_sessions - List all created sessions at Telegram servers\n'
#'!delete_session 123 - Delete session\n'
'!logout - Deletes current Telegram session at gate\n'
'!reload_dialogs - Reloads dialogs list from Telegram\n\n'
'!add - Find and add Telegram contact. Any formats accepted (nickname or t.me link)\n\n'
'!join - Join Telegram conference via invite link \n\n'
'!import phone firstname lastname - Add Telegram contact with phone number \n\n'
'!group GroupName @InviteContact - Create a normal group\n'
'!supergroup SupergroupName - Create a supergroup\n'
'!channel ChannelName - Create a channel\n\n'
'!name first last - Change your name in Telegram\n'
'!about text - Change about text in Telegram\n'
'!username - Changes your @username in Telegram\n'
)
elif parsed[0] == '!configure':
config_exclude = ['jid', 'tg_phone']
if len(parsed) > 2 and parsed[1] not in config_exclude:
self.db_connection.execute("update accounts set {} = ? where jid = ?".format(parsed[1]), (parsed[2],jid,) )
self.accounts[jid] = self.db_connection.execute("SELECT * FROM accounts where jid = ?", (jid,) ).fetchone()
message = "=== Your current configuration ===\n\n"
for param, value in self.accounts[jid].items():
message = message + "<%s>: %s" % (param, value) + "\n"
message = message + "\nTo modify some option, please, send !configure param value"
self.gate_reply_message(iq, message)
elif parsed[0] == '!login': # --------------------------------------------------
self.gate_reply_message(iq, 'Please wait...')
self.spawn_tg_client(jid, parsed[1])
if self.tg_connections[jid].is_user_authorized():
self.send_presence(pto=jid, pfrom=self.boundjid.bare, ptype='online', pstatus='connected')
self.gate_reply_message(iq, 'You are already authenticated in Telegram.')
else:
# remove old sessions for this JID #
self.db_connection.execute("DELETE from accounts where jid = ?", (jid, ) )
self.tg_connections[jid].send_code_request(parsed[1])
self.gate_reply_message(iq, 'Gate is connected. Telegram should send SMS message to you.')
self.gate_reply_message(iq, 'Please enter one-time code via !code 12345.')
elif parsed[0] in ['!code', '!password']: # --------------------------------------------------
if not self.tg_connections[jid].is_user_authorized():
if parsed[0] == '!code':
try:
self.gate_reply_message(iq, 'Trying authenticate...')
self.tg_connections[jid].sign_in(self.tg_phones[jid], parsed[1])
except SessionPasswordNeededError:
self.gate_reply_message(iq, 'Two-factor authentication detected.')
self.gate_reply_message(iq, 'Please enter your password via !password abc123.')
return
if parsed[0] == '!password':
self.gate_reply_message(iq, 'Checking password...')
self.tg_connections[jid].sign_in(password=parsed[1])
if self.tg_connections[jid].is_user_authorized():
self.send_presence(pto=jid, pfrom=self.boundjid.bare, ptype='online', pstatus='connected')
self.gate_reply_message(iq, 'Authentication successful. Initiating Telegram...')
self.db_connection.execute("INSERT INTO accounts(jid, tg_phone) VALUES(?, ?)", (jid, self.tg_phones[jid],))
self.accounts[jid] = self.db_connection.execute("SELECT * FROM accounts where jid = ?", (jid,) ).fetchone()
self.init_tg(jid)
else:
self.gate_reply_message(iq, 'Authentication failed.')
else:
self.gate_reply_message(iq, 'You are already authenticated. Please use !logout before new login.')
elif parsed[0] == '!list_sessions': # --------------------------------------------------
if not self.tg_connections[jid].is_user_authorized():
self.gate_reply_message(iq, 'Error.')
return
sessions = self.tg_connections[jid].invoke(GetAuthorizationsRequest())
elif parsed[0] == '!reload_dialogs':
if not self.tg_connections[jid].is_user_authorized():
self.gate_reply_message(iq, 'Error.')
return
self.tg_process_dialogs(jid)
self.gate_reply_message(iq, 'Dialogs reloaded.')
elif parsed[0] == '!logout': # --------------------------------------------------
self.tg_connections[jid].log_out()
self.db_connection.execute("DELETE FROM accounts WHERE jid = ?", (jid,))
self.gate_reply_message(iq, 'Your Telegram session was deleted')
elif parsed[0] == '!add': # add user
result = self.tg_connections[jid].get_entity(parsed[1])
if type(result) == User:
tg_peer = InputPeerUser( result.id, result.access_hash )
result = self.tg_connections[jid].invoke( SendMessageRequest(tg_peer, 'Hello! I just want to add you in my contact list.', generate_random_long() ) )
elif type(result) == Channel:
tg_peer = InputPeerChannel( result.id, result.access_hash )
self.tg_connections[jid].invoke(JoinChannelRequest( InputPeerChannel(result.id, result.access_hash) ) )
else:
self.gate_reply_message(iq, 'Sorry, nothing found.')
return
self.tg_process_dialogs(jid)
elif parsed[0] == '!join': # join chat by link
link = parsed[1].split('/') # https://t.me/joinchat/HrCmckx_SkMbSGFLhXCvSg
self.tg_connections[jid].invoke(ImportChatInviteRequest(link[4]))
time.sleep(1)
self.tg_process_dialogs(jid)
elif parsed[0] == '!group' and len(parsed) >= 3: # create new group
# group name? #
groupname = parsed[1]
# group users? #
groupuser = self.tg_connections[jid].get_entity(parsed[2])
            # we are ready to create the group
self.tg_connections[jid].invoke(CreateChatRequest([groupuser], groupname))
self.tg_process_dialogs(jid)
elif parsed[0] == '!channel' and len(parsed) >= 2: # create new channel
groupname = parsed[1]
self.tg_connections[jid].invoke(CreateChannelRequest(groupname, groupname, broadcast = True))
self.tg_process_dialogs(jid)
        elif parsed[0] == '!supergroup' and len(parsed) >= 2: # create new supergroup
groupname = parsed[1]
self.tg_connections[jid].invoke(CreateChannelRequest(groupname, groupname, megagroup = True))
self.tg_process_dialogs(jid)
        elif parsed[0] == '!username' and len(parsed) >= 2: # change @username
username = parsed[1]
self.tg_connections[jid].invoke(UpdateUsernameRequest(username))
        elif parsed[0] == '!name' and len(parsed) >= 2: # change profile name
firstname = parsed[1]
lastname = parsed[2] if len(parsed) > 2 else None
self.tg_connections[jid].invoke(UpdateProfileRequest(first_name = firstname, last_name = lastname))
        elif parsed[0] == '!about' and len(parsed) >= 2: # change about text
about = iq['body'][7:]
self.tg_connections[jid].invoke(UpdateProfileRequest(about = about))
        elif parsed[0] == '!import' and len(parsed) >= 3: # import contact by phone number
phone = parsed[1]
firstname = parsed[2]
lastname = parsed[3] if len(parsed) > 3 else None
contact = InputPhoneContact(client_id=generate_random_long(), phone=phone, first_name=firstname, last_name=lastname)
self.tg_connections[jid].invoke(ImportContactsRequest([contact]))
self.tg_process_dialogs(jid)
else: # --------------------------------------------------
self.gate_reply_message(iq, 'Unknown command. Try !help for list all commands.')
def process_chat_user_command(self, iq):
parsed = iq['body'].split(' ')
jid = iq['from'].bare
if parsed[0] == '!help':
self.gate_reply_message(iq, '=== Available dialog commands ===:\n\n'
'!help - Displays this text\n'
'!s/find/replace - Edit last message. Use empty `find` to edit whole message and empty `replace` to delete it.\n'
'!block - Blacklists current user\n'
'!unblock - Unblacklists current user\n'
'!remove - Removes history and contact from your contact list\n'
)
elif parsed[0] == '!block':
tg_id = int(iq['to'].node[1:])
nickname = display_tg_name(self.tg_dialogs[jid]['users'][tg_id])
self.tg_connections[jid].invoke(BlockRequest( InputPeerUser(tg_id, self.tg_dialogs[jid]['users'][tg_id].access_hash) ) )
self.gate_reply_message(iq, 'User %s blacklisted!' % nickname)
elif parsed[0] == '!unblock':
tg_id = int(iq['to'].node[1:])
nickname = display_tg_name(self.tg_dialogs[jid]['users'][tg_id])
self.tg_connections[jid].invoke(UnblockRequest( InputPeerUser(tg_id, self.tg_dialogs[jid]['users'][tg_id].access_hash) ) )
self.gate_reply_message(iq, 'User %s unblacklisted!' % nickname)
elif parsed[0] == '!remove':
tg_id = int(iq['to'].node[1:])
peer = InputPeerUser(tg_id, self.tg_dialogs[jid]['users'][tg_id].access_hash)
c_jid = get_contact_jid(self.tg_dialogs[jid]['users'][tg_id], self.boundjid.bare)
self.tg_connections[jid].invoke( DeleteContactRequest(peer) )
self.tg_connections[jid].invoke( DeleteHistoryRequest( peer, max_id = 0, just_clear = None ) )
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unavailable')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribed')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribe')
elif iq['body'].startswith('!s/'):
tg_id = int(iq['to'].node[1:])
peer = InputPeerUser(tg_id, self.tg_dialogs[jid]['users'][tg_id].access_hash)
msg_id, edited = self.edit_message(jid, tg_id, iq['body'])
if not edited: return
# and send it
if edited != '' and edited != ' ':
self.tg_dialogs[jid]['messages'][tg_id]["body"] = edited
self.tg_connections[jid].invoke( EditMessageRequest(peer, msg_id, message = edited) )
else:
del(self.tg_dialogs[jid]['messages'][tg_id])
self.tg_connections[jid].invoke( DeleteMessagesRequest([msg_id], revoke = True) )
def process_chat_group_command(self, iq):
parsed = iq['body'].split(' ')
jid = iq['from'].bare
if parsed[0] == '!help':
self.gate_reply_message(iq, '=== Available chat commands ===:\n\n'
'!help - Displays this text\n'
'!s/find/replace - Edit last message. Use empty `find` to edit whole message and empty `replace` to delete it.\n'
'!leave - Leaves current group or supergroup\n'
'!invite - Invites user to group\n'
'!kick - Kicks user to group\n'
)
elif parsed[0] == '!leave':
tg_id = int(iq['to'].node[1:])
if tg_id in self.tg_dialogs[jid]['supergroups']:
peer = InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash)
self.tg_connections[jid].invoke( LeaveChannelRequest(peer) )
self.tg_connections[jid].invoke( DeleteHistoryRequest( peer, max_id = 0, just_clear = None ) )
c_jid = get_contact_jid(self.tg_dialogs[jid]['supergroups'][tg_id], self.boundjid.bare)
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unavailable')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribed')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribe')
if tg_id in self.tg_dialogs[jid]['groups']:
self.tg_connections[jid].invoke( DeleteChatUserRequest(tg_id, self.tg_connections[jid].me) )
self.tg_connections[jid].invoke( DeleteHistoryRequest( InputPeerChat(tg_id), max_id = 0, just_clear = None ) )
c_jid = get_contact_jid(self.tg_dialogs[jid]['groups'][tg_id], self.boundjid.bare)
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unavailable')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribed')
self.send_presence(pto = jid, pfrom = c_jid, ptype = 'unsubscribe')
elif parsed[0] == '!invite':
tg_id = int(iq['to'].node[1:])
if tg_id in self.tg_dialogs[jid]['supergroups']:
invited_user = self.tg_connections[jid].get_entity(parsed[1])
if type(invited_user) == User:
self.tg_connections[jid].invoke(EditBannedRequest( InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash), invited_user, ChannelBannedRights(until_date=None,view_messages=False) ) )
self.tg_connections[jid].invoke(InviteToChannelRequest( InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash), [invited_user] ) )
if tg_id in self.tg_dialogs[jid]['groups']:
invited_user = self.tg_connections[jid].get_entity(parsed[1])
if type(invited_user) == User:
self.tg_connections[jid].invoke( AddChatUserRequest(tg_id, invited_user, 0) )
elif parsed[0] == '!kick':
tg_id = int(iq['to'].node[1:])
if tg_id in self.tg_dialogs[jid]['supergroups']:
kicked_user = self.tg_connections[jid].get_entity(parsed[1])
if type(kicked_user) == User:
self.tg_connections[jid].invoke(EditBannedRequest( InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash), kicked_user, ChannelBannedRights(until_date=None,view_messages=True) ) )
if tg_id in self.tg_dialogs[jid]['groups']:
kicked_user = self.tg_connections[jid].get_entity(parsed[1])
if type(kicked_user) == User:
self.tg_connections[jid].invoke( DeleteChatUserRequest(tg_id, kicked_user) )
elif iq['body'].startswith('!s/'):
tg_id = int(iq['to'].node[1:])
peer = InputPeerChannel(tg_id, self.tg_dialogs[jid]['supergroups'][tg_id].access_hash) if tg_id in self.tg_dialogs[jid]['supergroups'] else InputPeerChat(tg_id)
msg_id, edited = self.edit_message(jid, tg_id, iq['body'])
if not edited: return
# and send it
if edited != '' and edited != ' ':
self.tg_dialogs[jid]['messages'][tg_id]["body"] = edited
self.tg_connections[jid].invoke( EditMessageRequest(peer, msg_id, message = edited) )
else:
del(self.tg_dialogs[jid]['messages'][tg_id])
if isinstance(peer, InputPeerChannel):
self.tg_connections[jid].invoke( DeleteMessagesChannel(peer, [msg_id]) )
else:
self.tg_connections[jid].invoke( DeleteMessagesRequest([msg_id], revoke = True) )
def spawn_tg_client(self, jid, phone):
"""
Spawns Telegram client
:param jid:
:param phone:
:return:
"""
client = TelegramGateClient('a_'+phone, int(self.config['tg_api_id']), self.config['tg_api_hash'], self, jid, phone)
if 'tg_server_ip' in self.config and 'tg_server_dc' in self.config and 'tg_server_port' in self.config:
client.session.set_dc(self.config['tg_server_dc'], self.config['tg_server_ip'], self.config['tg_server_port'])
client.connect()
self.tg_connections[jid] = client
self.tg_phones[jid] = phone
if client.is_user_authorized():
self.init_tg(jid)
self.send_presence(pto=jid, pfrom=self.boundjid.bare, ptype='online', pstatus='connected')
def init_tg(self, jid):
"""
Initialize
:param jid:
:return:
"""
# Set status = Online
self.tg_connections[jid].invoke(UpdateStatusRequest(offline=False))
# Process Telegram contact list
self.tg_process_dialogs(jid, sync_roster = False)
        # Register Telegram updates handler
self.tg_connections[jid].add_update_handler(self.tg_connections[jid].xmpp_update_handler)
def roster_exchange(self, tojid, contacts):
message = Message()
message['from'] = self.boundjid.bare
message['to'] = tojid
rawxml = "<x xmlns='http://jabber.org/protocol/rosterx'>"
for jid, nick in contacts.items():
c = "<item action='add' jid='%s' name='%s'><group>Telegram</group></item>" % (jid, nick)
rawxml = rawxml + c
rawxml = rawxml + "</x>"
message.appendxml(ET.fromstring(rawxml))
self.send(message)
def roster_fill(self, tojid, contacts):
for jid, nick in contacts.items():
presence = Presence()
presence['from'] = jid
presence['to'] = tojid
presence['type'] = 'subscribe'
presence.appendxml(ET.fromstring("<nick xmlns='http://jabber.org/protocol/nick'>%s</nick>" % nick))
self.send(presence)
def tg_process_dialogs(self, jid, sync_roster = True):
print('Processing dialogs...')
        # dialogs dictionaries
self.tg_dialogs[jid] = dict()
self.tg_dialogs[jid]['raw'] = list()
self.tg_dialogs[jid]['users'] = dict()
self.tg_dialogs[jid]['groups'] = dict()
self.tg_dialogs[jid]['supergroups'] = dict()
self.tg_dialogs[jid]['messages'] = dict()
# offsets
last_peer = InputPeerEmpty()
last_msg_id = 0
last_date = None
# roster exchange #
self.contact_list[jid] = dict()
while True:
dlgs = self.tg_connections[jid].invoke(GetDialogsRequest(offset_date=last_date, offset_id=last_msg_id,
offset_peer=last_peer, limit=100))
self.tg_dialogs[jid]['raw'].append(dlgs)
for usr in dlgs.users:
self.tg_dialogs[jid]['users'][usr.id] = usr
for cht in dlgs.chats:
if type(cht) in [Chat, ChatForbidden]: # normal group
self.tg_dialogs[jid]['groups'][cht.id] = cht
elif type(cht) in [Channel, ChannelForbidden]: # supergroup
self.tg_dialogs[jid]['supergroups'][cht.id] = cht
for dlg in dlgs.dialogs:
if type(dlg.peer) is PeerUser:
usr = self.tg_dialogs[jid]['users'][dlg.peer.user_id]
vcard = self.plugin['xep_0054'].make_vcard()
u_jid = get_contact_jid(usr, self.boundjid.bare)
# make vcard #
vcard['JABBERID'] = u_jid
if usr.deleted:
rostername = "Deleted Account"
vcard['FN'] = 'Deleted account'
vcard['DESC'] = 'This user no longer exists in Telegram'
else:
rostername = display_tg_name(usr)
rostername = '[B] ' + rostername if usr.bot else rostername
vcard['FN'] = display_tg_name(usr)
vcard['DESC'] = ''
if usr.first_name:
vcard['N']['GIVEN'] = usr.first_name
if usr.last_name:
vcard['N']['FAMILY'] = usr.last_name
if usr.username:
vcard['DESC'] = 'Telegram Username: @' + usr.username
if usr.phone:
vcard['DESC'] += "\n" + 'Phone number: ' + usr.phone
vcard['NICKNAME'] = vcard['FN']
# add photo to VCard #
photo, photosha1hash = self.get_peer_photo(jid, usr) if sync_roster else (None, None)
if photo:
vcard['PHOTO']['TYPE'] = 'image/jpeg'
vcard['PHOTO']['BINVAL'] = photo
self.plugin['xep_0054'].publish_vcard(jid=u_jid, vcard=vcard)
self.plugin['xep_0172'].publish_nick(nick=vcard['FN'], ifrom=u_jid)
self.publish_photo(jid, u_jid, photosha1hash) if photosha1hash else None
                    # add it to contact list & avatar download queue #
self.contact_list[jid][u_jid] = rostername
if usr.bot:
self.send_presence(pto=jid, pfrom=u_jid, pshow = 'chat', pstatus='Bot')
else:
if type(usr.status) is UserStatusOnline:
self.send_presence(pto=jid, pfrom=u_jid, pstatus = 'Online' )
elif type(usr.status) is UserStatusRecently:
self.send_presence(pto=jid, pfrom=u_jid, pshow='dnd', pstatus='Last seen recently')
elif type(usr.status) is UserStatusOffline:
phow = 'away' if datetime.datetime.utcnow() - usr.status.was_online < datetime.timedelta(hours = self.accounts[jid]['status_xa_interval'] ) else 'xa'
self.send_presence(pto=jid, pfrom=u_jid, pshow=phow, pstatus=localtime(usr.status.was_online).strftime('Last seen at %H:%M %d/%m/%Y') )
else:
self.send_presence(pto=jid, pfrom=u_jid, ptype='unavailable', pstatus='Last seen a long time ago')
if type(dlg.peer) in [PeerChat, PeerChannel]:
cht = None
if type(dlg.peer) is PeerChat: # old group
cht = self.tg_connections[jid].invoke(GetFullChatRequest(dlg.peer.chat_id))
cht = cht.chats[0]
if cht.deactivated or cht.left:
cht = None
elif type(dlg.peer) is PeerChannel: # supergroup
cht = self.tg_dialogs[jid]['supergroups'][dlg.peer.channel_id]
if cht and cht.id:
rostername = display_tg_name(cht)
u_jid = get_contact_jid(cht, self.boundjid.bare)
vcard = self.plugin['xep_0054'].make_vcard()
vcard['FN'] = rostername
vcard['NICKNAME'] = rostername
vcard['JABBERID'] = u_jid
# add photo to VCard #
photo, photosha1hash = self.get_peer_photo(jid, cht) if sync_roster else (None, None)
if photo:
vcard['PHOTO']['TYPE'] = 'image/jpeg'
vcard['PHOTO']['BINVAL'] = photo
self.plugin['xep_0054'].publish_vcard(jid=u_jid, vcard=vcard)
self.plugin['xep_0172'].publish_nick(nick=vcard['FN'], ifrom=u_jid)
self.publish_photo(jid, u_jid, photosha1hash) if photosha1hash else None
self.contact_list[jid][u_jid] = rostername
self.send_presence(pto=jid, pfrom=u_jid, pshow = 'chat', pstatus = cht.title)
            if len(dlgs.dialogs) == 0: # all dialogs were received.
if sync_roster and 'use_roster_exchange' in self.accounts[jid] and self.accounts[jid]['use_roster_exchange'] == 'true':
self.roster_exchange(jid, self.contact_list[jid])
elif sync_roster:
self.roster_fill(jid, self.contact_list[jid])
break
else: # get next part of dialogs.
                last_msg_id = dlgs.dialogs[-1].top_message # the last message id is needed as the next offset
last_peer = dlgs.dialogs[-1].peer
last_date = next(msg for msg in dlgs.messages # find date
if type(msg.to_id) is type(last_peer) and msg.id == last_msg_id).date
if type(last_peer) is PeerUser: # user/bot
access_hash = self.tg_dialogs[jid]['users'][last_peer.user_id].access_hash
last_peer = InputPeerUser(last_peer.user_id, access_hash)
                elif type(last_peer) is PeerChat: # normal group
last_peer = InputPeerChat(last_peer.chat_id)
                elif type(last_peer) is PeerChannel: # supergroup/channel
access_hash = self.tg_dialogs[jid]['supergroups'][last_peer.channel_id].access_hash
last_peer = InputPeerChannel(last_peer.channel_id, access_hash)
def tg_process_unread_messages(self):
pass
def gate_reply_message(self, iq, msg):
"""
Reply to message to gate.
:param iq:
:param msg:
:return:
"""
self.send_message(mto=iq['from'], mfrom=self.config['jid'], mtype='chat', mbody=msg)
def get_peer_photo(self, jid, peer):
        # avatar downloads can be disabled in the account configuration #
if not 'enable_avatars' in self.accounts[jid] or self.accounts[jid]['enable_avatars'] != 'true':
return (None, None)
data = io.BytesIO()
self.tg_connections[jid].download_profile_photo(peer, file = data)
data.flush()
if isinstance(data, io.BytesIO) and data.getbuffer().nbytes > 0:
image = data.getvalue()
image_sha1 = hashlib.sha1(image).hexdigest()
return (image, image_sha1)
else:
return (None, None)
def edit_message(self, jid, tg_id, message):
# get last message to this peer
if not tg_id in self.tg_dialogs[jid]['messages']:
return (None, None)
msg_id = self.tg_dialogs[jid]['messages'][tg_id]["id"]
msg_body = self.tg_dialogs[jid]['messages'][tg_id]["body"]
# edit this message
pattern = message.split('/')
        replace = ' ' if pattern[2] == '' else '/'.join(pattern[2:])  # empty replacement becomes a single space (treated as delete by the caller)
        edited = re.sub(r'%s' % pattern[1], replace, msg_body, flags=re.I) if pattern[1] != '' else replace  # empty find pattern: replace the whole message
return (msg_id, edited)
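    # edit_message worked example (illustrative): with a last message of "helo world",
    # "!s/helo/hello" returns "hello world"; an empty replacement such as "!s/world/"
    # substitutes a single space for the match; and "!s//" returns " ", which the
    # callers above treat as a request to delete the message.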
def publish_photo(self, jid, fromjid, photo):
presence = Presence()
presence['to'] = jid
presence['from'] = fromjid
presence.appendxml(ET.fromstring("<x xmlns='vcard-temp:x:update'><photo>%s</photo></x>" % photo))
self.send(presence)
def init_database(self):
"""
Database initialization
:return:
"""
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
conn = sqlite3.connect(self.config['db_connect'], isolation_level=None, check_same_thread=False)
conn.row_factory = dict_factory
conn.execute("CREATE TABLE IF NOT EXISTS accounts(jid VARCHAR(255), tg_phone VARCHAR(25), use_roster_exchange BOOLEAN default false, keep_online BOOLEAN default false, status_update_interval INTEGER default 30, status_xa_interval INTEGER default 24, enable_avatars BOOLEAN default false)")
return conn
| 50.419512
| 297
| 0.552511
|
54a737b706882ec2c5b07c8d80bd9ae75056fdd9
| 3,516
|
py
|
Python
|
pirates/tutorial/ShipWreck.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 3
|
2021-02-25T06:38:13.000Z
|
2022-03-22T07:00:15.000Z
|
pirates/tutorial/ShipWreck.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | null | null | null |
pirates/tutorial/ShipWreck.py
|
itsyaboyrocket/pirates
|
6ca1e7d571c670b0d976f65e608235707b5737e3
|
[
"BSD-3-Clause"
] | 1
|
2021-02-25T06:38:17.000Z
|
2021-02-25T06:38:17.000Z
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.tutorial.ShipWreck
from direct.interval.IntervalGlobal import *
from direct.gui.DirectGui import *
from direct.actor import Actor
from pandac.PandaModules import *
from pirates.piratesbase.PiratesGlobals import *
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from pirates.effects.ShipSplintersA import ShipSplintersA
from pirates.effects.SmokeCloud import SmokeCloud
from pirates.effects.ExplosionFlip import ExplosionFlip
from pirates.effects.Fire import Fire
from pirates.effects.BlackSmoke import BlackSmoke
from pirates.uberdog.UberDogGlobals import InventoryType
from pirates.battle import CannonGlobals
from pirates.battle import WeaponGlobals
from pirates.shipparts import ShipPart
class ShipWreck(NodePath):
__module__ = __name__
notify = directNotify.newCategory('ShipWreck')
def __init__(self, npShipWreck, uid):
NodePath.__init__(self, npShipWreck)
self.tutorial = None
self.hitCount = 0
self.uid = uid
self.coll = self.findAllMatches('**/+CollisionNode')
self.__targetableCollisions = []
return
def delete(self):
self.hitCount = 0
self.clearTargetableCollisions()
def makeTargetableCollision(self, doId):
for i in range(0, self.coll.getNumPaths()):
c = self.coll[i]
c.setTag('objType', str(PiratesGlobals.COLL_SHIP_WRECK))
c.setTag('propId', str(doId))
self.addTargetableCollision(c)
self.setTargetBitmask(True)
def addTargetableCollision(self, coll):
self.__targetableCollisions.append(coll)
def getTargetableCollisions(self):
return self.__targetableCollisions
def clearTargetableCollisions(self):
self.__targetableCollisions = []
def setTargetBitmask(self, on):
if on:
for coll in self.__targetableCollisions:
curMask = coll.node().getIntoCollideMask()
newMask = curMask | PiratesGlobals.TargetBitmask
coll.setCollideMask(newMask)
        else:
            # clear the target bit when targeting is turned off (the decompiled source appears to have lost this else branch)
            for coll in self.__targetableCollisions:
                curMask = coll.node().getIntoCollideMask()
                newMask = curMask ^ PiratesGlobals.TargetBitmask
                coll.setCollideMask(newMask)
def projectileWeaponHit(self, pos):
if self.tutorial:
self.tutorial.cannonHitWreck(self)
if base.cr.wantSpecialEffects:
explosionEffect = ExplosionFlip.getEffect()
if explosionEffect:
explosionEffect.reparentTo(render)
explosionEffect.setPos(self, pos)
explosionEffect.setScale(0.8)
explosionEffect.play()
smokeCloudEffect = SmokeCloud.getEffect()
if smokeCloudEffect:
smokeCloudEffect.reparentTo(render)
smokeCloudEffect.setPos(self, pos)
smokeCloudEffect.setScale(1.0)
smokeCloudEffect.spriteScale = 1.0
smokeCloudEffect.radius = 7.0
smokeCloudEffect.play()
shipSplintersAEffect = ShipSplintersA.getEffect()
if shipSplintersAEffect:
shipSplintersAEffect.reparentTo(render)
shipSplintersAEffect.setPos(self, pos)
shipSplintersAEffect.play()
| 38.637363
| 104
| 0.677474
|
2354522b60138b91831be0296f32c2ac92f818e2
| 3,704
|
py
|
Python
|
Backend/Murphi/MurphiModular/Types/Enums/SubEnums/GenArchEnums.py
|
Errare-humanum-est/HeteroGen
|
600a7bde441cc1365a465746e15564bd8de8fc37
|
[
"MIT"
] | 1
|
2022-01-12T15:52:07.000Z
|
2022-01-12T15:52:07.000Z
|
Backend/Murphi/MurphiModular/Types/Enums/SubEnums/GenArchEnums.py
|
Errare-humanum-est/HeteroGen
|
600a7bde441cc1365a465746e15564bd8de8fc37
|
[
"MIT"
] | null | null | null |
Backend/Murphi/MurphiModular/Types/Enums/SubEnums/GenArchEnums.py
|
Errare-humanum-est/HeteroGen
|
600a7bde441cc1365a465746e15564bd8de8fc37
|
[
"MIT"
] | 1
|
2021-12-14T18:03:37.000Z
|
2021-12-14T18:03:37.000Z
|
# Copyright (c) 2021. Nicolai Oswald
# Copyright (c) 2021. University of Edinburgh
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from typing import List, Dict, Any
from DataObjects.ClassCluster import Cluster
from DataObjects.States.ClassStatev2 import State_v2
from Backend.Common.TemplateHandler.TemplateBase import TemplateBase
from Backend.Murphi.MurphiModular.MurphiTokens import MurphiTokens
from Debug.Monitor.ClassDebug import Debug
class GenArchEnums(TemplateBase, Debug):
def __init__(self, murphi_str: List[str], clusters: List[Cluster]):
TemplateBase.__init__(self)
Debug.__init__(self)
state_enum_str = "------" + __name__.replace('.', '/') + self.nl
for cluster in clusters:
for arch in cluster.get_machine_architectures():
# Generate states
state_enum_str += self.gen_state_enums(str(arch), arch.get_architecture_states_verified())
# Generate events
state_enum_str += self.gen_event_enums(str(arch), arch.event_network.event_issue)
murphi_str.append(state_enum_str)
def gen_state_enums(self, arch: str, state_list: List[State_v2]) -> str:
state_str = MurphiTokens.k_state_label + str(arch) + ": enum {" + self.nl
state_str_list = [str(state) for state in state_list]
self.pwarning("Duplicated state identifiers found in architecture: " + str(state_str_list),
len(state_str_list) != len(set(state_str_list)))
for state in sorted(list(set(state_str_list)), key=lambda x: str(x), reverse=True):
state_str += self.tab + str(arch) + "_" + str(state) + "," + self.nl
state_str = state_str[:state_str.rfind(",")]
return state_str + self.nl + "}" + self.end + self.nl
def gen_event_enums(self, arch: str, event_dict: Dict[str, Any]) -> str:
if not event_dict:
return ""
state_str = MurphiTokens.k_event_label + str(arch) + ": enum {" + self.nl
for event in event_dict:
state_str += self.tab + str(arch) + "_" + str(event) + "," + self.nl
state_str = state_str[:state_str.rfind(",")]
return state_str + self.nl + "}" + self.end + self.nl
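# Illustrative output of gen_state_enums (the token values MurphiTokens.k_state_label,
# self.nl, self.tab and self.end are defined elsewhere in this project; "<k_state_label>"
# and "<end>" below are placeholders): for an architecture "cache" with states I, S, M
# it emits roughly
#
#     <k_state_label>cache: enum {
#         cache_S,
#         cache_M,
#         cache_I
#     }<end>
#
# with states sorted in reverse alphabetical order and the trailing comma stripped.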
| 46.3
| 106
| 0.705994
|
2922ff7b550fb71b93270b533afec554e0e117c1
| 4,207
|
py
|
Python
|
clothings/views.py
|
ygrass/handsome
|
0ea016745d92054bd4df8d934c1b67fd61b6f845
|
[
"Unlicense"
] | null | null | null |
clothings/views.py
|
ygrass/handsome
|
0ea016745d92054bd4df8d934c1b67fd61b6f845
|
[
"Unlicense"
] | null | null | null |
clothings/views.py
|
ygrass/handsome
|
0ea016745d92054bd4df8d934c1b67fd61b6f845
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.core import serializers
from django.core.urlresolvers import reverse_lazy
from django.db.models import Q
from django.views.generic.base import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
from braces.views import (
SuperuserRequiredMixin, StaffuserRequiredMixin, AjaxResponseMixin,
JSONResponseMixin, LoginRequiredMixin
)
from .models import Clothing, Supplier
class CreateClothingView(SuperuserRequiredMixin, CreateView):
"""
View for create new clothing
"""
model = Clothing
success_url = reverse_lazy('clothings:list')
class UpdateClothingView(SuperuserRequiredMixin, UpdateView):
"""
View for update clothing object
"""
model = Clothing
success_url = reverse_lazy('clothings:list')
class ClothingListView(StaffuserRequiredMixin, ListView):
"""
Display all the clothings
"""
model = Clothing
def get_context_data(self, **kwargs):
"""
Add extra data to context
"""
data = super(ClothingListView, self).get_context_data(**kwargs)
data.update({'clothing_choices': Clothing.CATEGORY_CHOICES,
'suppliers': Supplier.objects.all()})
data.update(self.request.GET.dict())
return data
def get_queryset(self):
"""
Filter clothings
"""
qs = super(ClothingListView, self).get_queryset()
# category
category = self.request.REQUEST.get('category', 'all')
if category == 'all':
category_Q = Q()
else:
category_Q = Q(category=category)
# supplier
supplier = self.request.REQUEST.get('supplier', 'all')
if supplier == 'all':
supplier_Q = Q()
else:
supplier_Q = Q(supplier__id=supplier)
return qs.filter(category_Q, supplier_Q)
class ClothingSearchView(StaffuserRequiredMixin, AjaxResponseMixin,
JSONResponseMixin, View):
"""
Search clothing
"""
page_size = 20
def search(self, category, name, page):
"""
Search clothings
"""
category_Q = Q()
if category:
category_Q = Q(category=category)
name_Q = Q()
if name:
name_Q = Q(name__contains=name)
start = page*self.page_size
end = start + self.page_size
return Clothing.objects.filter(category_Q, name_Q).filter(is_active=True)[start:end] # noqa
def get_ajax(self, request, *args, **kwargs):
"""
Do ajax search
"""
category = request.REQUEST.get('category')
if not category or category == 'all':
category = None
name = request.REQUEST.get('name')
if not name:
name = None
page = request.REQUEST.get('page')
if not page:
page = 0
else:
page = int(page)
clothings = []
for clo in self.search(category, name, page):
clothings.append({
'pk': clo.id,
'name': clo.name,
'sku': clo.sku,
'price': clo.price,
'sizes': clo.sizes,
'colors': clo.colors,
'note': clo.note,
'image': clo.medium_image,
'is_active': clo.is_active,
'category': clo.category
})
return self.render_json_response(clothings)
class SupplierListView(SuperuserRequiredMixin, ListView):
"""
Display all suppliers
"""
model = Supplier
class CreateSupplierView(SuperuserRequiredMixin, CreateView):
"""
Create new supplier
"""
model = Supplier
success_url = reverse_lazy('clothings:supplier_list')
class UpdateSupplierView(SuperuserRequiredMixin, UpdateView):
"""
Update supplier info
"""
model = Supplier
success_url = reverse_lazy('clothings:supplier_list')
class ClothingPhotosView(LoginRequiredMixin, DetailView):
model = Clothing
template_name = 'clothings/clothing_photos.html'
| 27.141935
| 100
| 0.60851
|
7d3370bb1c4584f51c6da9ecc4c8341670ac45f2
| 585
|
py
|
Python
|
2019/8.py
|
20Koen02/adventofcode
|
72f897b280ced4cdd2b72ff7182795e3c7ee9aa7
|
[
"MIT"
] | 6
|
2021-12-01T16:39:29.000Z
|
2021-12-10T14:00:03.000Z
|
2019/8.py
|
20Koen02/adventofcode
|
72f897b280ced4cdd2b72ff7182795e3c7ee9aa7
|
[
"MIT"
] | null | null | null |
2019/8.py
|
20Koen02/adventofcode
|
72f897b280ced4cdd2b72ff7182795e3c7ee9aa7
|
[
"MIT"
] | 1
|
2021-12-09T12:42:11.000Z
|
2021-12-09T12:42:11.000Z
|
import numpy as np
def getInput():
with open("inout/8_input.txt") as f:
lines = f.read()
return lines.strip()
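# Advent of Code 2019, day 8 part 2: for every pixel, take the value of the first
# non-transparent ("2") layer and render "1" as a filled block and "0" as an empty one,
# printing the 25x6 image row by row.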
def main():
lines = getInput()
w = 25
h = 6
pixels = w * h
layers = len(lines) // pixels
data = np.zeros((h, w, 3), dtype=np.uint8)
for i in range(pixels):
for j in range(layers):
if lines[(j*pixels)+i] != "2":
print("■ " if lines[(j*pixels)+i] == "1" else "□ ", end="")
break
if (i+1) % w == 0:
print()
if __name__ == "__main__":
main()
| 18.870968
| 75
| 0.466667
|
25ad92e96f65c0541d7d4495d689d1c023720017
| 11,564
|
py
|
Python
|
src/algorithms/utils.py
|
jbustospelegri/breast_cancer_diagnosis
|
38eb990ef716912c6acabb443e6eb5c21d9b4e0d
|
[
"MIT"
] | 5
|
2022-02-10T12:36:41.000Z
|
2022-03-23T17:28:35.000Z
|
src/algorithms/utils.py
|
jbustospelegri/breast_cancer_diagnosis
|
38eb990ef716912c6acabb443e6eb5c21d9b4e0d
|
[
"MIT"
] | null | null | null |
src/algorithms/utils.py
|
jbustospelegri/breast_cancer_diagnosis
|
38eb990ef716912c6acabb443e6eb5c21d9b4e0d
|
[
"MIT"
] | 1
|
2022-03-16T10:41:17.000Z
|
2022-03-16T10:41:17.000Z
|
import pandas as pd
import numpy as np
import utils.config as conf
from multiprocessing import Queue
from typing import Union, io
from sklearn.metrics import roc_curve
from sklearn.utils import resample
from tensorflow.keras import models
from tensorflow.keras.preprocessing.image import Iterator
from tensorflow.keras import Model
from tensorflow.keras.backend import argmax
from tensorflow.keras.callbacks import CSVLogger, EarlyStopping
from tensorflow.keras.optimizers import Adam
from utils.functions import get_path, bulk_data
from breast_cancer_dataset.database_generator import BreastCancerDataset
def get_predictions(keras_model: models, data: Iterator, **kwargs) -> pd.DataFrame:
"""
Función utilizada para generar las predicciones de un modelo. El dataframe generado contendrá el path de la imagen,
la clase verdadera (en caso de existir) y la clase predicha.
:param keras_model: modelo sobre el que se aplicará el método .predict para generar las predicciones
:param data: dataset sobre el cual aplicar las predicciones
:param kwargs: columnas adicionales para añadir al dataframe devuelto. El key de kwargs será el nombre de la columna
y los values serán el valor asignado a cada columna.
:return: dataframe con el path de la imagen, la clase verdadera (en caso de existir), la clase predicha y columnas
definidas por kwargs.
"""
    # Shuffle the data generator (same effect as on_epoch_end)
    data.on_epoch_end()
    # Recover the file paths from the dataset generator
    fnames = [data.filenames[i] for i in data.indexes]
    true_labels = []
    # If the true class exists, recover it and add it to the dataset
    if hasattr(data, 'classes'):
        true_labels = [data.classes[i] for i in data.indexes]
    # Set the batch size to 1 so that every observation is predicted
    data.set_batch_size(1)
    # Predict the data and keep the probability of class 1 (malignant)
    predictions = keras_model.predict(data)[:, 1]
    # Build the final dataset with the files, the prediction and, when available, the true class
    if true_labels:
        dataset = pd.DataFrame({'PROCESSED_IMG': fnames, 'PREDICTION': predictions, 'IMG_LABEL': true_labels})
    else:
        dataset = pd.DataFrame({'PROCESSED_IMG': fnames, 'PREDICTION': predictions})
    # Append the additional columns to the dataset
    for col, value in kwargs.get('add_columns', {}).items():
        dataset.loc[:, col] = [value] * len(dataset)
return dataset
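# Hypothetical usage sketch (not part of the original pipeline): assuming a trained keras
# model `model` and a validation iterator `val_iterator` are already available, the call
# below would build the predictions dataframe described above and tag every row with a
# TRAIN_VAL column:
#
#   preds = get_predictions(keras_model=model, data=val_iterator,
#                           add_columns={'TRAIN_VAL': 'val'})
#   preds.to_csv('val_predictions.csv', index=False)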
def training_pipe(m: Model, db: BreastCancerDataset, q: Queue, c: conf.MODEL_FILES, task_type: str, fc: str = 'simple',
weight_init: Union[str, io] = None, frozen_layers: Union[str, int] = None) -> None:
"""
Función utilizada para generar el pipeline de entrenamiento de cada modelo. Dado que tensorflow no libera cache
al entrenar un modelo, se debe de llamar esta función a través de un thread o proceso paralelo.
:param m: Red neuronal (objeto de la clase General Model) que contendrá cada algoritmo de dl
:param db: Objeto BreastCancerDataset con las observaciones de los conjuntos de entrenamiento y validacion
:param q: Queue para transmitir comunicar el resultado al thread principal.
:param fc: string indicando el tipo de estructura a utulizar como top layers de cada arquitectura
:param c: objeto Model files que contiene información sobre ls rutas de guardado de cada modelo
:param task_type: admite los valores 'classification' o 'segmentation' para escoger el tipo de tarea a realizar
:param weight_init: nombre o path de los pesos con los que inicializar el entrenamiento de un modelo.
:param frozen_layers: número de capas a entrenar en cada modelo
"""
    # Initialise each model:
    cnn = m(n=len(db.class_dict), weights=None if weight_init == 'random' else weight_init, top_fc=fc)
    # Register the metrics to be stored and build the train and validation generators, applying each
    # network's own scaling and preprocessing function. The batch size is taken from the
    # configuration sheet.
if task_type == 'classification':
cnn.register_metric(*list(conf.CLASSIFICATION_METRICS.values()))
train, val = db.get_classification_dataset_generator(
batch_size=cnn.BS_DICT[frozen_layers], callback=cnn.get_preprocessing_func(), size=cnn.shape[:2]
)
# train, val = db.get_classification_dataset_generator(
# batch_size=conf.BATCH_SIZE, callback=cnn.get_preprocessing_func(), size=cnn.shape[:2]
# )
elif task_type == 'segmentation':
cnn.register_metric(*list(conf.SEGMENTATION_METRICS.values()))
train, val = db.get_segmentation_dataset_generator(
batch_size=conf.SEGMENTATION_BATCH_SIZE, callback=cnn.get_preprocessing_func(), size=conf.IMG_SHAPE
)
else:
raise ValueError(f'task_type not incorporated')
name = cnn.__name__
if frozen_layers != 'ALL':
filename = f'{name}_FineTunning'
else:
filename = f'{name}_Scratch'
    # Register the model callbacks (EarlyStopping and a CSV logger for each model's training)
csv_filepath = get_path(c.model_log_dir, weight_init, frozen_layers, f'{filename}.csv')
cnn.register_callback(
early_stopping=EarlyStopping(monitor='val_loss', mode='min', patience=20, restore_best_weights=True),
log=CSVLogger(filename=csv_filepath, separator=';', append=True)
)
    # Depending on the frozen layers and the weight initialisation, each architecture is trained
    # with the following logic:
    #   - If every layer is trained, the train_from_scratch method is called.
    #   - Otherwise, only the FC layers of each model are trained first, and afterwards the model is
    #     trained with the layers specified by the user.
    # By default the optimiser is Adam with the learning rate defined in the configuration sheet.
if frozen_layers == 'ALL':
print(f'{"=" * 75}\nEntrenando {name} desde 0 con inicialización de pesos {weight_init}\n{"=" * 75}')
# Entrenamiento desde 0
t, e = cnn.train_from_scratch(train, val, conf.EPOCHS, Adam(conf.LEARNING_RATE))
# Se registra el etrenamiento del modelo (tiempo, capas, inicialización) en un csv
bulk_data(file=c.model_summary_train_csv, mode='a', cnn=name, process='Scratch', FT=frozen_layers,
weights=weight_init, time=t, epochs=e, trainable_layers=cnn.get_trainable_layers())
print(f'{"=" * 75}\nEntrenamiento finalizado.\n{"=" * 75}')
else:
print(f'{"=" * 75}\nEntrenando {name} mediante transfer learning con inicialización de pesos de '
f'{weight_init}. Número de capas a entrenar {frozen_layers}\n{"=" * 75}')
print(f'{"-" * 75}\n\tEmpieza proceso de extract-features (warm up)\n{"-" * 75}')
# Se realiza el extract features para entrenar los pesos de la capa FC
t, e = cnn.extract_features(train, val, conf.WARM_UP_EPOCHS, Adam(conf.LEARNING_RATE))
# Se registra el etrenamiento del modelo (tiempo, capas, inicialización) en un csv
bulk_data(file=c.model_summary_train_csv, mode='a', cnn=name, process='ExtractFeatures', FT=frozen_layers,
weights=weight_init, time=t, epochs=e, trainable_layers=cnn.get_trainable_layers())
print(f'{"-" * 75}\n\tEntrenamiento finalizado.\n{"-" * 75}')
print(f'{"-" * 75}\n\tEmpieza proceso de fine-tunning\n{"-" * 75}')
# Se entrena el modelo congelando las capas especificadas por el usuario
t, e = cnn.fine_tunning(train, val, conf.EPOCHS, Adam(cnn.get_learning_rate()), frozen_layers)
# Se registra el etrenamiento del modelo (tiempo, capas, inicialización) en un csv
bulk_data(file=c.model_summary_train_csv, mode='a', cnn=name, process='FineTunning', FT=frozen_layers,
weights=weight_init, time=t, epochs=e, trainable_layers=cnn.get_trainable_layers())
print(f'{"-" * 75}\n\tEntrenamiento finalizado.\n{"-" * 75}')
print(f'{"=" * 75}\nProceso de transfer learning finalizado\n{"=" * 75}')
    # Store the model weights
    print(f'{"=" * 75}\nSaving the model.\n{"=" * 75}')
    cnn.save_weights(dirname=get_path(c.model_store_cnn_dir, weight_init, frozen_layers), model_name=f"{name}.h5")
    print(f'{"=" * 75}\nModel saved successfully.\n{"=" * 75}')
    # When doing classification, obtain the predictions for every instance.
    if task_type == 'classification':
        print(f'{"=" * 75}\nGenerating predictions for model {name}.\n{"=" * 75}')
        # Build the train and validation predictions as a dataframe and send them back to the
        # main process.
q.put(
pd.concat(
objs=[
get_predictions(keras_model=cnn, data=train, add_columns={'TRAIN_VAL': 'train'}),
get_predictions(keras_model=cnn, data=val, add_columns={'TRAIN_VAL': 'val'})
],
ignore_index=True)
)
print(f'{"=" * 75}\nPredicciones finalizadas.\n{"=" * 75}')
else:
q.put(True)
def optimize_threshold(true_labels: np.array, pred_labels: np.array) -> float:
"""
Función utilizada para obtimizar el threshold a partir del estadístico J de Youden (maximizar tasa de tpr y tnr).
:param true_labels: Vector con las clases verdaderas
:param pred_labels: Vector con las clases predichas
:return: threshold que maximiza la diferencia entre tpr y fpr = tpr + tnr - 1
"""
try:
fpr, tpr, thresholds = roc_curve(true_labels, pred_labels)
return thresholds[argmax(tpr - fpr)]
except Exception:
return None
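# Illustrative example only: with well separated scores the Youden threshold sits at the
# boundary between the two classes, e.g.
#   optimize_threshold(np.array([0, 0, 1, 1]), np.array([0.1, 0.2, 0.8, 0.9]))
# returns roughly 0.8, the smallest score that already yields tpr = 1 and fpr = 0.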
def apply_bootstrap(data: pd.DataFrame, true_col: str, pred_col: str, metric: callable, iters: int = 1000,
ci: float = 0.95, prop: float = 0.75, **kwargs) -> tuple:
"""
Función utilizada para aplicar un bootstrap y obtener una métrica de actucación de un modelo.
:param data: pandas dataframe con los datos verdaderos y predichos de cada instancia
:param true_col: nombre de la columna del dataframe con los datos verdaderos
:param pred_col: nombre de la columna del dataframe con los datos predidchos
:param metric: callback sobre el cual aplicar la métrica
:param iters: número de iteraciones para realizar el algoritmo de bootstrap
:param ci: interalo de confianza para obtener la metrica
:param prop: proporción del set de datos a tener en cuenta para aplicar el bootstrap
:param kwargs: parámetros del callable metric
:return: media del intervalo con sus respectivos limites (mínimo y máximo).
"""
assert true_col in data.columns, f'{true_col} not in dataframe'
assert pred_col in data.columns, f'{pred_col} not in dataframe'
results = []
for i in range(iters):
sample = resample(data, n_samples=int(len(data) * prop))
results.append(metric(sample[true_col].values.tolist(), sample[pred_col].values.tolist(), **kwargs))
try:
lower = max(0.0, np.percentile(results, ((1.0 - ci) / 2.0) * 100))
upper = min(1.0, np.percentile(results, (ci + ((1.0 - ci) / 2.0)) * 100))
return np.mean(results), lower, upper
except TypeError:
return None, None, None
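# Hypothetical usage sketch: given a dataframe `df` with the columns produced by
# get_predictions, a bootstrapped AUC with a 95% confidence interval could be obtained
# along these lines (roc_auc_score is an assumption; any metric with the
# (y_true, y_score) signature would fit):
#
#   from sklearn.metrics import roc_auc_score
#   mean_auc, lower, upper = apply_bootstrap(
#       df, true_col='IMG_LABEL', pred_col='PREDICTION',
#       metric=roc_auc_score, iters=500, ci=0.95, prop=0.75)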
| 50.942731
| 120
| 0.695866
|
728be79cae9f01d3de6e37197947c599ff13cbef
| 9,357
|
py
|
Python
|
accounts/views.py
|
SiddharthKumar02/Acko_1.0
|
9d84b3aefc6416b58ccfbdbe061f46a91e99ec57
|
[
"MIT"
] | 1
|
2022-02-04T00:19:05.000Z
|
2022-02-04T00:19:05.000Z
|
accounts/views.py
|
SiddharthKumar02/Acko_1.0
|
9d84b3aefc6416b58ccfbdbe061f46a91e99ec57
|
[
"MIT"
] | 3
|
2018-12-17T09:42:18.000Z
|
2021-06-10T21:04:20.000Z
|
accounts/views.py
|
SiddharthKumar02/Acko_1.0
|
9d84b3aefc6416b58ccfbdbe061f46a91e99ec57
|
[
"MIT"
] | 4
|
2018-12-15T06:42:43.000Z
|
2019-10-24T13:00:20.000Z
|
import json
import ast
from django.contrib.auth import authenticate, login
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import status
from rest_framework.decorators import permission_classes, api_view, authentication_classes
from rest_framework.response import Response
from knox.views import LoginView, LogoutView
from rest_framework.permissions import AllowAny
from . import serializers
from django.contrib.auth.models import User
from hacko.settings import STATIC_ROOT
import csv
import matplotlib.pyplot as plt
from . import models
from rest_framework.generics import CreateAPIView
# Create your views here.
# User LoginView
class UserLoginView(LoginView):
http_method_names = ['post']
authentication_classes = []
permission_classes = [AllowAny]
def post(self, request, format=None):
s = serializers.LoginSerializer(data=self.request.data)
s.is_valid(raise_exception=True)
username_or_email = s.validated_data.get('user', None)
password = s.validated_data.get('password', None)
        # user stays None when the e-mail is valid but no matching account exists,
        # so the "No user found" branch below is reached instead of an UnboundLocalError
        user = None
        try:
            validate_email(username_or_email)
            username = User.objects.filter(email=username_or_email)
            if username.exists():
                user = authenticate(username=username[0].username, password=password)
        except ValidationError:
            user = authenticate(username=username_or_email, password=password)
if user is None:
return Response({'message': 'No user found as per given credentials', 'error': 1},
status=status.HTTP_400_BAD_REQUEST)
if user.is_active is False:
return Response({'message': 'Please wait till the admin confirms your account', 'error': 1},
status=status.HTTP_202_ACCEPTED)
login(request, user)
context = super(UserLoginView, self).post(request, format=None)
context.data['error'] = 0
return context
# Logout View
class UserLogoutView(LogoutView):
http_method_names = ['post']
def post(self, request, format=None):
super(UserLogoutView, self).post(request, format=None)
return Response({'message': 'successfully logged out!', 'error': 0})
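# The views below aggregate the consumer_complaints CSV: the next one tallies how many
# complaints each product (column 1) received and returns the counts as JSON.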
def ProductComplaintView(request):
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
products=[]
for row in reader:
products.append(row[1])
plot1={}
for product in products:
if(product in plot1):
plot1[product]+=1
else:
plot1[product]=1
json1 = json.dumps(plot1)
json2 = json.loads(json1)
return JsonResponse({'message': json2})
@csrf_exempt
def SubProductComplaint(request, product):
product = product.replace("_", " ")
print(product)
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
sub_products = {}
for row in reader:
if row[1] == product:
if row[2] in sub_products:
sub_products[row[2]] += 1
else:
sub_products[row[2]] = 1
if list(sub_products.keys())[0] == '':
return JsonResponse({'message': 'No sub products in it'})
json_values = json.loads(json.dumps(sub_products))
return JsonResponse(json_values)
@csrf_exempt
@api_view(['post'])
@authentication_classes([])
@permission_classes([AllowAny])
def firstbitthird(request):
sub_product = request.POST['sub_product']
companies = request.POST['companies']
companies = ast.literal_eval(companies)
print(companies)
returns = {}
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
for row in reader:
if ((row[2] == sub_product) and (row[7] in companies)):
print(row[7])
if row[7] in returns.keys():
returns[row[7]] += 1
else:
returns[row[7]] = 1
return JsonResponse(json.loads(json.dumps(returns)))
def CompanyDispute(request):
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
company, dispute, final_dis, disputes =[], [], {}, {}
for row in reader:
company.append(row[7])
dispute.append(row[16])
for i in range(len(company)):
disputes[company[i]]=0
for i in range(len(company)):
if dispute[i] == 'Yes':
disputes[company[i]] += 1
sorted_disputes=sorted(disputes.values())
max_disputes, min_disputes = sorted_disputes[-10:], sorted_disputes[30:50]
for i in disputes:
if disputes[i] in max_disputes and len(final_dis)<20 and disputes[i]!=0:
final_dis[i]=disputes[i]
if disputes[i] in min_disputes and len(final_dis)<20 and disputes[i]!=0:
final_dis[i]=disputes[i]
return JsonResponse(json.loads(json.dumps(final_dis)))
@csrf_exempt
@permission_classes([AllowAny])
def Demographic(request):
company = request.POST['company']
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
product, dispute, sum1 = [], [], {}
for row in reader:
if(row[7]==company):
product.append(row[1])
dispute.append(row[16])
for i in range(len(dispute)):
if(dispute[i]=='Yes'):
dispute[i]=0
else:
dispute[i]=1
for i in range(len(product)):
if(product[i] in sum1):
sum1[product[i]]+=dispute[i]
else:
sum1[product[i]]=dispute[i]
sorted1=sorted(sum1.values())
add1, note= 0, []
for i in sum1:
if(sum1[i]<sorted1[-3]):
add1+=sum1[i]
note.append(i)
for i in note:
sum1.pop(i, None)
sum1['others'] = add1
return JsonResponse(json.loads(json.dumps(sum1)))
def company_names(request):
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
company = set()
lis = {}
for row in reader:
sample = '' + row[7] + '::'
company.add(sample)
return HttpResponse(company)
@csrf_exempt
@api_view(['post'])
@authentication_classes([])
@permission_classes([AllowAny])
def performance(request):
filename = STATIC_ROOT + '/accounts/csv/consumer_complaints.csv'
companies = request.POST['company']
with open(filename, "r") as csv_file:
reader = csv.reader(csv_file)
date, company = [], []
for row in reader:
if row[16] == 'No':
date.append(row[0])
company.append(row[7])
# correct years format from date
dates=[]
for dates1 in date:
dates1=list(dates1)
dates1=dates1[6:]
dates1="".join(dates1)
dates.append(dates1)
print(companies)
company2012, company2013, company2014, company2015 = {}, {}, {}, {}
for i in range(1,len(company)):
if dates[i] == '2012' and company[i] == companies:
if company[i] in company2012:
company2012[company[i]] += 1
else:
company2012[company[i]] = 1
if dates[i] == '2013' and company[i] == companies:
if company[i] in company2013:
company2013[company[i]] += 1
else:
company2013[company[i]] = 1
if dates[i] == '2014' and company[i] == companies:
if company[i] in company2014:
company2014[company[i]] += 1
else:
company2014[company[i]] = 1
if dates[i] == '2015' and company[i] == companies:
if company[i] in company2015:
company2015[company[i]] += 1
else:
company2015[company[i]] = 1
# all_years = {company2012, company2013, company2014, company2015}
all_years = {}
    # .get(..., 0) keeps a year with no matching complaints from raising a KeyError
    all_years[companies] = [company2012.get(companies, 0), company2013.get(companies, 0),
                            company2014.get(companies, 0), company2015.get(companies, 0)]
print(all_years)
json_values = json.loads(json.dumps(all_years))
return JsonResponse(json_values)
#'date_received
# 'product'
# 'sub_product'
# 'issue'
# 'sub_issue'
# 'consumer_complaint_narrative'
# 'company_public_response'
# 'company'
# 'state'
# 'zipcode'
# 'tags'
# 'consumer_consent_provided'
# 'submitted_via'
# 'date_sent_to_company'
# 'company_response_to_consumer'
# 'timely_response'
# 'consumer_disputed?'
# 'complaint_id'
| 34.400735
| 104
| 0.599124
|
5b3defae893c9fb4c6e05601b53e7ef4598bb398
| 4,251
|
py
|
Python
|
kubernetes/client/models/v2alpha1_cron_job_status.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 11
|
2020-10-13T05:27:59.000Z
|
2021-09-23T02:56:32.000Z
|
kubernetes/client/models/v2alpha1_cron_job_status.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 48
|
2020-10-15T09:53:36.000Z
|
2021-07-05T15:33:24.000Z
|
kubernetes/client/models/v2alpha1_cron_job_status.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 4
|
2020-12-04T08:51:35.000Z
|
2022-03-27T09:42:20.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V2alpha1CronJobStatus(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'active': 'list[V1ObjectReference]',
'last_schedule_time': 'datetime'
}
attribute_map = {
'active': 'active',
'last_schedule_time': 'lastScheduleTime'
}
def __init__(self, active=None, last_schedule_time=None):
"""
V2alpha1CronJobStatus - a model defined in Swagger
"""
self._active = None
self._last_schedule_time = None
self.discriminator = None
if active is not None:
self.active = active
if last_schedule_time is not None:
self.last_schedule_time = last_schedule_time
@property
def active(self):
"""
Gets the active of this V2alpha1CronJobStatus.
A list of pointers to currently running jobs.
:return: The active of this V2alpha1CronJobStatus.
:rtype: list[V1ObjectReference]
"""
return self._active
@active.setter
def active(self, active):
"""
Sets the active of this V2alpha1CronJobStatus.
A list of pointers to currently running jobs.
:param active: The active of this V2alpha1CronJobStatus.
:type: list[V1ObjectReference]
"""
self._active = active
@property
def last_schedule_time(self):
"""
Gets the last_schedule_time of this V2alpha1CronJobStatus.
Information when was the last time the job was successfully scheduled.
:return: The last_schedule_time of this V2alpha1CronJobStatus.
:rtype: datetime
"""
return self._last_schedule_time
@last_schedule_time.setter
def last_schedule_time(self, last_schedule_time):
"""
Sets the last_schedule_time of this V2alpha1CronJobStatus.
Information when was the last time the job was successfully scheduled.
:param last_schedule_time: The last_schedule_time of this V2alpha1CronJobStatus.
:type: datetime
"""
self._last_schedule_time = last_schedule_time
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V2alpha1CronJobStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
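# Hedged example, not part of the generated client: constructing the model directly and
# serialising it; last_schedule_time accepts a datetime object.
#
#   from datetime import datetime
#   status = V2alpha1CronJobStatus(active=[], last_schedule_time=datetime.utcnow())
#   print(status.to_dict())  # {'active': [], 'last_schedule_time': datetime(...)}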
| 27.425806
| 105
| 0.586685
|
c2b89b3403da66e212a4c162f1cf3ba517000d22
| 2,209
|
py
|
Python
|
Course_1/Week_04/ZhiyuanGraphMinCut.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | null | null | null |
Course_1/Week_04/ZhiyuanGraphMinCut.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | 1
|
2020-07-16T08:03:22.000Z
|
2020-07-16T08:09:34.000Z
|
Course_1/Week_04/ZhiyuanGraphMinCut.py
|
KnightZhang625/Stanford_Algorithm
|
7dacbbfa50e7b0e8380cf500df24af60cb9f42df
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import random
import sys
import copy
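# Karger's randomized min-cut on the kargerMinCut.txt adjacency list: merged vertices are
# tracked with a union-find array, and findVertexGroup returns the representative of a
# vertex's group while compressing the path it followed.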
def findVertexGroup(vertex,index):
if vertex[index] != index:
path = [index]
index = vertex[index]
while vertex[index] != index:
path.append(index)
index = vertex[index]
for i in path:
vertex[i] = index
return index
def findMinCut(vertex,edge):
nVertex = len(vertex)
nEdge = len(edge)
while nVertex >2:
indexEdge = random.randint(0,nEdge-1)
indexVertex1 = findVertexGroup(vertex,edge[indexEdge][0])
indexVertex2 = findVertexGroup(vertex,edge[indexEdge][1])
if indexVertex1 != indexVertex2:
vertex[indexVertex2] = vertex[indexVertex1]
nVertex -= 1
del edge[indexEdge]
nEdge -= 1
i = nEdge-1
while i>=0:
indexVertex1 = findVertexGroup(vertex,edge[i][0])
indexVertex2 = findVertexGroup(vertex,edge[i][1])
if indexVertex1 == indexVertex2:
del edge[i]
nEdge -= 1
i -= 1
return vertex,nEdge
if __name__ == "__main__":
filename = "kargerMinCut.txt"
file = open(filename)
InputData = file.readlines()
graph = []
for i in InputData:
graph.append(list(map(int,i.split('\t')[:-1])))
n = len(graph)
vertex = np.array(range(n)) # all the vertex is reduced by 1
edge = []
for i in range(n):
if len(graph[i])==1:
print("vertex %s is isolated" %i)
sys.exit(0)
else:
for j in range(1,len(graph[i])):
if graph[i][0] < graph[i][j]: # avoid identical edges
edge.append([graph[i][0]-1,graph[i][j]-1])
iterations = n**2
minCut = len(graph[0])
minCutHistory = []
minCutVertex = 0
random.seed()
for i in range(iterations):
print ("processing %ith iteration" %i)
newVertex,newMinCut = findMinCut(copy.deepcopy(vertex),copy.deepcopy(edge))
minCutHistory.append(newMinCut)
        if newMinCut < minCut:
            minCutVertex = newVertex
            minCut = newMinCut
    # report the best cut found over all iterations
    print("minimum cut found: %d" % minCut)
| 31.112676
| 84
| 0.549117
|
5126ee86067923ea8b7c8dcca9d34c87b9a80578
| 513
|
py
|
Python
|
cracking_the_code/chapter1/Q1.1_is_unique.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 61
|
2020-09-26T19:57:44.000Z
|
2022-03-09T18:51:44.000Z
|
cracking_the_code/chapter1/Q1.1_is_unique.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 88
|
2020-09-19T20:00:27.000Z
|
2021-10-31T09:41:57.000Z
|
cracking_the_code/chapter1/Q1.1_is_unique.py
|
ajeet1308/code_problems
|
5d99839b6319295c6d81dd86775c46a536e7a1ca
|
[
"MIT"
] | 218
|
2020-09-20T08:18:03.000Z
|
2022-01-30T23:13:16.000Z
|
ALPHLEN = 128
def is_unique(string):
if len(string) > ALPHLEN:
return False
else:
alph = [0 for x in range(ALPHLEN)]
for c in string:
if alph[ord(c)] == 1:
return False
else:
alph[ord(c)] = 1
return True
if __name__ == '__main__':
res1 = is_unique('Larissa')
res2 = is_unique('train')
res3 = is_unique('Bolsonaro is the worst brazilian president')
res4 = is_unique('phone')
print(res4)
| 19.730769
| 66
| 0.534113
|
0b360580ba992f47635834ae7f1b34f2c736f7de
| 26,742
|
py
|
Python
|
mktoExportActivities.py
|
i001962/py-marketo-extract
|
fee1b46c251962dc9cedd487caec2a3dcb91fdb8
|
[
"MIT"
] | null | null | null |
mktoExportActivities.py
|
i001962/py-marketo-extract
|
fee1b46c251962dc9cedd487caec2a3dcb91fdb8
|
[
"MIT"
] | null | null | null |
mktoExportActivities.py
|
i001962/py-marketo-extract
|
fee1b46c251962dc9cedd487caec2a3dcb91fdb8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
mktoExportActivities.py: Extracting Leads Activities via Marketo REST API
Usage: mktoExportActivities.py <options>
Options:
-h this help
-i --instance <instance> Marketo Instance URL such as https://app-abj.marketo.com
-o --output <filename> Output filename
-d --id <client id> Marketo LaunchPoint Client Id: eg. 3d96eaef-f611-42a0-967f-00aeeee7e0ea
-s --secret <client secret> Marketo LaunchPoint Client Secret: eg. i8s6RRq1LhPlMyATEKfLWl1255bwzrF
-c --since <date> Since Date time for calling Get Paging Token: eg. 2015-01-31
-l --listid ListId to filter leads (You can find the ID from URL like #ST443A1, 443 is list id)
  -g --debug                      Print debugging information
-j --not-use-jst Change TimeZone for Activity Date field. Default is JST.
  -f --change-data-fields <fields> Specify comma separated 'UI' field names such as 'Behavior Score' for extracting from 'Data Value Changed' activities. default fields: 'Lead Score'
-w --add-webvisit-activity Adding Web Visit activity. It might be a cause of slowdown.
-m --add-mail-activity Adding mail open/click activity. It might be a cause of slowdown.
Mail bug reports and suggestion to : Yukio Y <unknot304 AT gmail.com>
Please refer to the Marketo REST API documents: http://docs.marketo.com
Search article with "Create a Custom Service for Use with ReST API"
"""
import sys, os, errno
import argparse
import csv
import getpass
import time
import json
import httplib2
import logging
import pytz
from datetime import datetime
# Reference:
# Marketo REST API: http://developers.marketo.com/documentation/rest/
# -------
# Base class for all the rest service
#
# mkto_instance: eg. http://123-abc-456.mktorest.com
# grant_type: client_credentials
# client_id: eg. 3d96eaef-f611-42a0-967f-002fasdweeea
# client_secret: eg. i8s6RRq1LhPlMyATEKfLW2300CMbwzrF
#
class MarketoClient:
def __init__(self, mkto_instance, grant_type, client_id, client_secret, list_id):
self.identity_url = mkto_instance + '/identity'
self.endpoint_url = mkto_instance
self.access_token_url = self.identity_url + '/oauth/token?grant_type=' + grant_type + '&client_id=' + client_id + '&client_secret=' + client_secret
self.request_headers = {'Accept': 'application/json',
'Content-Type': 'application/json; charset=UTF-8'
}
self.http_client = httplib2.Http()
self.debug = False
self.list_id = list_id
# send request
response, content = self.http_client.request(self.access_token_url, 'GET', '', self.request_headers)
data = json.loads(content)
self.access_token = data ['access_token']
# print >> sys.stderr, "Access Token: " + self.access_token
# print >> sys.stderr, "Access Token Expired in", data ['expires_in']
# get lead by id
def getLeadRaw(self, id, fields):
leads_url = self.endpoint_url + '/rest/v1/lead/' + id + '.json?access_token=' + self.access_token
        leads_url = leads_url + '&fields=' + fields  # 'fields', not 'files'
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
# print >> sys.stderr, data
return data
# get leads by filter
def getLeadsRaw(self, filter_type, filter_values, fields):
leads_url = self.endpoint_url + '/rest/v1/leads.json?access_token=' + self.access_token
leads_url = leads_url + '&filterType=' + filter_type + '&filterValues=' + filter_values + '&fields=' + fields
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
# print >> sys.stderr, data
return data
# get Paging Token, since may be formatted as "2015-04-10"
def getPagingToken(self, since):
leads_url = self.endpoint_url + '/rest/v1/activities/pagingtoken.json?access_token=' + self.access_token
leads_url = leads_url + '&sinceDatetime=' + since
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
pageToken = data ['nextPageToken']
# print >> sys.stderr, data
return pageToken
# get lead changes
def getLeadChangesRaw(self, token, fields):
leads_url = self.endpoint_url + '/rest/v1/activities/leadchanges.json?access_token=' + self.access_token
leads_url = leads_url + '&nextPageToken=' + token + '&fields=' + fields
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
# print >> sys.stderr, data
return data
# get lead activities. activity_type_ids may take Click Link in Email(11), Web Visit(1) and Click Link on a page(3)
def getLeadActivitiesRaw(self, token, activity_type_ids):
leads_url = self.endpoint_url + '/rest/v1/activities.json?access_token=' + self.access_token
#leads_url = leads_url + '&nextPageToken=' + token + '&activityTypeIds=1&activityTypeIds=11&activityTypeIds=3'
leads_url = leads_url + '&nextPageToken=' + token + '&activityTypeIds=' + activity_type_ids
if self.list_id:
leads_url = leads_url + '&listId=' + self.list_id
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
# print >> sys.stderr, data
return data
# get activity Types
def getActivityTypesRaw(self):
leads_url = self.endpoint_url + '/rest/v1/activities/types.json?access_token=' + self.access_token
response, content = self.http_client.request(leads_url, 'GET', '', self.request_headers)
data = json.loads(content)
# print >> sys.stderr, data
return data
def updateAccessToken(self):
response, content = self.http_client.request(self.access_token_url, 'GET', '', self.request_headers)
data = json.loads(content)
self.access_token = data ['access_token']
# print >> sys.stderr, self.access_token
print >> sys.stderr, "Access Token Expired in", data ['expires_in']
# timestamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d %H:%M:%S')
def enableDebug(self):
httplib2.debuglevel = 1
self.debug = True
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extract Lead Activities via Marketo API')
parser.add_argument(
'-i', '--instance',
type = str,
dest = 'mkto_instance',
required = True,
help = 'Marketo REST API Instance URL such as https://123-XYZ-456.mktorest.com'
)
parser.add_argument(
'-d', '--id',
type = str,
dest = 'mkto_client_id',
required = True,
help = 'Marketo LaunchPoint Client Id: eg. 3d96eaef-f611-42a0-967f-00aeeee7e0ea'
)
parser.add_argument(
'-s', '--secret',
type = str,
dest = 'mkto_client_secret',
required = True,
help = 'Marketo LaunchPoint Client Secret: eg. i8s6RRq1LhPlMyATEKfLWl1255bwzrF'
)
parser.add_argument(
'-l', '--listid',
type = str,
dest = 'mkto_list_id',
required = False,
help = 'List Id: eg. 443 (please extract it from URL on Marketo)'
)
parser.add_argument(
'-o', '--output',
type = str,
dest = 'output_file',
required = False,
help = 'Output file name'
)
parser.add_argument(
'-c', '--since',
type = str,
dest = 'mkto_date',
required = True,
help = 'sinceDate time for calling Get Paging Token: eg. 2015-01-31'
)
parser.add_argument(
'-g', '--debug',
action='store_true',
dest = 'debug',
default = False,
required = False,
        help = 'Print debugging information'
)
parser.add_argument(
'-j', '--not-use-jst',
action='store_true',
dest = 'not_jst',
default = False,
required = False,
help = 'Change TimeZone for Activity Date field. Default is JST.'
)
parser.add_argument(
'-f', '--change-data-fields',
type = str,
dest = 'change_data_fields',
required = False,
help = 'Specify comma separated "UI" fields name such as "Behavior Score" for extracting from "Data Value Changed" activities. default fields: "Lead Score"'
)
parser.add_argument(
'-m', '--add-mail-activity',
action = 'store_true',
dest = 'mail_activity',
default = False,
required = False,
help = 'Adding mail open/click activity. It might be a cause of slowdown.'
)
parser.add_argument(
'-w', '--add-webvisit-activity',
action = 'store_true',
dest = 'web_activity',
default = False,
required = False,
help = 'Adding Web Visit activity. It might be a cause of slowdown.'
)
args = parser.parse_args()
# initiate file handler, selecting file output or stdout according to command arguments
if args.output_file:
fh = open(args.output_file, 'w')
else:
fh = sys.stdout
mywriter = csv.writer(fh, delimiter = ',')
    # preparing activityTypeName
# Currently, this script supports the following activityType for extracting activity.
activityTypeNameDict = {1:'Visit Webpage', 3:'Click Link', 10:'Open Email', 11:'Click Email', 12:'New Lead', 13:'Change Data Value'}
default_activity_id = "12,13"
# preparing csv headers according to command arguments. if user set -w option, we add "page" and "link"
default_header = ["Activity Id", "Activity Date", "Activity Type Id", "Activity Type Name", "Lead Id"]
tracking_fields = ["Lead Score"]
default_header.extend(tracking_fields)
if args.change_data_fields:
change_data_fields = args.change_data_fields.split(",")
for field in change_data_fields:
tracking_fields.append(field)
default_header.append(field)
if args.mail_activity:
default_header.extend(["Mail","Link in Mail"])
default_activity_id = default_activity_id + ",10,11"
if args.web_activity:
default_header.extend(["Web Page","Link on Page","Query Parameters"])
default_activity_id = default_activity_id + ",1,3"
# write header to fh
mywriter.writerow(default_header)
    # initiate dictionaries for storing the latest leadStatus, lifecycleStatus and the fields specified through command arguments for each lead
last_custom_fields = {}
for field in tracking_fields:
last_custom_fields[field] = {}
#
# initiate Marketo ReST API
mktoClient = MarketoClient(args.mkto_instance, 'client_credentials', args.mkto_client_id, args.mkto_client_secret, args.mkto_list_id)
# enable debug information
if args.debug:
mktoClient.enableDebug()
# get value change activities
token = mktoClient.getPagingToken(args.mkto_date)
moreResult=True
while moreResult:
raw_data = mktoClient.getLeadActivitiesRaw(token, default_activity_id)
if args.debug:
print >> sys.stderr, "Activity: " + json.dumps(raw_data, indent=4)
success = raw_data ['success']
if success == False:
errors = raw_data ['errors']
error_code = errors [0] ['code']
error_message = errors[0] ['message']
if error_code == "602":
if args.debug:
print >> sys.stderr, "Access Token has been expired. Now updating..."
mktoClient.updateAccessToken()
continue
elif error_code == "606":
if args.debug:
print >> sys.stderr, "Max rate limit '100' exceeded with in '20' secs..."
time.sleep(2.0)
continue
else:
print >> sys.stderr, "Error:"
print >> sys.stderr, "REST API Error Code: ", error_code
print >> sys.stderr, "Message: ", error_message
if fh is not sys.stdout:
fh.close()
sys.exit(1)
token = raw_data ['nextPageToken']
moreResult = raw_data ['moreResult']
if args.debug:
print >> sys.stderr, "Activity: " + json.dumps(raw_data, indent=4)
#check if there is result field
if raw_data.has_key('result') == False and moreResult != True:
print >> sys.stderr, "Error:"
print >> sys.stderr, "There is no specific activities."
if fh is not sys.stdout:
fh.close()
sys.exit(1)
#check empty results
if raw_data.has_key('result') == False:
continue
raw_data_result = raw_data ['result']
for result in raw_data_result:
csv_row = []
# id
csv_row.append(result ['id'])
# activityDate
# convert datetime (CST) to JST
activityDate = unicode(result ['activityDate']).encode('utf-8')
activityDate = activityDate.replace("T", " ")
activityDate = activityDate.replace("Z", "")
if args.not_jst == False: # use JST
jstActivityDate = datetime.strptime(activityDate, '%Y-%m-%d %H:%M:%S')
jstActivityDate = pytz.utc.localize(jstActivityDate)
jstActivityDate = jstActivityDate.astimezone(pytz.timezone('Asia/Tokyo'))
activityDate = jstActivityDate.strftime('%Y-%m-%d %H:%M:%S')
csv_row.append(activityDate)
# activityTypeId
activityTypeId = result ['activityTypeId']
csv_row.append(activityTypeId)
# activityTypeName
csv_row.append(activityTypeNameDict[activityTypeId])
# leadId
leadId = result ['leadId']
csv_row.append(leadId)
# 12:Created
# leadScore, lifecycleStatus and other custom fields is empty, because of lead is just created.
#
# JSON results example:
#
# {
# "id": 303290,
# "leadId": 101093,
# "activityDate": "2015-04-09T05:34:40Z",
# "activityTypeId": 12,
# "primaryAttributeValueId": 101093,
# "attributes": [
# {
# "name": "Created Date",
# "value": "2015-04-09"
# },
# {
# "name": "Form Name",
# "value": "YY_Program.YY_Form"
# },
# {
# "name": "Source Type",
# "value": "Web form fillout"
# }
# ]
# }
if activityTypeId == 12:
for field in tracking_fields:
csv_row.append("")
# is this correct... Lead Score should be integer but it will be initialized as ""
last_custom_fields [field][leadId] = ""
# adding empty field value for mail related column
if args.mail_activity:
csv_row.append("")
csv_row.append("")
# adding empty field value for web related column
if args.web_activity:
csv_row.append("")
csv_row.append("")
# write row into csv
mywriter.writerow(csv_row)
continue
#
# 13: Change Data Value
# Lead Score and other standard/custom fields are updated!
#
# JSON results example:
# {
# "id": 303306,
# "leadId": 101093,
# "activityDate": "2015-04-09T09:51:00Z",
# "activityTypeId": 13,
# "primaryAttributeValueId": 641,
# "primaryAttributeValue": "YY_Field_1",
# "attributes": [
# {
# "name": "New Value",
# "value": "marketo"
# },
# {
# "name": "Old Value",
# "value": "coverity"
# },
# {
# "name": "Reason",
# "value": "Form fill-out, URL: http://yy.marketo.com/lp/yy.html"
# },
# {
# "name": "Source",
# "value": "Web form fillout"
# }
# ]
# }
if activityTypeId == 13:
activity_field = unicode(result ['primaryAttributeValue']).encode('utf-8')
if activity_field in tracking_fields:
for field in tracking_fields:
if field == activity_field:
attributes = result ['attributes']
for attribute in attributes:
if attribute ['name'] == "New Value":
value = unicode(attribute ['value']).encode('utf-8')
csv_row.append(value)
# store current value
last_custom_fields [field][leadId] = value
break
else:
# if it is not matched, adding latest value or empty
csv_row.append(last_custom_fields [field].get(leadId))
else:
# this activity is not related to tracking_fields, so we skip this activity without writerow
continue
# adding empty field value for mail related column
if args.mail_activity:
csv_row.append("")
csv_row.append("")
# adding empty field value for web related column
if args.web_activity:
csv_row.append("")
csv_row.append("")
# write row into csv
mywriter.writerow(csv_row)
continue
#
# 10: Open Mail
# JSON results example:
# {
# "id": 303306,
# "leadId": 101093,
# "activityDate": "2015-04-09T09:51:00Z",
# "activityTypeId": 10,
# "primaryAttributeValueId": 5,
# "primaryAttributeValue": "RestAPITester.01_Mail",
# "attributes": [
# {
# "name": "Device",
# "value": "unknown"
# },
# {
# "name": "Is Mobile Device",
# "value": false
# },
# {
# "name": "Platform",
# "value": "unknown"
# },
# {
# "name": "User Agent",
# "value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/600.3.18 (KHTML, like Gecko)"
# }
# ]
# }
if activityTypeId == 10:
for field in tracking_fields:
csv_row.append(last_custom_fields [field].get(leadId))
# Mail
mail = unicode(result ['primaryAttributeValue']).encode('utf-8')
csv_row.append(mail)
# Click in Mail
csv_row.append("")
# adding empty field value for web related column
if args.web_activity:
csv_row.append("")
csv_row.append("")
# write row into csv
mywriter.writerow(csv_row)
continue
#
# 11: Click in Mail
# JSON results example:
# {
# "id": 303306,
# "leadId": 101093,
# "activityDate": "2015-04-09T09:51:00Z",
# "activityTypeId": 10,
# "primaryAttributeValueId": 5,
# "primaryAttributeValue": "RestAPITester.01_Mail",
# "attributes": [
# {
# "name": "Device",
# "value": "unknown"
# },
# {
# "name": "Is Mobile Device",
# "value": false
# },
# {
# "name": "Link",
# "value": "http://unknot304.blogspot.jp/2015/02/munchkin-tag.html"
# },
# {
# "name": "Platform",
# "value": "unknown"
# },
# {
# "name": "User Agent",
# "value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/600.3.18 (KHTML, like Gecko)"
# }
# ]
# }
if activityTypeId == 11:
for field in tracking_fields:
csv_row.append(last_custom_fields [field].get(leadId))
# Mail
mail = unicode(result ['primaryAttributeValue']).encode('utf-8')
csv_row.append(mail)
attributes = result ['attributes']
# Click in Mail
for attribute in attributes:
if attribute ['name'] == "Link":
value = unicode(attribute ['value']).encode('utf-8')
csv_row.append(value)
break
# adding empty field value for web related column
if args.web_activity:
csv_row.append("")
csv_row.append("")
# write row into csv
mywriter.writerow(csv_row)
continue
#
# 1: Web Visit
# JSON results example:
# {
# "id": 303306,
# "leadId": 101093,
# "activityDate": "2015-04-09T09:51:00Z",
# "activityTypeId": 10,
# "primaryAttributeValueId": 14,
# "primaryAttributeValue": "unknot304.jp/",
# "attributes": [
# {
# "name": "Client IP Address",
# "value": "202.212.192.233"
# },
# {
# "name": "Query Parameters",
# "value": ""
# },
# {
# "name": "Referrer URL",
# "value": ""
# },
# {
# "name": "User Agent",
# "value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/600.3.18 (KHTML, like Gecko)"
# }
# ]
# }
if activityTypeId == 1:
for field in tracking_fields:
csv_row.append(last_custom_fields [field].get(leadId))
# adding empty field value for mail related column
if args.mail_activity:
csv_row.append("")
csv_row.append("")
if args.web_activity:
# Web
web = unicode(result ['primaryAttributeValue']).encode('utf-8')
csv_row.append(web)
# Link on Web
csv_row.append("")
# Query Parameter
web_attributes = result ['attributes']
for web_attribute in web_attributes:
if web_attribute ['name'] == "Query Parameters":
qparam = unicode(web_attribute ['value']).encode('utf-8')
csv_row.append(qparam)
break
# write row into csv
mywriter.writerow(csv_row)
continue
#
# 3: Click on Web
# JSON results example:
# {
# "id": 303306,
# "leadId": 101093,
# "activityDate": "2015-04-09T09:51:00Z",
# "activityTypeId": 10,
# "primaryAttributeValueId": 10,
# "primaryAttributeValue": "na-ab09.marketo.com/lp/user/01_PDF__DL.html",
# "attributes": [
# {
# "name": "Client IP Address",
# "value": "202.212.192.233"
# },
# {
# "name": "Query Parameters",
# "value": ""
# },
# {
# "name": "Referrer URL",
# "value": ""
# },
# {
# "name": "User Agent",
# "value": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_2) AppleWebKit/600.3.18 (KHTML, like Gecko)"
# }
# {
# "name": "Webpage ID",
# "value": 7
# }
# ]
# }
if activityTypeId == 3:
for field in tracking_fields:
csv_row.append(last_custom_fields [field].get(leadId))
# adding empty field value for mail related column
if args.mail_activity:
csv_row.append("")
csv_row.append("")
if args.web_activity:
# Web
csv_row.append("")
# Link on Web
link = unicode(result ['primaryAttributeValue']).encode('utf-8')
csv_row.append(link)
# Query Parameter
web_attributes = result ['attributes']
for web_attribute in web_attributes:
if web_attribute ['name'] == "Query Parameters":
qparam = unicode(web_attribute ['value']).encode('utf-8')
csv_row.append(qparam)
break
# write row into csv
mywriter.writerow(csv_row)
continue
if fh is not sys.stdout:
fh.close()
# testing methods
# mktoClient.updateAccessToken()
# mktoClient.getLeadRaw("101099", "email")
# mktoClient.getLeadsRaw("id", "101095", "id")
# raw_data = mktoClient.getActivityTypesRaw()
# print >> sys.stderr, "Activity Types: " + json.dumps(raw_data, indent=4)
| 38.257511
| 183
| 0.52139
|
2b7d8d450b10ff989b1669e087ab800e841b0105
| 670
|
py
|
Python
|
migrations/versions/045fce8e2e82_.py
|
vincentbello/ember-flask-chat
|
2c07878fd75e7f8e26d4cdab9d139f32c755d191
|
[
"BSD-3-Clause"
] | 1
|
2018-11-03T14:48:34.000Z
|
2018-11-03T14:48:34.000Z
|
migrations/versions/045fce8e2e82_.py
|
vincentbello/ember-flask-chat
|
2c07878fd75e7f8e26d4cdab9d139f32c755d191
|
[
"BSD-3-Clause"
] | null | null | null |
migrations/versions/045fce8e2e82_.py
|
vincentbello/ember-flask-chat
|
2c07878fd75e7f8e26d4cdab9d139f32c755d191
|
[
"BSD-3-Clause"
] | null | null | null |
"""empty message
Revision ID: 045fce8e2e82
Revises: eb065a6b31be
Create Date: 2017-09-04 16:58:05.896358
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '045fce8e2e82'
down_revision = 'eb065a6b31be'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('authors', sa.Column('image_url', sa.String(length=256), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('authors', 'image_url')
# ### end Alembic commands ###
| 23.103448
| 91
| 0.697015
|
7257647dcc688b803fee836647d926cdf9046af2
| 6,765
|
py
|
Python
|
experiments/hyperparam_search/meta_svgd_hyperparam.py
|
shlu2019/meta_learning_pacoh
|
376349e66bdd782e3d06b4bac2ecb56a2a10bcf6
|
[
"MIT"
] | 23
|
2020-02-13T12:45:42.000Z
|
2022-03-07T20:37:14.000Z
|
experiments/hyperparam_search/meta_svgd_hyperparam.py
|
JeremyAlain/meta_learning_pacoh
|
b4c2c37d9715e74542bab556ac1f5d778cc3409c
|
[
"MIT"
] | 3
|
2020-09-01T15:24:04.000Z
|
2021-06-03T10:39:16.000Z
|
experiments/hyperparam_search/meta_svgd_hyperparam.py
|
JeremyAlain/meta_learning_pacoh
|
b4c2c37d9715e74542bab556ac1f5d778cc3409c
|
[
"MIT"
] | 9
|
2020-04-15T09:43:22.000Z
|
2021-07-18T13:37:38.000Z
|
import ray
import copy
import torch
import numpy as np
import pandas as pd
import os
import sys
import math
from ray import tune
from ray.tune.suggest.hyperopt import HyperOptSearch
from ray.tune import Analysis
from hyperopt import hp
from datetime import datetime
import argparse
import gpytorch
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
DATA_DIR = os.path.join(BASE_DIR, 'data')
HPARAM_EXP_DIR = os.path.join(DATA_DIR, 'tune-hparam-ntasks')
SEED = 28
N_THREADS_PER_RUN = 1
TEST_SEEDS = [28, 29, 30, 31, 32]
# configuration for prior learning
def main(args):
ray.init(num_cpus=args.num_cpus, memory=1800 * 1024**2, object_store_memory=300 * 1024**2)
def train_reg(config, reporter):
sys.path.append(BASE_DIR)
# 1) load / generate data
from experiments.data_sim import provide_data
data_train, data_valid, _ = provide_data(dataset=args.dataset, seed=SEED)
# 2) setup model
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
torch.set_num_threads(N_THREADS_PER_RUN)
model = GPRegressionMetaLearnedSVGD(data_train, **config)
# 3) train and evaluate model
with gpytorch.settings.max_cg_iterations(300):
eval_period = 3000
train_iter = 0
for i in range(config["num_iter_fit"] // eval_period):
loss = model.meta_fit(verbose=False, log_period=2000, n_iter=eval_period)
train_iter += eval_period
ll, rmse, calib_err = model.eval_datasets(data_valid)
reporter(timesteps_total=train_iter, loss=loss,
test_rmse=rmse, test_ll=ll, calib_err=calib_err)
@ray.remote
def train_test(config):
results_dict = config
try:
sys.path.append(BASE_DIR)
# 1) load / generate data
from experiments.data_sim import provide_data
data_train, _, data_test = provide_data(dataset=args.dataset, seed=SEED)
# 2) Fit model
from meta_learn.GPR_meta_svgd import GPRegressionMetaLearnedSVGD
torch.set_num_threads(N_THREADS_PER_RUN)
with gpytorch.settings.max_cg_iterations(300):
model = GPRegressionMetaLearnedSVGD(data_train, **config)
model.meta_fit(data_test, log_period=5000)
# 3) evaluate on test set
ll, rmse, calib_err = model.eval_datasets(data_test)
results_dict.update(ll=ll, rmse=rmse, calib_err=calib_err)
except Exception as e:
print(e)
results_dict.update(ll=np.nan, rmse=np.nan, calib_err=np.nan)
return results_dict
assert args.metric in ['test_ll', 'test_rmse']
exp_name = 'tune_meta_svgd_%s_kernel_%s'%(args.covar_module, args.dataset)
if args.load_analysis:
analysis_dir = os.path.join(HPARAM_EXP_DIR, exp_name)
assert os.path.isdir(analysis_dir), 'load_analysis_from must be a valid directory'
print('Loading existing tune analysis results from %s' % analysis_dir)
analysis = Analysis(analysis_dir)
else:
space = {
"task_kl_weight": hp.loguniform("task_kl_weight", math.log(8e-2), math.log(1.0)),
"prior_factor": hp.loguniform("prior_factor", math.log(1e-6), math.log(2e-1)),
"lr": hp.loguniform("lr", math.log(5e-4), math.log(5e-3)),
"lr_decay": hp.loguniform("lr_decay", math.log(0.8), math.log(1.0)),
"bandwidth": hp.loguniform("bandwidth", math.log(1e-3), math.log(5e2)),
"num_particles": hp.choice("num_particles", [10, 50]),
"task_batch_size": hp.choice("task_batch_size", [4, 10]),
}
config = {
"num_samples": 200,
"config": {
"num_iter_fit": 30000,
'kernel_nn_layers': [32, 32, 32, 32],
'mean_nn_layers': [32, 32, 32, 32],
'random_seed': SEED,
'mean_module': 'NN',
'covar_module': args.covar_module,
'normalize_data': True,
},
"stop": {
"timesteps_total": 30000
},
}
# Run hyper-parameter search
algo = HyperOptSearch(
space,
max_concurrent=args.num_cpus,
metric=args.metric,
mode="max" if args.metric == 'test_ll' else "min")
analysis = tune.run(train_reg, name=exp_name, search_alg=algo, verbose=1, raise_on_failed_trial=False,
local_dir=HPARAM_EXP_DIR, **config)
# Select N best configurations re-run train & test with 5 different seeds
from experiments.hyperparam_search.util import select_best_configs
if args.metric == 'test_ll':
best_configs = select_best_configs(analysis, metric='test_ll', mode='max', N=args.n_test_runs)
elif args.metric == 'test_rmse':
best_configs = select_best_configs(analysis, metric='test_rmse', mode='min', N=args.n_test_runs)
else:
raise AssertionError('metric must be test_ll or test_rmse')
test_configs = []
for config in best_configs:
for seed in TEST_SEEDS:
test_config = copy.deepcopy(config)
test_config.update({'random_seed': seed})
test_configs.append(test_config)
result_dicts = ray.get([train_test.remote(config) for config in test_configs])
result_df = pd.DataFrame(result_dicts)
print(result_df.to_string())
csv_file_name = os.path.join(HPARAM_EXP_DIR, '%s_%s.csv' % (exp_name, datetime.now().strftime("%b_%d_%Y_%H:%M:%S")))
result_df.to_csv(csv_file_name)
print("\nSaved result csv to %s"%csv_file_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run meta mll hyper-parameter search.')
parser.add_argument('--covar_module', type=str, default='NN', help='type of kernel function')
parser.add_argument('--dataset', type=str, default='sin', help='dataset')
    parser.add_argument('--num_cpus', type=int, default=64, help='number of cpu cores to use')
parser.add_argument('--load_analysis', type=bool, default=False, help='whether to load the analysis from existing results')
parser.add_argument('--metric', type=str, default='test_ll', help='test metric to optimize')
parser.add_argument('--n_test_runs', type=int, default=5, help='number of test runs')
args = parser.parse_args()
print('Running', os.path.abspath(__file__), '\n')
print("--- Experiment Settings ---")
print("Covar Module:", args.covar_module)
print("Dataset:", args.dataset)
print("num cpus:", args.num_cpus)
print('\n')
main(args)
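    # Example run (argument values are illustrative only):
    #   python meta_svgd_hyperparam.py --dataset sin --covar_module NN --num_cpus 32 --metric test_ll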
| 37.375691
| 127
| 0.63799
|
73fb1718cd0e74a910775d02f49e5db0d6f06c9e
| 3,815
|
py
|
Python
|
git_stuff.py
|
tapQA/tap_u_pirates
|
f1b57b5720e915b903df90fc8725a9f7757cb530
|
[
"MIT"
] | null | null | null |
git_stuff.py
|
tapQA/tap_u_pirates
|
f1b57b5720e915b903df90fc8725a9f7757cb530
|
[
"MIT"
] | null | null | null |
git_stuff.py
|
tapQA/tap_u_pirates
|
f1b57b5720e915b903df90fc8725a9f7757cb530
|
[
"MIT"
] | 1
|
2019-11-04T20:22:28.000Z
|
2019-11-04T20:22:28.000Z
|
#!/usr/bin/python2.7
import requests
import json
import StringIO
import subprocess
import os
import time
from datetime import datetime
from PIL import Image
# Motion detection settings:
# Threshold (how much a pixel has to change by to be marked as "changed")
# Sensitivity (how many changed pixels before capturing an image)
# ForceCapture (whether to force an image to be captured every forceCaptureTime seconds)
threshold = 30
sensitivity = 500
forceCapture = False
forceCaptureTime = 5 # Once 5 seconds
#forceCaptureTime = 60 * 60 # Once an hour
# File settings
saveWidth = 1280
saveHeight = 960
diskSpaceToReserve = 18897856102.4 # Keep roughly 17.6 GB free on disk (value is in bytes).
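# With the defaults above, the 100x75 test capture below is compared pixel by pixel on the
# green channel: a pixel counts as "changed" once its green value differs by more than
# threshold (30), and a full-size image is saved once more than sensitivity (500) of the
# 7500 test pixels have changed.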
# Capture a small test image (for motion detection)
def captureTestImage():
command = "raspistill -w %s -h %s -t 1000 -p '100,100,256,256' -e bmp -o -" % (100, 75)
imageData = StringIO.StringIO()
imageData.write(subprocess.check_output(command, shell=True))
imageData.seek(0)
im = Image.open(imageData)
buffer = im.load()
imageData.close()
return im, buffer
# Save a full size image to disk
def saveImage(width, height, diskSpaceToReserve):
keepDiskSpaceFree(diskSpaceToReserve)
time = datetime.now()
filename = "image1.jpg"
#filename = "capture-%04d%02d%02d-%02d%02d%02d.jpg" % (time.year, time.month, time.day, time.hour, time.minute, time.second)
subprocess.call("raspistill -w 1296 -h 972 -t 1000 -e jpg -q 15 -p '300,300,256,256' -o %s" % filename, shell=True)
print "Captured %s" % filename
# Keep free space above given level
def keepDiskSpaceFree(bytesToReserve):
if (getFreeSpace() < bytesToReserve):
for filename in sorted(os.listdir(".")):
if filename.startswith("capture") and filename.endswith(".jpg"):
os.remove(filename)
print "Deleted %s to avoid filling disk" % filename
if (getFreeSpace() > bytesToReserve):
return
# Get available disk space
def getFreeSpace():
st = os.statvfs(".")
du = st.f_bavail * st.f_frsize
print du
return du
# Get first image
image1, buffer1 = captureTestImage()
# Reset last capture time
lastCapture = time.time()
# Presence check
getpresence = 'https://slack.com/api/users.getPresence'
user = {'token' : 'token', 'user' : 'user', 'pretty':1}
# Attempt at error handling
while (True):
try:
p = requests.get(getpresence, params=user)
print p.json()
except requests.exceptions.ConnectionError:
# Skip this polling cycle if Slack is unreachable; otherwise p below may be stale or undefined
time.sleep(5)
continue
if p.json().get('presence') == "away":
# Get comparison image
t0=time.time()
image2, buffer2 = captureTestImage()
# Count changed pixels
changedPixels = 0
for x in xrange(0, 100):
for y in xrange(0, 75):
# Just check green channel as it's the highest quality channel
pixdiff = abs(buffer1[x,y][1] - buffer2[x,y][1])
if pixdiff > threshold:
changedPixels += 1
t1=time.time()
# Check force capture
if forceCapture:
if time.time() - lastCapture > forceCaptureTime:
changedPixels = sensitivity + 1
# Save an image if pixels changed
if changedPixels > sensitivity:
lastCapture = time.time()
saveImage(saveWidth, saveHeight, diskSpaceToReserve)
# Swap comparison buffers
image1 = image2
buffer1 = buffer2
print changedPixels, (t1-t0)
time.sleep(30)
# File upload API call
if changedPixels > sensitivity:
upload = 'https://slack.com/api/files.upload'
image = {'file': open('image1.jpg', 'rb')}
id = {'token':'token', 'channels': '#tap_u_pirates', 'pretty': 1}
r = requests.post(upload, params=id, files=image,)
print r.json()
# How often presence is checked
else:
time.sleep(5)
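
For reference, the green-channel comparison above boils down to a small pure function. Below is a minimal Python 3 / Pillow sketch (added for illustration, not part of the original script); the image sizes and threshold are made-up examples.

# Minimal sketch of the motion test: count pixels whose green channel
# differs by more than `threshold` between two equally sized images.
from PIL import Image

def count_changed_pixels(img_a, img_b, threshold=30):
    """Return the number of pixels whose green-channel difference exceeds threshold."""
    a, b = img_a.load(), img_b.load()
    width, height = img_a.size
    changed = 0
    for x in range(width):
        for y in range(height):
            if abs(a[x, y][1] - b[x, y][1]) > threshold:
                changed += 1
    return changed

if __name__ == "__main__":
    before = Image.new("RGB", (100, 75), (0, 0, 0))
    after = Image.new("RGB", (100, 75), (0, 255, 0))
    print(count_changed_pixels(before, after))  # 7500 -- every pixel changed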
| 29.346154
| 129
| 0.656881
|
7aab0d7247a486a3d52744f3439889388d3f7cf7
| 1,634
|
py
|
Python
|
Interact with the API/face_detect.py
|
KevoLoyal/youngrockets
|
fe04938fe5058ab9083c36f2c3e61536151cbc1b
|
[
"MIT"
] | null | null | null |
Interact with the API/face_detect.py
|
KevoLoyal/youngrockets
|
fe04938fe5058ab9083c36f2c3e61536151cbc1b
|
[
"MIT"
] | null | null | null |
Interact with the API/face_detect.py
|
KevoLoyal/youngrockets
|
fe04938fe5058ab9083c36f2c3e61536151cbc1b
|
[
"MIT"
] | null | null | null |
import http.client, urllib.request, urllib.parse, urllib.error, base64, requests, json
# Subscription Key to identify my Service in Azure
subscription_key = 'XXXXXXXXXXXXXXXXXXXXX'
persongroup = 'microsoft'
# Azure Data Center End Point
detect_api_url = 'https://westus.api.cognitive.microsoft.com/face/v1.0/detect'
# Azure Data Center End Point
identify_api_url = 'https://westus.api.cognitive.microsoft.com/face/v1.0/identify'
# Request headers.
headers = { 'Ocp-Apim-Subscription-Key': subscription_key }
image_url = input("What is the URL of the image to detect? ")
print("***********************************************")
print("")
####################################
params_detect = {
'returnFaceId': 'true',
'returnFaceLandmarks': 'false',
'returnFaceAttributes': 'age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise',
'recognitionModel': 'recognition_02',
}
####################################
response = requests.post(detect_api_url, params=params_detect,
headers=headers, json={"url": image_url})
#print(json.dumps(response.json()))
parsed = json.loads(response.text)
####################################
# Extract the faceId from the first entry in the list of detected faces
parsed_matrix1 = parsed[0]
print(parsed_matrix1)
facedetectid = parsed_matrix1['faceId']
print("************************************")
print(" ")
print('Your Face ID assigned to the image detected was: ')
print(" ")
print('>>>>>>>>>>>> ' + facedetectid + ' <<<<<<<<<<<<' )
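
The script defines identify_api_url but never calls it. A hedged sketch of how the detected faceId could be passed to the identify endpoint follows (added for illustration; the request-body field names reflect the Face API v1.0 identify contract as commonly documented and should be verified against the service docs).

# Hedged sketch only: try to identify the detected face within the person
# group declared above. Reuses requests, json, headers, persongroup and
# facedetectid from the script above.
params_identify = {
    'personGroupId': persongroup,
    'faceIds': [facedetectid],
    'maxNumOfCandidatesReturned': 1,
    'confidenceThreshold': 0.5,
}
identify_response = requests.post(identify_api_url, headers=headers, json=params_identify)
print(json.dumps(identify_response.json(), indent=2))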
| 31.423077
| 138
| 0.601591
|
d0464c4716ecf3fe4e6999ced45f418b0bb1bd6a
| 34,059
|
py
|
Python
|
libs/utils/analysis/latency_analysis.py
|
MIPS/external-lisa
|
48024e3bdcb39528f69bb897a3aff57347535c7d
|
[
"Apache-2.0"
] | 2
|
2017-04-20T15:35:19.000Z
|
2020-04-29T05:40:02.000Z
|
libs/utils/analysis/latency_analysis.py
|
RenderBroken/lisa_toolkit
|
dde4826139d143a7e712abbbde2a4fa0aacb447e
|
[
"Apache-2.0"
] | null | null | null |
libs/utils/analysis/latency_analysis.py
|
RenderBroken/lisa_toolkit
|
dde4826139d143a7e712abbbde2a4fa0aacb447e
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2015, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Latency Analysis Module """
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab as pl
import re
from collections import namedtuple
from analysis_module import AnalysisModule
from devlib.utils.misc import memoized
from trappy.utils import listify
# Tuple representing all IDs data of a Task
TaskData = namedtuple('TaskData', ['pid', 'names', 'label'])
CDF = namedtuple('CDF', ['df', 'threshold', 'above', 'below'])
class LatencyAnalysis(AnalysisModule):
"""
Support for plotting Latency Analysis data
:param trace: input Trace object
:type trace: :mod:`libs.utils.Trace`
"""
def __init__(self, trace):
super(LatencyAnalysis, self).__init__(trace)
###############################################################################
# DataFrame Getter Methods
###############################################################################
@memoized
def _dfg_latency_df(self, task):
"""
DataFrame of task's wakeup/suspend events
The returned DataFrame index is the time, in seconds, an event related
to `task` happened.
The DataFrame has these columns:
- target_cpu: the CPU where the task has been scheduled
reported only for wakeup events
- curr_state: the current task state:
A letter which corresponds to the standard events reported by the
prev_state field of a sched_switch event.
Only exception is 'A', which is used to represent active tasks,
i.e. tasks RUNNING on a CPU
- next_state: the next status for the task
- t_start: the time when the current status started, it matches Time
- t_delta: the interval of time after which the task will switch to the
next_state
:param task: the task to report wakeup latencies for
:type task: int or str
"""
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Events [sched_wakeup] not found, '
'cannot compute CPU active signal!')
return None
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Events [sched_switch] not found, '
'cannot compute CPU active signal!')
return None
# Get task data
td = self._getTaskData(task)
if not td:
return None
wk_df = self._dfg_trace_event('sched_wakeup')
sw_df = self._dfg_trace_event('sched_switch')
# Filter Task's WAKEUP events
task_wakeup = wk_df[wk_df.pid == td.pid][['target_cpu', 'pid']]
# Filter Task's START events
task_events = (sw_df.prev_pid == td.pid) | (sw_df.next_pid == td.pid)
task_switches_df = sw_df[task_events]\
[['__cpu', 'prev_pid', 'next_pid', 'prev_state']]
# Unset prev_state for switch_in events, i.e.
# we don't care about the status of a task we are replacing
task_switches_df.prev_state = task_switches_df.apply(
lambda r : np.nan if r['prev_pid'] != td.pid
else self._taskState(r['prev_state']),
axis=1)
# Rename prev_state
task_switches_df.rename(columns={'prev_state' : 'curr_state'}, inplace=True)
# Fill in Running status
# We've just set curr_state (a.k.a prev_state) to nan where td.pid was
# switching in, so set the state to 'A' ("active") in those places.
task_switches_df.curr_state = task_switches_df.curr_state.fillna(value='A')
# Join Wakeup and SchedSwitch events
task_latency_df = task_wakeup.join(task_switches_df, how='outer',
lsuffix='_wkp', rsuffix='_slp')
# Remove not required columns
task_latency_df = task_latency_df[['target_cpu', '__cpu', 'curr_state']]
# Set Wakeup state on each Wakeup event
task_latency_df.curr_state = task_latency_df.curr_state.fillna(value='W')
# Sanity check for all task states to be mapped to a char
numbers = 0
for value in task_switches_df.curr_state.unique():
if type(value) is not str:
self._log.warning('The [sched_switch] events contain "prev_state" value [%s]',
value)
numbers += 1
if numbers:
verb = 'is' if numbers == 1 else 'are'
self._log.warning(' which %s not currently mapped into a task state.',
verb)
self._log.warning('Check mappings in:')
self._log.warning(' %s::%s _taskState()',
__file__, self.__class__.__name__)
# Forward annotate task state
task_latency_df['next_state'] = task_latency_df.curr_state.shift(-1)
# Forward account for previous state duration
task_latency_df['t_start'] = task_latency_df.index
task_latency_df['t_delta'] = (
task_latency_df['t_start'].shift(-1)
- task_latency_df['t_start']
)
return task_latency_df
# Select Wakeup latency
def _dfg_latency_wakeup_df(self, task):
"""
DataFrame of task's wakeup latencies
The returned DataFrame index is the time, in seconds, when `task` woke up.
The DataFrame has just one column:
- wakeup_latency: the time the task waited before getting a CPU
:param task: the task to report wakeup latencies for
:type task: int or str
"""
task_latency_df = self._dfg_latency_df(task)
if task_latency_df is None:
return None
df = task_latency_df[
(task_latency_df.curr_state == 'W') &
(task_latency_df.next_state == 'A')][['t_delta']]
df.rename(columns={'t_delta' : 'wakeup_latency'}, inplace=True)
return df
# Select Wakeup latency
def _dfg_latency_preemption_df(self, task):
"""
DataFrame of task's preemption latencies
The returned DataFrame index is the time, in seconds, `task` has been
preempted.
The DataFrame has just one column:
- preemption_latency: the time the task waited before getting a CPU again
:param task: the task to report wakeup latencies for
:type task: int or str
"""
task_latency_df = self._dfg_latency_df(task)
if task_latency_df is None:
return None
df = task_latency_df[
(task_latency_df.curr_state.isin([0, 'R', 'R+'])) &
(task_latency_df.next_state == 'A')][['t_delta']]
df.rename(columns={'t_delta' : 'preempt_latency'}, inplace=True)
return df
@memoized
def _dfg_activations_df(self, task):
"""
DataFrame of task's wakeup intervals
The returned DataFrame index is the time, in seconds, when `task`
woke up.
The DataFrame has just one column:
- activation_interval: the time since the previous wakeup event
:param task: the task to report runtimes for
:type task: int or str
"""
# Select all wakeup events
wkp_df = self._dfg_latency_df(task)
wkp_df = wkp_df[wkp_df.curr_state == 'W'].copy()
# Compute delta between successive wakeup events
wkp_df['activation_interval'] = (
wkp_df['t_start'].shift(-1) - wkp_df['t_start'])
wkp_df['activation_interval'] = wkp_df['activation_interval'].shift(1)
# Return the activation period each time the task wakeups
wkp_df = wkp_df[['activation_interval']].shift(-1)
return wkp_df
@memoized
def _dfg_runtimes_df(self, task):
"""
DataFrame of task's runtime each time the task blocks
The returned DataFrame index is the time, in seconds, `task` completed
an activation (i.e. sleep or exit)
The DataFrame has just one column:
- running_time: the time the task spent RUNNING since its last wakeup
:param task: the task to report runtimes for
:type task: int or str
"""
# Select all wakeup events
run_df = self._dfg_latency_df(task)
# Filter function to add up RUNNING intervals of each activation
def cr(row):
if row['curr_state'] in ['S']:
return cr.runtime
if row['curr_state'] in ['W']:
if cr.spurious_wkp:
cr.runtime += row['t_delta']
cr.spurious_wkp = False
return cr.runtime
cr.runtime = 0
return cr.runtime
if row['curr_state'] != 'A':
return cr.runtime
if row['next_state'] in ['R', 'R+', 'S', 'x', 'D']:
cr.runtime += row['t_delta']
return cr.runtime
# This is required to capture strange trace sequences where
# a switch_in event is followed by a wakeup_event.
# This sequence is not expected, but we found it in some traces.
# Possible reasons could be:
# - misplaced sched_wakeup events
# - trace buffer artifacts
# TO BE BETTER investigated in kernel space.
# For the time being, we account this interval as RUNNING time,
# which is what kernelshark does.
if row['next_state'] in ['W']:
cr.runtime += row['t_delta']
cr.spurious_wkp = True
return cr.runtime
if row['next_state'] in ['n']:
return cr.runtime
self._log.warning("Unexpected next state: %s @ %f",
row['next_state'], row['t_start'])
return 0
# cr's static variables initialization
cr.runtime = 0
cr.spurious_wkp = False
# Add up RUNNING intervals of each activation
run_df['running_time'] = run_df.apply(cr, axis=1)
# Return RUNTIME computed for each activation,
# each time the task blocks or terminate
run_df = run_df[run_df.next_state.isin(['S', 'x'])][['running_time']]
return run_df
###############################################################################
# Plotting Methods
###############################################################################
def plotLatency(self, task, kind='all', tag=None, threshold_ms=1, bins=64):
"""
Generate a set of plots to report the WAKEUP and PREEMPT latencies the
specified task has been subject to. A WAKEUP latency is the time from
when a task becomes RUNNABLE till the first time it gets a CPU.
A PREEMPT latency is the time from when a RUNNING task is suspended
because the CPU is assigned to another task till when the task
enters the CPU again.
:param task: the task to report latencies for
:type task: int or list(str)
:param kind: the kind of latencies to report (WAKEUP and/or PREEMPT)
:type kind: str
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted latencies
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load wakeup latencies (if required)
wkp_df = None
if 'all' in kind or 'wakeup' in kind:
wkp_df = self._dfg_latency_wakeup_df(td.pid)
if wkp_df is not None:
wkp_df.rename(columns={'wakeup_latency' : 'latency'}, inplace=True)
self._log.info('Found: %5d WAKEUP latencies', len(wkp_df))
# Load preempt latencies (if required)
prt_df = None
if 'all' in kind or 'preempt' in kind:
prt_df = self._dfg_latency_preemption_df(td.pid)
if prt_df is not None:
prt_df.rename(columns={'preempt_latency' : 'latency'}, inplace=True)
self._log.info('Found: %5d PREEMPT latencies', len(prt_df))
if wkp_df is None and prt_df is None:
self._log.warning('No Latency info for task [%s]', td.label)
return
# Join the two data frames
df = wkp_df.append(prt_df)
ymax = 1.1 * df.latency.max()
self._log.info('Total: %5d latency events', len(df))
# Build the series for the CDF
cdf = self._getCDF(df.latency, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: {} latencies".format(td.label, kind.upper())
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Latency events duration over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
try:
wkp_df.rename(columns={'latency': 'wakeup'}, inplace=True)
wkp_df.plot(style='b+', logy=True, ax=axes)
except: pass
try:
prt_df.rename(columns={'latency' : 'preempt'}, inplace=True)
prt_df.plot(style='r+', logy=True, ax=axes)
except: pass
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of latencies samples
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Latencies CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all latencies
axes = plt.subplot(gs[1,1])
df.latency.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Latency histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_latencies_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['latency'], index=stats.keys()))
def plotLatencyBands(self, task, axes=None):
"""
Draw a plot that shows intervals of time when the execution of a
RUNNABLE task has been delayed. The plot reports:
WAKEUP latencies as RED colored bands
PREEMPTION latencies as BLUE colored bands
The optional axes parameter allows plotting the signal on an existing
graph.
:param task: the task to report latencies for
:type task: str
:param axes: axes on which to plot the signal
:type axes: :mod:`matplotlib.axes.Axes`
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task PID
td = self._getTaskData(task)
if not td:
return None
wkl_df = self._dfg_latency_wakeup_df(td.pid)
prt_df = self._dfg_latency_preemption_df(td.pid)
if wkl_df is None and prt_df is None:
self._log.warning('No task with name [%s]', td.label)
return
# If not axis provided: generate a standalone plot
if not axes:
gs = gridspec.GridSpec(1, 1)
plt.figure(figsize=(16, 2))
axes = plt.subplot(gs[0, 0])
axes.set_title('Latencies on [{}] '
'(red: WAKEUP, blue: PREEMPT)'\
.format(td.label))
axes.set_xlim(self._trace.x_min, self._trace.x_max)
axes.set_yticklabels([])
axes.set_xlabel('Time [s]')
axes.grid(True)
# Draw WAKEUP latencies
try:
bands = [(t, wkl_df['wakeup_latency'][t]) for t in wkl_df.index]
for (start, duration) in bands:
end = start + duration
axes.axvspan(start, end, facecolor='r', alpha=0.1)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
except: pass
# Draw PREEMPTION latencies
try:
bands = [(t, prt_df['preempt_latency'][t]) for t in prt_df.index]
for (start, duration) in bands:
end = start + duration
axes.axvspan(start, end, facecolor='b', alpha=0.1)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
except: pass
def plotActivations(self, task, tag=None, threshold_ms=16, bins=64):
"""
Plots "activation intervals" for the specified task
An "activation interval" is time incurring between two consecutive
wakeups of a task. A set of plots is generated to report:
- Activations interval at wakeup time: every time a task wakeups a
point is plotted to represent the time interval since the previous
wakeup.
- Activations interval cumulative function: reports the cumulative
function of the activation intervals.
- Activations intervals histogram: reports a 64 bins histogram of
the activation intervals.
All plots are parameterized based on the value of threshold_ms, which
can be used to filter activation intervals bigger than 2 times this
value.
Such a threshold is useful to filter outliers out of the plots, thus
focusing the analysis on the most critical periodicity under analysis.
The number and percentage of discarded samples are reported in output.
A default threshold of 16 [ms] is used, which is useful for example
to analyze a 60Hz rendering pipeline.
A PNG of the generated plots is generated and saved in the same folder
where the trace is.
:param task: the task to report latencies for
:type task: int or list(str)
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted activation intervals
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load activation data
wkp_df = self._dfg_activations_df(td.pid)
if wkp_df is None:
return None
self._log.info('Found: %5d activations for [%s]',
len(wkp_df), td.label)
# Disregard data above twice the specified threshold
y_max = (2 * threshold_ms) / 1000.
len_tot = len(wkp_df)
wkp_df = wkp_df[wkp_df.activation_interval <= y_max]
len_plt = len(wkp_df)
if len_plt < len_tot:
len_dif = len_tot - len_plt
len_pct = 100. * len_dif / len_tot
self._log.warning('Discarding {} activation intervals (above 2 x threshold_ms, '
'{:.1f}% of the overall activations)'\
.format(len_dif, len_pct))
ymax = 1.1 * wkp_df.activation_interval.max()
# Build the series for the CDF
cdf = self._getCDF(wkp_df.activation_interval, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: activaton intervals (@ wakeup time)".format(td.label)
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Activations intervals over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
wkp_df.plot(style='g+', logy=False, ax=axes)
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of all activations intervals
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Activations CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all activations intervals
axes = plt.subplot(gs[1,1])
wkp_df.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Activation intervals histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_activations_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = wkp_df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['activation_interval'], index=stats.keys()))
def plotRuntimes(self, task, tag=None, threshold_ms=8, bins=64):
"""
Plots "running times" for the specified task
A "running time" is the sum of all the time intervals a task executed
in between a wakeup and the next sleep (or exit).
A set of plots is generated to report:
- Running times at block time: every time a task blocks a
point is plotted to represent the cumulative time the task has been
running since its last wakeup
- Running time cumulative function: reports the cumulative
function of the running times.
- Running times histogram: reports a 64 bins histogram of
the running times.
All plots are parameterized based on the value of threshold_ms, which
can be used to filter running times bigger than 2 times this value.
Such a threshold is useful to filter outliers out of the plots, thus
focusing the analysis on the most critical periodicity under analysis.
The number and percentage of discarded samples are reported in output.
A default threshold of 8 [ms] is used, which is useful for example to
analyze a 60Hz rendering pipeline.
A PNG of the generated plots is generated and saved in the same folder
where the trace is.
:param task: the task to report latencies for
:type task: int or list(str)
:param tag: a string to add to the plot title
:type tag: str
:param threshold_ms: the minimum acceptable [ms] value to report
graphically in the generated plots
:type threshold_ms: int or float
:param bins: number of bins to be used for the runtime's histogram
:type bins: int
:returns: a DataFrame with statistics on plotted running times
"""
if not self._trace.hasEvents('sched_switch'):
self._log.warning('Event [sched_switch] not found, '
'plot DISABLED!')
return
if not self._trace.hasEvents('sched_wakeup'):
self._log.warning('Event [sched_wakeup] not found, '
'plot DISABLED!')
return
# Get task data
td = self._getTaskData(task)
if not td:
return None
# Load runtime data
run_df = self._dfg_runtimes_df(td.pid)
if run_df is None:
return None
self._log.info('Found: %5d activations for [%s]',
len(run_df), td.label)
# Disregard data above twice the specified threshold
y_max = (2 * threshold_ms) / 1000.
len_tot = len(run_df)
run_df = run_df[run_df.running_time <= y_max]
len_plt = len(run_df)
if len_plt < len_tot:
len_dif = len_tot - len_plt
len_pct = 100. * len_dif / len_tot
self._log.warning('Discarding {} running times (above 2 x threshold_ms, '
'{:.1f}% of the overall activations)'\
.format(len_dif, len_pct))
ymax = 1.1 * run_df.running_time.max()
# Build the series for the CDF
cdf = self._getCDF(run_df.running_time, (threshold_ms / 1000.))
self._log.info('%.1f %% samples below %d [ms] threshold',
100. * cdf.below, threshold_ms)
# Setup plots
gs = gridspec.GridSpec(2, 2, height_ratios=[2,1], width_ratios=[1,1])
plt.figure(figsize=(16, 8))
plot_title = "[{}]: running times (@ block time)".format(td.label)
if tag:
plot_title = "{} [{}]".format(plot_title, tag)
plot_title = "{}, threshold @ {} [ms]".format(plot_title, threshold_ms)
# Running time over time
axes = plt.subplot(gs[0,0:2])
axes.set_title(plot_title)
run_df.plot(style='g+', logy=False, ax=axes)
axes.axhline(threshold_ms / 1000., linestyle='--', color='g')
self._trace.analysis.status.plotOverutilized(axes)
axes.legend(loc='lower center', ncol=2)
axes.set_xlim(self._trace.x_min, self._trace.x_max)
# Cumulative distribution of all running times
axes = plt.subplot(gs[1,0])
cdf.df.plot(ax=axes, legend=False, xlim=(0,None),
title='Runtime CDF ({:.1f}% within {} [ms] threshold)'\
.format(100. * cdf.below, threshold_ms))
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
axes.axhline(y=cdf.below, linewidth=1, color='r', linestyle='--')
# Histogram of all running times
axes = plt.subplot(gs[1,1])
run_df.plot(kind='hist', bins=bins, ax=axes,
xlim=(0,ymax), legend=False,
title='Runtime histogram ({} bins, {} [ms] green threshold)'\
.format(bins, threshold_ms));
axes.axvspan(0, threshold_ms / 1000., facecolor='g', alpha=0.5);
# Save generated plots into datadir
task_name = re.sub('[\ :/]', '_', td.label)
figname = '{}/{}task_runtimes_{}_{}.png'\
.format(self._trace.plots_dir, self._trace.plots_prefix,
td.pid, task_name)
pl.savefig(figname, bbox_inches='tight')
# Return statistics
stats_df = run_df.describe(percentiles=[0.95, 0.99])
label = '{:.1f}%'.format(100. * cdf.below)
stats = { label : cdf.threshold }
return stats_df.append(pd.DataFrame(
stats.values(), columns=['running_time'], index=stats.keys()))
###############################################################################
# Utility Methods
###############################################################################
@memoized
def _getTaskData(self, task):
# Get task PID
if isinstance(task, str):
task_pids = self._trace.getTaskByName(task)
if len(task_pids) == 0:
self._log.warning('No tasks found with name [%s]', task)
return None
task_pid = task_pids[0]
if len(task_pids) > 1:
self._log.warning('Multiple PIDs for task named [%s]', task)
for pid in task_pids:
self._log.warning(' %5d : %s', pid,
','.join(self._trace.getTaskByPid(pid)))
self._log.warning('Returning stats only for PID: %d',
task_pid)
task_names = self._trace.getTaskByPid(task_pid)
# Get task name
elif isinstance(task, int):
task_pid = task
task_names = self._trace.getTaskByPid(task_pid)
if len(task_names) == 0:
self._log.warning('No tasks found with PID [%s]', task)
return None
else:
raise ValueError("Task must be either an int or str")
task_label = "{}: {}".format(task_pid, ', '.join(task_names))
return TaskData(task_pid, task_names, task_label)
@memoized
def _taskState(self, state):
try:
state = int(state)
except ValueError:
# State already converted to symbol
return state
# Tasks STATE flags (Linux 3.18)
TASK_STATES = {
0: "R", # TASK_RUNNING
1: "S", # TASK_INTERRUPTIBLE
2: "D", # TASK_UNINTERRUPTIBLE
4: "T", # __TASK_STOPPED
8: "t", # __TASK_TRACED
16: "X", # EXIT_DEAD
32: "Z", # EXIT_ZOMBIE
64: "x", # TASK_DEAD
128: "K", # TASK_WAKEKILL
256: "W", # TASK_WAKING
512: "P", # TASK_PARKED
1024: "N", # TASK_NOLOAD
}
kver = self._trace.platform['kernel']['parts']
if kver is None:
kver = (3, 18)
self._log.info('Parsing sched_switch states assuming kernel v%d.%d',
kver[0], kver[1])
if kver >= (4, 8):
TASK_STATES[2048] = "n" # TASK_NEW
TASK_MAX_STATE = 2 * max(TASK_STATES)
res = "R"
if state & (TASK_MAX_STATE - 1) != 0:
res = ""
for key in TASK_STATES.keys():
if key & state:
res += TASK_STATES[key]
if state & TASK_MAX_STATE:
res += "+"
else:
res = '|'.join(res)
return res
def _getCDF(self, data, threshold):
"""
Build the "Cumulative Distribution Function" (CDF) for the given data
"""
# Build the series of sorted values
ser = data.sort_values()
if len(ser) < 1000:
# Append again the last (and largest) value.
# This step is important especially for small sample sizes
# in order to get an unbiased CDF
ser = ser.append(pd.Series(ser.iloc[-1]))
df = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
# Compute percentage of samples above/below the specified threshold
below = float(max(df[:threshold]))
above = 1 - below
return CDF(df, threshold, above, below)
# vim :set tabstop=4 shiftwidth=4 expandtab
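
As a standalone illustration of the CDF construction used by _getCDF above (added, not part of the original module; the sample latencies are made up):

import numpy as np
import pandas as pd

samples = pd.Series([0.0004, 0.0008, 0.0012, 0.0030, 0.0090])  # latencies in seconds
threshold = 0.001  # 1 [ms]

ser = samples.sort_values()
if len(ser) < 1000:
    # Repeat the largest value so small samples yield an unbiased CDF,
    # mirroring _getCDF (pd.concat replaces the deprecated Series.append).
    ser = pd.concat([ser, pd.Series([ser.iloc[-1]])])
cdf = pd.Series(np.linspace(0., 1., len(ser)), index=ser)
below = float(max(cdf[:threshold]))
print('{:.0f}% of samples fall below the threshold'.format(100. * below))  # 20%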
| 40.211334
| 98
| 0.573505
|
71c7cec3dbdd75dacea60ff77fd65eedf56a266c
| 3,388
|
py
|
Python
|
placement/jobport/helpers.py
|
IIIT-Delhi/jobport
|
28c96916f47a47617d433f5e828cb91d9b1f230b
|
[
"MIT"
] | 8
|
2015-12-19T14:02:03.000Z
|
2021-05-17T20:16:18.000Z
|
placement/jobport/helpers.py
|
IIIT-Delhi/jobport
|
28c96916f47a47617d433f5e828cb91d9b1f230b
|
[
"MIT"
] | 15
|
2015-11-03T18:11:11.000Z
|
2021-06-01T22:07:37.000Z
|
placement/jobport/helpers.py
|
IIIT-Delhi/jobport
|
28c96916f47a47617d433f5e828cb91d9b1f230b
|
[
"MIT"
] | 7
|
2016-03-14T19:51:28.000Z
|
2017-09-29T06:57:41.000Z
|
# //=======================================================================
# // Copyright JobPort, IIIT Delhi 2015.
# // Distributed under the MIT License.
# // (See accompanying file LICENSE or copy at
# // http://opensource.org/licenses/MIT)
# //=======================================================================
# __author__ = 'naman'
import re
from django.contrib.auth.models import User
from django.utils import timezone
def is_member(user, group):
"""Checks if the user object is a member of the group or not."""
return user.groups.filter(name=group)
def is_eligible(candidate, job):
"""Checks if the user object is a eligible candidate for the job or not.
All the logic for checking eligibility goes here!"""
eligibility = {}
eligibility['value'] = True
# list of all the reasons that contribute towards ineligibility
eligibility['reasons'] = []
if (candidate.batch.pg_or_not == 'G'):
if (candidate.cgpa_ug < job.cgpa_min):
eligibility['value'] = False
eligibility['reasons'].append(
"Your CGPA is below the requirement.")
else:
if (candidate.cgpa_pg < job.cgpa_min):
eligibility['value'] = False
eligibility['reasons'].append(
"Your CGPA is below the requirement.")
if (candidate.percentage_tenth < job.min_tenthmarks):
eligibility['value'] = False
eligibility['reasons'].append(
"Your 10th Marks are below the requirement.")
if (candidate.percentage_twelfth < job.min_twelfthmarks):
eligibility['value'] = False
eligibility['reasons'].append(
"Your 12th Marks are below the requirement.")
if (candidate.backlogs > job.max_blacklogs):
eligibility['value'] = False
eligibility['reasons'].append("You have too many backlogs.")
if (candidate.status == 'B'):
eligibility['value'] = False
eligibility['reasons'].append(
"You have been blocked by the placement cell.")
if (job.status == 'C' or job.status == 'A'):
eligibility['value'] = False
eligibility['reasons'].append("This Job cannot be applied to.")
Vals = []
for b in job.batch.all():
if (b != candidate.batch):
Vals.append(False)
else:
Vals.append(True)
if not any(Vals):
eligibility['value'] = False
eligibility['reasons'].append("You are not eligible for this job!")
return eligibility
def checkdeadline(job):
"""Checks if the deadline has passed or not."""
if (timezone.now() > job.deadline):
return True
else:
return False
def is_admin(user):
"""Checks if the user object is a member of the admin group or not."""
allowed_group = {'admin'}
usr = User.objects.get(username=user)
groups = [x.name for x in usr.groups.all()]
if allowed_group.intersection(set(groups)):
return True
return False
def special_match(strg, search=re.compile(r'[^A-Za-z0-9., -]').search):
return not bool(search(strg))
def contact_match(strg, search=re.compile(r'[0-9]\n').search):
return not bool(search(strg))
def onlyspecchar_match(strg, search=re.compile(r'^[., -]').search):
return not bool(search(strg))
def onlynumbers(strg, search=re.compile(r'^[0-9]').search):
return bool(search(strg))
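
A framework-free sketch of how the eligibility rules above combine (added for illustration; the SimpleNamespace objects stand in for the real Django model instances, and every field value, including the 'O' job status, is made up):

from types import SimpleNamespace

batch_2020 = SimpleNamespace(pg_or_not='G')
candidate = SimpleNamespace(batch=batch_2020, cgpa_ug=7.5, cgpa_pg=0.0,
                            percentage_tenth=85, percentage_twelfth=80,
                            backlogs=0, status='A')
job = SimpleNamespace(cgpa_min=8.0, min_tenthmarks=60, min_twelfthmarks=60,
                      max_blacklogs=2, status='O',
                      batch=SimpleNamespace(all=lambda: [batch_2020]))

result = is_eligible(candidate, job)
print(result['value'])    # False: CGPA 7.5 is below the 8.0 requirement
print(result['reasons'])  # ['Your CGPA is below the requirement.']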
| 31.37037
| 76
| 0.605962
|
c1388dc9bb7a1db275597b2cb11c55e5337a55a7
| 4,377
|
py
|
Python
|
vunit/com/codec_vhdl_record_type.py
|
svenka3/vunit
|
0acacbe2b276abb907c908c76885bbc58099fa68
|
[
"Artistic-2.0"
] | null | null | null |
vunit/com/codec_vhdl_record_type.py
|
svenka3/vunit
|
0acacbe2b276abb907c908c76885bbc58099fa68
|
[
"Artistic-2.0"
] | null | null | null |
vunit/com/codec_vhdl_record_type.py
|
svenka3/vunit
|
0acacbe2b276abb907c908c76885bbc58099fa68
|
[
"Artistic-2.0"
] | 1
|
2021-09-11T16:56:08.000Z
|
2021-09-11T16:56:08.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (c) 2015, Lars Asplund lars.anders.asplund@gmail.com
"""
Module containing the CodecVHDLRecordType class.
"""
from string import Template
from vunit.vhdl_parser import VHDLRecordType
from vunit.com.codec_datatype_template import DatatypeStdCodecTemplate, DatatypeDebugCodecTemplate
class CodecVHDLRecordType(VHDLRecordType):
"""Class derived from VHDLRecordType to provide codec generator functionality for the record type."""
def generate_codecs_and_support_functions(self, debug=False):
"""Generate codecs and communication support functions for the record type."""
if not debug:
template = RecordStdCodecTemplate()
else:
template = RecordDebugCodecTemplate()
declarations = ''
definitions = ''
declarations += template.codec_declarations.substitute(type=self.identifier)
declarations += template.to_string_declarations.substitute(type=self.identifier)
element_encoding_list = []
element_decoding_list = []
num_of_elements = 0
for element in self.elements:
for i in element.identifier_list:
element_encoding_list.append('encode(data.%s)' % i)
if debug:
element_decoding_list.append('ret_val.%s := decode(elements.all(%d).all);' %
(i, num_of_elements))
else:
element_decoding_list.append('decode(code, index, result.%s);' % i)
num_of_elements += 1
if debug:
element_encodings = ', '.join(element_encoding_list)
else:
element_encodings = ' & '.join(element_encoding_list)
element_decodings = '\n '.join(element_decoding_list)
definitions += template.record_codec_definition.substitute(type=self.identifier,
element_encodings=element_encodings,
num_of_elements=str(num_of_elements),
element_decodings=element_decodings)
definitions += template.record_to_string_definition.substitute(
type=self.identifier,
element_encoding_list=', '.join(element_encoding_list),
num_of_elements=str(num_of_elements))
return declarations, definitions
class RecordCodecTemplate(object):
"""This class contains record templates common to both standard and debug codecs."""
record_to_string_definition = Template("""\
function to_string (
constant data : $type)
return string is
begin
return create_group($num_of_elements, $element_encoding_list);
end function to_string;
""")
class RecordStdCodecTemplate(DatatypeStdCodecTemplate, RecordCodecTemplate):
"""This class contains standard record templates."""
record_codec_definition = Template("""\
function encode (
constant data : $type)
return string is
begin
return $element_encodings;
end function encode;
procedure decode (
constant code : string;
variable index : inout positive;
variable result : out $type) is
begin
$element_decodings
end procedure decode;
function decode (
constant code : string)
return $type is
variable ret_val : $type;
variable index : positive := code'left;
begin
decode(code, index, ret_val);
return ret_val;
end function decode;
""")
class RecordDebugCodecTemplate(DatatypeDebugCodecTemplate, RecordCodecTemplate):
"""This class contains debug record templates."""
record_codec_definition = Template("""\
function encode (
constant data : $type)
return string is
begin
return to_string(data);
end function encode;
function decode (
constant code : string)
return $type is
variable ret_val : $type;
variable elements : lines_t;
variable length : natural;
begin
split_group(code, elements, $num_of_elements, length);
$element_decodings
deallocate_elements(elements);
return ret_val;
end function decode;
""")
| 33.159091
| 105
| 0.655243
|
8ccb8d2d81c2f9c45b94b9df36972a4d93c4be4d
| 6,041
|
py
|
Python
|
dlk/tests/tstutils.py
|
tsawada/blueoil
|
745a2eb25e090e0ff9af547c1a11b538bf7e5c8a
|
[
"Apache-2.0"
] | null | null | null |
dlk/tests/tstutils.py
|
tsawada/blueoil
|
745a2eb25e090e0ff9af547c1a11b538bf7e5c8a
|
[
"Apache-2.0"
] | null | null | null |
dlk/tests/tstutils.py
|
tsawada/blueoil
|
745a2eb25e090e0ff9af547c1a11b538bf7e5c8a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import unittest
import subprocess
import time
import os
from os.path import join
from tstconf import DO_CLEANUP
TEST_LEVEL_FUTURE_TARGET=512
FPGA_HOST = os.environ['FPGA_HOST']
def updated_dict(src_dict, updation) -> dict:
dst_dict = dict(src_dict)
dst_dict.update(updation)
return dst_dict
def run_and_check(command, cwd, file_stdout=None, file_stderr=None, testcase=None,
keep_outputs=not DO_CLEANUP,
check_stdout_include=None,
check_stdout_block=None,
check_stderr_include=None,
check_stderr_block=None,
ignore_returncode=False,
**parameters) -> None:
"""
Run the given command and assert that it behaves as expected,
raising AssertionError otherwise.
All words in check_stdout_include must appear in stdout;
all words in check_stdout_block must not appear in stdout;
all words in check_stderr_include must appear in stderr;
all words in check_stderr_block must not appear in stderr.
"""
testcase = testcase if testcase is not None else unittest.TestCase()
proc = subprocess.Popen(
command,
cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
**parameters
)
out, err = proc.communicate()
if keep_outputs and file_stdout:
with open(file_stdout, 'w') as fout:
fout.write(out)
if keep_outputs and file_stderr:
with open(file_stderr, 'w') as ferr:
ferr.write(err)
if len(err.strip()) > 0:
print("---begining of stderr---")
print(err)
print("---end of stderr---")
try:
if not ignore_returncode:
testcase.assertTrue(proc.returncode == 0)
if check_stdout_include:
for key in check_stdout_include:
testcase.assertTrue(key in out)
if check_stdout_block:
for key in check_stdout_block:
testcase.assertFalse(key in out)
if check_stderr_include:
for key in check_stderr_include:
testcase.assertTrue(key in err)
if check_stderr_block:
for key in check_stderr_block:
testcase.assertFalse(key in err)
except AssertionError:
print("---begining of stdout---")
print(out)
print("---end of stdout---")
raise AssertionError
def wait_for_device(host: str, tries: int, seconds: int, log_path: str, testcase=None) -> bool:
board_found = False
board_ssh_enabled = False
for i in range(tries):
try:
print(f'Waiting for device {host}: try {i+1} of {tries}')
if not board_found:
run_and_check(
["ping", "-c5", host],
log_path, join(log_path, "ping.out"), join(log_path, "ping.err"), testcase)
board_found = True
else:
run_and_check(
[ "ssh",
"-o",
"StrictHostKeyChecking no",
f"root@{host}",
f"uname -a"
],
log_path,
join(log_path, "ssh_uname-a.out"),
join(log_path, "ssh_uname-a.err"),
testcase
)
board_ssh_enabled = True
break
except Exception as e:
print(str(e))
time.sleep(seconds)
continue
return board_ssh_enabled
def setup_de10nano(hw_path: str, output_path: str, testcase=None):
host = FPGA_HOST
available = wait_for_device(host, 15, 10, output_path, testcase)
if not available:
return False
try:
run_and_check(
[ "ssh",
f"root@{host}",
f"mkdir -p ~/automated_testing; mkdir -p ~/boot; if grep -qs '/root/boot' /proc/mounts ;" \
+ "then echo 0 ; else mount /dev/mmcblk0p1 /root/boot ; fi"
],
output_path,
join(output_path, "mount.out"),
join(output_path, "mount.err"),
testcase
)
run_and_check(
[ "scp",
join(hw_path, 'soc_system.rbf'),
join(hw_path, 'soc_system.dtb'),
join(hw_path, 'preloader-mkpimage.bin'),
f"root@{host}:~/boot/"
],
output_path,
join(output_path, "scp_hw.out"),
join(output_path, "scp_hw.err"),
testcase
)
run_and_check(
[ "ssh",
f"root@{host}",
f"cd ~/boot && dd if=./preloader-mkpimage.bin of=/dev/mmcblk0p3 && sync && cd ~ && umount boot"
],
output_path,
join(output_path, "update_hw.out"),
join(output_path, "update_hw.err"),
testcase
)
run_and_check(
[ "ssh", f"root@{host}", "reboot"],
output_path,
join(output_path, "reboot.out"),
join(output_path, "reboot.err"),
testcase,
ignore_returncode=True
)
except:
return False
available = wait_for_device(host, 15, 10, output_path, testcase)
if not available:
return False
return True
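
A hedged usage sketch for run_and_check (added for illustration; importing this module requires tstconf and the FPGA_HOST environment variable, so the snippet only shows how the keyword arguments combine):

import unittest

# Run a harmless command and assert on its output streams.
tc = unittest.TestCase()
run_and_check(
    ["echo", "hello world"],
    cwd=".",
    file_stdout="echo.out",
    file_stderr="echo.err",
    testcase=tc,
    check_stdout_include=["hello"],
    check_stdout_block=["Traceback"],
)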
| 30.356784
| 109
| 0.556696
|
846ae5cbaa6d1d8a3664a067b33f95a7c57f6da9
| 1,022
|
py
|
Python
|
Admin/models.py
|
hxllll/Lost
|
50612f004a49dca6b2c9298613189b927daae7a8
|
[
"Apache-2.0"
] | null | null | null |
Admin/models.py
|
hxllll/Lost
|
50612f004a49dca6b2c9298613189b927daae7a8
|
[
"Apache-2.0"
] | 20
|
2020-01-28T22:35:15.000Z
|
2022-03-11T23:38:45.000Z
|
Admin/models.py
|
hxllll/Lost
|
50612f004a49dca6b2c9298613189b927daae7a8
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib.auth.hashers import make_password, check_password
from django.db import models
# Create your models here.
from werkzeug.security import check_password_hash
class AdminUser(models.Model):
a_username = models.CharField(max_length=15,unique=True)
a_password = models.CharField(max_length=256)
is_delete = models.BooleanField(default=False)
is_super = models.BooleanField(default=False)
def set_password(self,password):
self.a_password = make_password(password)
def check_admin_password(self,password):
return check_password(password,self.a_password)
def has_permission(self, permission_name):
permissions = self.permission_set.all()
for permission in permissions:
if permission_name == permission.p_name:
return True
return False
class Permission(models.Model):
p_name = models.CharField(max_length=32,unique=True)
p_users = models.ManyToManyField(AdminUser)
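
A brief usage sketch (added for illustration; it assumes a configured Django project with this app installed and its migrations applied, and the permission name is made up):

admin = AdminUser(a_username='alice')
admin.set_password('s3cret-pass')
admin.save()

assert admin.check_admin_password('s3cret-pass')
assert not admin.check_admin_password('wrong-pass')
print(admin.has_permission('manage_jobs'))  # False until such a Permission is linked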
| 30.969697
| 70
| 0.714286
|
4c70e5ceb628c7e3d011e4ae2cd4a174794e747a
| 818
|
py
|
Python
|
django_visitor_information/__init__.py
|
goranpavlovic/django-visitor-information-middleware
|
45e7c13a5472e56583d8ff0257e3e840de4ec048
|
[
"Apache-2.0"
] | null | null | null |
django_visitor_information/__init__.py
|
goranpavlovic/django-visitor-information-middleware
|
45e7c13a5472e56583d8ff0257e3e840de4ec048
|
[
"Apache-2.0"
] | null | null | null |
django_visitor_information/__init__.py
|
goranpavlovic/django-visitor-information-middleware
|
45e7c13a5472e56583d8ff0257e3e840de4ec048
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to Tomaz Muraus under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Tomaz muraus licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'__version__'
]
__version__ = '0.1.0'
| 38.952381
| 78
| 0.764059
|
67159d8ed51d068ce0a3014b41c5277500bc2900
| 2,399
|
py
|
Python
|
ckanext/example_igroupform/plugin.py
|
algebrasrl/ckan
|
7779b975dc5936a55335130409dc3e9d7aba9bec
|
[
"BSD-3-Clause"
] | 1
|
2021-10-01T12:47:19.000Z
|
2021-10-01T12:47:19.000Z
|
ckanext/example_igroupform/plugin.py
|
algebrasrl/ckan
|
7779b975dc5936a55335130409dc3e9d7aba9bec
|
[
"BSD-3-Clause"
] | null | null | null |
ckanext/example_igroupform/plugin.py
|
algebrasrl/ckan
|
7779b975dc5936a55335130409dc3e9d7aba9bec
|
[
"BSD-3-Clause"
] | 2
|
2018-01-21T17:03:08.000Z
|
2019-07-23T08:49:52.000Z
|
# encoding: utf-8
from ckan.common import CKANConfig
import ckan.plugins as plugins
import ckan.plugins.toolkit as tk
# I did try unicode in the group_type, but Routes wasn't happy with unicode in
# the route name, so it would require encoding everywhere we do url_for, so
# I've left it.
# group_type = u'gr\xc3\xb6up' # This is 'group' with an umlaut on the 'o'
group_type = u'grup'
group_type_utf8 = group_type.encode('utf8')
class ExampleIGroupFormPlugin(plugins.SingletonPlugin,
tk.DefaultGroupForm):
'''An example IGroupForm CKAN plugin with custom group_type.
Doesn't do much yet.
'''
plugins.implements(plugins.IGroupForm, inherit=False)
plugins.implements(plugins.IConfigurer)
# IConfigurer
def update_config(self, config_: CKANConfig):
tk.add_template_directory(config_, 'templates')
# IGroupForm
def group_types(self):
return (group_type,)
def is_fallback(self):
return False
def group_form(self):
return 'example_igroup_form/group_form.html'
class ExampleIGroupFormPlugin_DefaultGroupType(plugins.SingletonPlugin, # noqa
tk.DefaultGroupForm):
'''An example IGroupForm CKAN plugin for default group_type.
Doesn't do much yet.
'''
plugins.implements(plugins.IGroupForm, inherit=False)
plugins.implements(plugins.IConfigurer)
# IConfigurer
def update_config(self, config_: CKANConfig):
tk.add_template_directory(config_, 'templates')
# IGroupForm
def group_types(self):
return ('group',)
def is_fallback(self):
return False
def group_form(self):
return 'example_igroup_form/group_form.html'
class ExampleIGroupFormOrganizationPlugin(plugins.SingletonPlugin,
tk.DefaultOrganizationForm):
'''An example IGroupForm Organization CKAN plugin with custom group_type.
Doesn't do much yet.
'''
plugins.implements(plugins.IGroupForm, inherit=False)
plugins.implements(plugins.IConfigurer)
# IConfigurer
def update_config(self, config_: CKANConfig):
tk.add_template_directory(config_, 'templates')
# IGroupForm
def group_types(self):
return (group_type,)
def is_fallback(self):
return False
def group_controller(self):
return 'organization'
| 25.795699
| 79
| 0.679033
|
0bd9e238b6082b3f0aeea82ba7a2d7fa5d9a42f3
| 517
|
py
|
Python
|
factorial_python.py
|
trunknx/PythonAlgorithms
|
0a4a63eec34b0255508cbdac35fa91f994bef279
|
[
"MIT"
] | null | null | null |
factorial_python.py
|
trunknx/PythonAlgorithms
|
0a4a63eec34b0255508cbdac35fa91f994bef279
|
[
"MIT"
] | null | null | null |
factorial_python.py
|
trunknx/PythonAlgorithms
|
0a4a63eec34b0255508cbdac35fa91f994bef279
|
[
"MIT"
] | null | null | null |
# Python program to find the factorial of a number provided by the user.
#
# change the value for a different result
num = 10
# uncomment to take input from the user
#num = int(input("Enter a number: "))
factorial = 1
# check if the number is negative, positive or zero
if num < 0:
print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
print("The factorial of 0 is 1")
else:
for i in range(1,num + 1):
factorial = factorial*i
print("The factorial of",num,"is",factorial)
| 24.619048
| 72
| 0.690522
|
13b3e4b3db768aa3ac1bfa50d18b819400ded95b
| 33,437
|
py
|
Python
|
Lib/plat-mac/bundlebuilder.py
|
arvindm95/unladen-swallow
|
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
|
[
"PSF-2.0"
] | 8
|
2020-08-24T14:21:35.000Z
|
2022-01-26T04:49:11.000Z
|
Lib/plat-mac/bundlebuilder.py
|
arvindm95/unladen-swallow
|
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
|
[
"PSF-2.0"
] | null | null | null |
Lib/plat-mac/bundlebuilder.py
|
arvindm95/unladen-swallow
|
8175e37eaea7ca66ed03283b46bc1d2db0d3f9c3
|
[
"PSF-2.0"
] | 3
|
2020-08-23T23:20:38.000Z
|
2021-10-18T03:35:00.000Z
|
#! /usr/bin/env python
"""\
bundlebuilder.py -- Tools to assemble MacOS X (application) bundles.
This module contains two classes to build so called "bundles" for
MacOS X. BundleBuilder is a general tool, AppBuilder is a subclass
specialized in building application bundles.
[Bundle|App]Builder objects are instantiated with a bunch of keyword
arguments, and have a build() method that will do all the work. See
the class doc strings for a description of the constructor arguments.
The module contains a main program that can be used in two ways:
% python bundlebuilder.py [options] build
% python buildapp.py [options] build
Where "buildapp.py" is a user-supplied setup.py-like script following
this model:
from bundlebuilder import buildapp
buildapp(<lots-of-keyword-args>)
"""
__all__ = ["BundleBuilder", "BundleBuilderError", "AppBuilder", "buildapp"]
from warnings import warnpy3k
warnpy3k("In 3.x, the bundlebuilder module is removed.", stacklevel=2)
import sys
import os, errno, shutil
import imp, marshal
import re
from copy import deepcopy
import getopt
from plistlib import Plist
from types import FunctionType as function
class BundleBuilderError(Exception): pass
class Defaults:
"""Class attributes that don't start with an underscore and are
not functions or classmethods are (deep)copied to self.__dict__.
This allows for mutable default values.
"""
def __init__(self, **kwargs):
defaults = self._getDefaults()
defaults.update(kwargs)
self.__dict__.update(defaults)
def _getDefaults(cls):
defaults = {}
for base in cls.__bases__:
if hasattr(base, "_getDefaults"):
defaults.update(base._getDefaults())
for name, value in cls.__dict__.items():
if name[0] != "_" and not isinstance(value,
(function, classmethod)):
defaults[name] = deepcopy(value)
return defaults
_getDefaults = classmethod(_getDefaults)
class BundleBuilder(Defaults):
"""BundleBuilder is a barebones class for assembling bundles. It
knows nothing about executables or icons, it only copies files
and creates the PkgInfo and Info.plist files.
"""
# (Note that Defaults.__init__ (deep)copies these values to
# instance variables. Mutable defaults are therefore safe.)
# Name of the bundle, with or without extension.
name = None
# The property list ("plist")
plist = Plist(CFBundleDevelopmentRegion = "English",
CFBundleInfoDictionaryVersion = "6.0")
# The type of the bundle.
type = "BNDL"
# The creator code of the bundle.
creator = None
# the CFBundleIdentifier (this is used for the preferences file name)
bundle_id = None
# List of files that have to be copied to <bundle>/Contents/Resources.
resources = []
# List of (src, dest) tuples; dest should be a path relative to the bundle
# (eg. "Contents/Resources/MyStuff/SomeFile.ext).
files = []
# List of shared libraries (dylibs, Frameworks) to bundle with the app
# will be placed in Contents/Frameworks
libs = []
# Directory where the bundle will be assembled.
builddir = "build"
# Make symlinks instead copying files. This is handy during debugging, but
# makes the bundle non-distributable.
symlink = 0
# Verbosity level.
verbosity = 1
# Destination root directory
destroot = ""
def setup(self):
# XXX rethink self.name munging, this is brittle.
self.name, ext = os.path.splitext(self.name)
if not ext:
ext = ".bundle"
bundleextension = ext
# misc (derived) attributes
self.bundlepath = pathjoin(self.builddir, self.name + bundleextension)
plist = self.plist
plist.CFBundleName = self.name
plist.CFBundlePackageType = self.type
if self.creator is None:
if hasattr(plist, "CFBundleSignature"):
self.creator = plist.CFBundleSignature
else:
self.creator = "????"
plist.CFBundleSignature = self.creator
if self.bundle_id:
plist.CFBundleIdentifier = self.bundle_id
elif not hasattr(plist, "CFBundleIdentifier"):
plist.CFBundleIdentifier = self.name
def build(self):
"""Build the bundle."""
builddir = self.builddir
if builddir and not os.path.exists(builddir):
os.mkdir(builddir)
self.message("Building %s" % repr(self.bundlepath), 1)
if os.path.exists(self.bundlepath):
shutil.rmtree(self.bundlepath)
if os.path.exists(self.bundlepath + '~'):
shutil.rmtree(self.bundlepath + '~')
bp = self.bundlepath
# Create the app bundle in a temporary location and then
# rename the completed bundle. This way the Finder will
# never see an incomplete bundle (where it might pick up
# and cache the wrong meta data)
self.bundlepath = bp + '~'
try:
os.mkdir(self.bundlepath)
self.preProcess()
self._copyFiles()
self._addMetaFiles()
self.postProcess()
os.rename(self.bundlepath, bp)
finally:
self.bundlepath = bp
self.message("Done.", 1)
def preProcess(self):
"""Hook for subclasses."""
pass
def postProcess(self):
"""Hook for subclasses."""
pass
def _addMetaFiles(self):
contents = pathjoin(self.bundlepath, "Contents")
makedirs(contents)
#
# Write Contents/PkgInfo
assert len(self.type) == len(self.creator) == 4, \
"type and creator must be 4-byte strings."
pkginfo = pathjoin(contents, "PkgInfo")
f = open(pkginfo, "wb")
f.write(self.type + self.creator)
f.close()
#
# Write Contents/Info.plist
infoplist = pathjoin(contents, "Info.plist")
self.plist.write(infoplist)
def _copyFiles(self):
files = self.files[:]
for path in self.resources:
files.append((path, pathjoin("Contents", "Resources",
os.path.basename(path))))
for path in self.libs:
files.append((path, pathjoin("Contents", "Frameworks",
os.path.basename(path))))
if self.symlink:
self.message("Making symbolic links", 1)
msg = "Making symlink from"
else:
self.message("Copying files", 1)
msg = "Copying"
files.sort()
for src, dst in files:
if os.path.isdir(src):
self.message("%s %s/ to %s/" % (msg, src, dst), 2)
else:
self.message("%s %s to %s" % (msg, src, dst), 2)
dst = pathjoin(self.bundlepath, dst)
if self.symlink:
symlink(src, dst, mkdirs=1)
else:
copy(src, dst, mkdirs=1)
def message(self, msg, level=0):
if level <= self.verbosity:
indent = ""
if level > 1:
indent = (level - 1) * " "
sys.stderr.write(indent + msg + "\n")
def report(self):
# XXX something decent
pass
if __debug__:
PYC_EXT = ".pyc"
else:
PYC_EXT = ".pyo"
MAGIC = imp.get_magic()
USE_ZIPIMPORT = "zipimport" in sys.builtin_module_names
# For standalone apps, we have our own minimal site.py. We don't need
# all the cruft of the real site.py.
SITE_PY = """\
import sys
if not %(semi_standalone)s:
del sys.path[1:] # sys.path[0] is Contents/Resources/
"""
if USE_ZIPIMPORT:
ZIP_ARCHIVE = "Modules.zip"
SITE_PY += "sys.path.append(sys.path[0] + '/%s')\n" % ZIP_ARCHIVE
def getPycData(fullname, code, ispkg):
if ispkg:
fullname += ".__init__"
path = fullname.replace(".", os.sep) + PYC_EXT
return path, MAGIC + '\0\0\0\0' + marshal.dumps(code)
#
# Extension modules can't be in the modules zip archive, so a placeholder
# is added instead, that loads the extension from a specified location.
#
EXT_LOADER = """\
def __load():
import imp, sys, os
for p in sys.path:
path = os.path.join(p, "%(filename)s")
if os.path.exists(path):
break
else:
assert 0, "file not found: %(filename)s"
mod = imp.load_dynamic("%(name)s", path)
__load()
del __load
"""
MAYMISS_MODULES = ['mac', 'os2', 'nt', 'ntpath', 'dos', 'dospath',
'win32api', 'ce', '_winreg', 'nturl2path', 'sitecustomize',
'org.python.core', 'riscos', 'riscosenviron', 'riscospath'
]
STRIP_EXEC = "/usr/bin/strip"
#
# We're using a stock interpreter to run the app, yet we need
# a way to pass the Python main program to the interpreter. The
# bootstrapping script fires up the interpreter with the right
# arguments. os.execve() is used as OSX doesn't like us to
# start a real new process. Also, the executable name must match
# the CFBundleExecutable value in the Info.plist, so we lie
# deliberately with argv[0]. The actual Python executable is
# passed in an environment variable so we can "repair"
# sys.executable later.
#
BOOTSTRAP_SCRIPT = """\
#!%(hashbang)s
import sys, os
execdir = os.path.dirname(sys.argv[0])
executable = os.path.join(execdir, "%(executable)s")
resdir = os.path.join(os.path.dirname(execdir), "Resources")
libdir = os.path.join(os.path.dirname(execdir), "Frameworks")
mainprogram = os.path.join(resdir, "%(mainprogram)s")
sys.argv.insert(1, mainprogram)
if %(standalone)s or %(semi_standalone)s:
os.environ["PYTHONPATH"] = resdir
if %(standalone)s:
os.environ["PYTHONHOME"] = resdir
else:
pypath = os.getenv("PYTHONPATH", "")
if pypath:
pypath = ":" + pypath
os.environ["PYTHONPATH"] = resdir + pypath
os.environ["PYTHONEXECUTABLE"] = executable
os.environ["DYLD_LIBRARY_PATH"] = libdir
os.environ["DYLD_FRAMEWORK_PATH"] = libdir
os.execve(executable, sys.argv, os.environ)
"""
#
# Optional wrapper that converts "dropped files" into sys.argv values.
#
ARGV_EMULATOR = """\
import argvemulator, os
argvemulator.ArgvCollector().mainloop()
execfile(os.path.join(os.path.split(__file__)[0], "%(realmainprogram)s"))
"""
#
# When building a standalone app with Python.framework, we need to copy
# a subset from Python.framework to the bundle. The following list
# specifies exactly what items we'll copy.
#
PYTHONFRAMEWORKGOODIES = [
"Python", # the Python core library
"Resources/English.lproj",
"Resources/Info.plist",
]
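# Illustration only: addPythonFramework() below copies each item listed above
# to a destination of the form
#     Contents/Frameworks/Python.framework/Versions/<X.Y>/<item>
# so a standalone bundle ships just the core library, its Info.plist and the
# English resources.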
def isFramework():
return sys.exec_prefix.find("Python.framework") > 0
LIB = os.path.join(sys.prefix, "lib", "python" + sys.version[:3])
SITE_PACKAGES = os.path.join(LIB, "site-packages")
class AppBuilder(BundleBuilder):
# Override type of the bundle.
type = "APPL"
# platform, name of the subfolder of Contents that contains the executable.
platform = "MacOS"
# A Python main program. If this argument is given, the main
# executable in the bundle will be a small wrapper that invokes
# the main program. (XXX Discuss why.)
mainprogram = None
# The main executable. If a Python main program is specified
# the executable will be copied to Resources and be invoked
# by the wrapper program mentioned above. Otherwise it will
# simply be used as the main executable.
executable = None
# The name of the main nib, for Cocoa apps. *Must* be specified
# when building a Cocoa app.
nibname = None
# The name of the icon file to be copied to Resources and used for
# the Finder icon.
iconfile = None
# Symlink the executable instead of copying it.
symlink_exec = 0
# If True, build standalone app.
standalone = 0
# If True, build semi-standalone app (only includes third-party modules).
semi_standalone = 0
    # If set, use this for #! lines instead of sys.executable
python = None
# If True, add a real main program that emulates sys.argv before calling
# mainprogram
argv_emulation = 0
# The following attributes are only used when building a standalone app.
# Exclude these modules.
excludeModules = []
# Include these modules.
includeModules = []
# Include these packages.
includePackages = []
# Strip binaries from debug info.
strip = 0
# Found Python modules: [(name, codeobject, ispkg), ...]
pymodules = []
# Modules that modulefinder couldn't find:
missingModules = []
maybeMissingModules = []
def setup(self):
if ((self.standalone or self.semi_standalone)
and self.mainprogram is None):
raise BundleBuilderError, ("must specify 'mainprogram' when "
"building a standalone application.")
if self.mainprogram is None and self.executable is None:
raise BundleBuilderError, ("must specify either or both of "
"'executable' and 'mainprogram'")
self.execdir = pathjoin("Contents", self.platform)
if self.name is not None:
pass
elif self.mainprogram is not None:
self.name = os.path.splitext(os.path.basename(self.mainprogram))[0]
        elif self.executable is not None:
self.name = os.path.splitext(os.path.basename(self.executable))[0]
if self.name[-4:] != ".app":
self.name += ".app"
if self.executable is None:
if not self.standalone and not isFramework():
self.symlink_exec = 1
if self.python:
self.executable = self.python
else:
self.executable = sys.executable
if self.nibname:
self.plist.NSMainNibFile = self.nibname
if not hasattr(self.plist, "NSPrincipalClass"):
self.plist.NSPrincipalClass = "NSApplication"
if self.standalone and isFramework():
self.addPythonFramework()
BundleBuilder.setup(self)
self.plist.CFBundleExecutable = self.name
if self.standalone or self.semi_standalone:
self.findDependencies()
def preProcess(self):
resdir = "Contents/Resources"
if self.executable is not None:
if self.mainprogram is None:
execname = self.name
else:
execname = os.path.basename(self.executable)
execpath = pathjoin(self.execdir, execname)
if not self.symlink_exec:
self.files.append((self.destroot + self.executable, execpath))
self.execpath = execpath
if self.mainprogram is not None:
mainprogram = os.path.basename(self.mainprogram)
self.files.append((self.mainprogram, pathjoin(resdir, mainprogram)))
if self.argv_emulation:
# Change the main program, and create the helper main program (which
# does argv collection and then calls the real main).
# Also update the included modules (if we're creating a standalone
# program) and the plist
realmainprogram = mainprogram
mainprogram = '__argvemulator_' + mainprogram
resdirpath = pathjoin(self.bundlepath, resdir)
mainprogrampath = pathjoin(resdirpath, mainprogram)
makedirs(resdirpath)
open(mainprogrampath, "w").write(ARGV_EMULATOR % locals())
if self.standalone or self.semi_standalone:
self.includeModules.append("argvemulator")
self.includeModules.append("os")
if not self.plist.has_key("CFBundleDocumentTypes"):
self.plist["CFBundleDocumentTypes"] = [
{ "CFBundleTypeOSTypes" : [
"****",
"fold",
"disk"],
"CFBundleTypeRole": "Viewer"}]
# Write bootstrap script
executable = os.path.basename(self.executable)
execdir = pathjoin(self.bundlepath, self.execdir)
bootstrappath = pathjoin(execdir, self.name)
makedirs(execdir)
if self.standalone or self.semi_standalone:
# XXX we're screwed when the end user has deleted
# /usr/bin/python
hashbang = "/usr/bin/python"
elif self.python:
hashbang = self.python
else:
hashbang = os.path.realpath(sys.executable)
standalone = self.standalone
semi_standalone = self.semi_standalone
open(bootstrappath, "w").write(BOOTSTRAP_SCRIPT % locals())
os.chmod(bootstrappath, 0775)
if self.iconfile is not None:
iconbase = os.path.basename(self.iconfile)
self.plist.CFBundleIconFile = iconbase
self.files.append((self.iconfile, pathjoin(resdir, iconbase)))
def postProcess(self):
if self.standalone or self.semi_standalone:
self.addPythonModules()
if self.strip and not self.symlink:
self.stripBinaries()
if self.symlink_exec and self.executable:
self.message("Symlinking executable %s to %s" % (self.executable,
self.execpath), 2)
dst = pathjoin(self.bundlepath, self.execpath)
makedirs(os.path.dirname(dst))
os.symlink(os.path.abspath(self.executable), dst)
if self.missingModules or self.maybeMissingModules:
self.reportMissing()
def addPythonFramework(self):
# If we're building a standalone app with Python.framework,
# include a minimal subset of Python.framework, *unless*
# Python.framework was specified manually in self.libs.
for lib in self.libs:
if os.path.basename(lib) == "Python.framework":
# a Python.framework was specified as a library
return
frameworkpath = sys.exec_prefix[:sys.exec_prefix.find(
"Python.framework") + len("Python.framework")]
version = sys.version[:3]
frameworkpath = pathjoin(frameworkpath, "Versions", version)
destbase = pathjoin("Contents", "Frameworks", "Python.framework",
"Versions", version)
for item in PYTHONFRAMEWORKGOODIES:
src = pathjoin(frameworkpath, item)
dst = pathjoin(destbase, item)
self.files.append((src, dst))
def _getSiteCode(self):
return compile(SITE_PY % {"semi_standalone": self.semi_standalone},
"<-bundlebuilder.py->", "exec")
def addPythonModules(self):
self.message("Adding Python modules", 1)
if USE_ZIPIMPORT:
# Create a zip file containing all modules as pyc.
import zipfile
relpath = pathjoin("Contents", "Resources", ZIP_ARCHIVE)
abspath = pathjoin(self.bundlepath, relpath)
zf = zipfile.ZipFile(abspath, "w", zipfile.ZIP_DEFLATED)
for name, code, ispkg in self.pymodules:
self.message("Adding Python module %s" % name, 2)
path, pyc = getPycData(name, code, ispkg)
zf.writestr(path, pyc)
zf.close()
# add site.pyc
sitepath = pathjoin(self.bundlepath, "Contents", "Resources",
"site" + PYC_EXT)
writePyc(self._getSiteCode(), sitepath)
else:
# Create individual .pyc files.
for name, code, ispkg in self.pymodules:
if ispkg:
name += ".__init__"
path = name.split(".")
path = pathjoin("Contents", "Resources", *path) + PYC_EXT
if ispkg:
self.message("Adding Python package %s" % path, 2)
else:
self.message("Adding Python module %s" % path, 2)
abspath = pathjoin(self.bundlepath, path)
makedirs(os.path.dirname(abspath))
writePyc(code, abspath)
def stripBinaries(self):
if not os.path.exists(STRIP_EXEC):
self.message("Error: can't strip binaries: no strip program at "
"%s" % STRIP_EXEC, 0)
else:
import stat
self.message("Stripping binaries", 1)
def walk(top):
for name in os.listdir(top):
path = pathjoin(top, name)
if os.path.islink(path):
continue
if os.path.isdir(path):
walk(path)
else:
mod = os.stat(path)[stat.ST_MODE]
if not (mod & 0100):
continue
relpath = path[len(self.bundlepath):]
self.message("Stripping %s" % relpath, 2)
inf, outf = os.popen4("%s -S \"%s\"" %
(STRIP_EXEC, path))
output = outf.read().strip()
if output:
# usually not a real problem, like when we're
# trying to strip a script
self.message("Problem stripping %s:" % relpath, 3)
self.message(output, 3)
walk(self.bundlepath)
def findDependencies(self):
self.message("Finding module dependencies", 1)
import modulefinder
mf = modulefinder.ModuleFinder(excludes=self.excludeModules)
if USE_ZIPIMPORT:
# zipimport imports zlib, must add it manually
mf.import_hook("zlib")
# manually add our own site.py
site = mf.add_module("site")
site.__code__ = self._getSiteCode()
mf.scan_code(site.__code__, site)
# warnings.py gets imported implicitly from C
mf.import_hook("warnings")
includeModules = self.includeModules[:]
for name in self.includePackages:
includeModules.extend(findPackageContents(name).keys())
for name in includeModules:
try:
mf.import_hook(name)
except ImportError:
self.missingModules.append(name)
mf.run_script(self.mainprogram)
modules = mf.modules.items()
modules.sort()
for name, mod in modules:
path = mod.__file__
if path and self.semi_standalone:
# skip the standard library
if path.startswith(LIB) and not path.startswith(SITE_PACKAGES):
continue
if path and mod.__code__ is None:
# C extension
filename = os.path.basename(path)
pathitems = name.split(".")[:-1] + [filename]
dstpath = pathjoin(*pathitems)
if USE_ZIPIMPORT:
if name != "zlib":
# neatly pack all extension modules in a subdirectory,
# except zlib, since it's necessary for bootstrapping.
dstpath = pathjoin("ExtensionModules", dstpath)
# Python modules are stored in a Zip archive, but put
# extensions in Contents/Resources/. Add a tiny "loader"
# program in the Zip archive. Due to Thomas Heller.
source = EXT_LOADER % {"name": name, "filename": dstpath}
code = compile(source, "<dynloader for %s>" % name, "exec")
mod.__code__ = code
self.files.append((path, pathjoin("Contents", "Resources", dstpath)))
if mod.__code__ is not None:
ispkg = mod.__path__ is not None
if not USE_ZIPIMPORT or name != "site":
# Our site.py is doing the bootstrapping, so we must
# include a real .pyc file if USE_ZIPIMPORT is True.
self.pymodules.append((name, mod.__code__, ispkg))
if hasattr(mf, "any_missing_maybe"):
missing, maybe = mf.any_missing_maybe()
else:
missing = mf.any_missing()
maybe = []
self.missingModules.extend(missing)
self.maybeMissingModules.extend(maybe)
def reportMissing(self):
missing = [name for name in self.missingModules
if name not in MAYMISS_MODULES]
if self.maybeMissingModules:
maybe = self.maybeMissingModules
else:
maybe = [name for name in missing if "." in name]
missing = [name for name in missing if "." not in name]
missing.sort()
maybe.sort()
if maybe:
self.message("Warning: couldn't find the following submodules:", 1)
self.message(" (Note that these could be false alarms -- "
"it's not always", 1)
self.message(" possible to distinguish between \"from package "
"import submodule\" ", 1)
self.message(" and \"from package import name\")", 1)
for name in maybe:
self.message(" ? " + name, 1)
if missing:
self.message("Warning: couldn't find the following modules:", 1)
for name in missing:
self.message(" ? " + name, 1)
def report(self):
# XXX something decent
import pprint
pprint.pprint(self.__dict__)
if self.standalone or self.semi_standalone:
self.reportMissing()
#
# Utilities.
#
SUFFIXES = [_suf for _suf, _mode, _tp in imp.get_suffixes()]
identifierRE = re.compile(r"[_a-zA-Z][_a-zA-Z0-9]*$")
def findPackageContents(name, searchpath=None):
head = name.split(".")[-1]
if identifierRE.match(head) is None:
return {}
try:
fp, path, (ext, mode, tp) = imp.find_module(head, searchpath)
except ImportError:
return {}
modules = {name: None}
if tp == imp.PKG_DIRECTORY and path:
files = os.listdir(path)
for sub in files:
sub, ext = os.path.splitext(sub)
fullname = name + "." + sub
if sub != "__init__" and fullname not in modules:
modules.update(findPackageContents(fullname, [path]))
return modules
def writePyc(code, path):
f = open(path, "wb")
f.write(MAGIC)
f.write("\0" * 4) # don't bother about a time stamp
marshal.dump(code, f)
f.close()
def copy(src, dst, mkdirs=0):
"""Copy a file or a directory."""
if mkdirs:
makedirs(os.path.dirname(dst))
if os.path.isdir(src):
shutil.copytree(src, dst, symlinks=1)
else:
shutil.copy2(src, dst)
def copytodir(src, dstdir):
"""Copy a file or a directory to an existing directory."""
dst = pathjoin(dstdir, os.path.basename(src))
copy(src, dst)
def makedirs(dir):
"""Make all directories leading up to 'dir' including the leaf
directory. Don't moan if any path element already exists."""
try:
os.makedirs(dir)
except OSError, why:
if why.errno != errno.EEXIST:
raise
def symlink(src, dst, mkdirs=0):
"""Copy a file or a directory."""
if not os.path.exists(src):
raise IOError, "No such file or directory: '%s'" % src
if mkdirs:
makedirs(os.path.dirname(dst))
os.symlink(os.path.abspath(src), dst)
def pathjoin(*args):
"""Safe wrapper for os.path.join: asserts that all but the first
argument are relative paths."""
for seg in args[1:]:
assert seg[0] != "/"
return os.path.join(*args)
cmdline_doc = """\
Usage:
python bundlebuilder.py [options] command
python mybuildscript.py [options] command
Commands:
build build the application
report print a report
Options:
-b, --builddir=DIR the build directory; defaults to "build"
-n, --name=NAME application name
-r, --resource=FILE extra file or folder to be copied to Resources
-f, --file=SRC:DST extra file or folder to be copied into the bundle;
DST must be a path relative to the bundle root
-e, --executable=FILE the executable to be used
-m, --mainprogram=FILE the Python main program
-a, --argv add a wrapper main program to create sys.argv
-p, --plist=FILE .plist file (default: generate one)
--nib=NAME main nib name
-c, --creator=CCCC 4-char creator code (default: '????')
--iconfile=FILE filename of the icon (an .icns file) to be used
as the Finder icon
--bundle-id=ID the CFBundleIdentifier, in reverse-dns format
(eg. org.python.BuildApplet; this is used for
the preferences file name)
-l, --link symlink files/folder instead of copying them
--link-exec symlink the executable instead of copying it
--standalone build a standalone application, which is fully
independent of a Python installation
--semi-standalone build a standalone application, which depends on
an installed Python, yet includes all third-party
modules.
  --python=FILE          Python to use in #! line instead of current Python
--lib=FILE shared library or framework to be copied into
the bundle
-x, --exclude=MODULE exclude module (with --(semi-)standalone)
-i, --include=MODULE include module (with --(semi-)standalone)
--package=PACKAGE include a whole package (with --(semi-)standalone)
--strip strip binaries (remove debug info)
-v, --verbose increase verbosity level
-q, --quiet decrease verbosity level
-h, --help print this message
"""
def usage(msg=None):
if msg:
print msg
print cmdline_doc
sys.exit(1)
def main(builder=None):
if builder is None:
builder = AppBuilder(verbosity=1)
shortopts = "b:n:r:f:e:m:c:p:lx:i:hvqa"
longopts = ("builddir=", "name=", "resource=", "file=", "executable=",
"mainprogram=", "creator=", "nib=", "plist=", "link",
"link-exec", "help", "verbose", "quiet", "argv", "standalone",
"exclude=", "include=", "package=", "strip", "iconfile=",
"lib=", "python=", "semi-standalone", "bundle-id=", "destroot=")
try:
options, args = getopt.getopt(sys.argv[1:], shortopts, longopts)
except getopt.error:
usage()
for opt, arg in options:
if opt in ('-b', '--builddir'):
builder.builddir = arg
elif opt in ('-n', '--name'):
builder.name = arg
elif opt in ('-r', '--resource'):
builder.resources.append(os.path.normpath(arg))
elif opt in ('-f', '--file'):
srcdst = arg.split(':')
if len(srcdst) != 2:
usage("-f or --file argument must be two paths, "
"separated by a colon")
builder.files.append(srcdst)
elif opt in ('-e', '--executable'):
builder.executable = arg
elif opt in ('-m', '--mainprogram'):
builder.mainprogram = arg
elif opt in ('-a', '--argv'):
builder.argv_emulation = 1
elif opt in ('-c', '--creator'):
builder.creator = arg
elif opt == '--bundle-id':
builder.bundle_id = arg
elif opt == '--iconfile':
builder.iconfile = arg
elif opt == "--lib":
builder.libs.append(os.path.normpath(arg))
elif opt == "--nib":
builder.nibname = arg
elif opt in ('-p', '--plist'):
builder.plist = Plist.fromFile(arg)
elif opt in ('-l', '--link'):
builder.symlink = 1
elif opt == '--link-exec':
builder.symlink_exec = 1
elif opt in ('-h', '--help'):
usage()
elif opt in ('-v', '--verbose'):
builder.verbosity += 1
elif opt in ('-q', '--quiet'):
builder.verbosity -= 1
elif opt == '--standalone':
builder.standalone = 1
elif opt == '--semi-standalone':
builder.semi_standalone = 1
elif opt == '--python':
builder.python = arg
elif opt in ('-x', '--exclude'):
builder.excludeModules.append(arg)
elif opt in ('-i', '--include'):
builder.includeModules.append(arg)
elif opt == '--package':
builder.includePackages.append(arg)
elif opt == '--strip':
builder.strip = 1
elif opt == '--destroot':
builder.destroot = arg
if len(args) != 1:
usage("Must specify one command ('build', 'report' or 'help')")
command = args[0]
if command == "build":
builder.setup()
builder.build()
elif command == "report":
builder.setup()
builder.report()
elif command == "help":
usage()
else:
usage("Unknown command '%s'" % command)
def buildapp(**kwargs):
builder = AppBuilder(**kwargs)
main(builder)
if __name__ == "__main__":
main()
| 35.685165
| 85
| 0.58531
|
75da46d1e20c7874fe8fb8c645c1cedde099c9eb
| 1,194
|
py
|
Python
|
WeblogicScanLot/poc/Console.py
|
y11en/super-guacamole
|
e15697e58895ed684a8076f95a0d2ab6d43d98d2
|
[
"Apache-2.0"
] | 32
|
2020-07-15T06:22:06.000Z
|
2022-02-15T02:29:36.000Z
|
WeblogicScanLot/poc/Console.py
|
wonderkun/Penetration_Testing_POC
|
563c356a1ca9f50d8ad336aa061149830c00de1a
|
[
"Apache-2.0"
] | 1
|
2021-06-02T02:55:26.000Z
|
2021-06-02T02:55:26.000Z
|
WeblogicScanLot/poc/Console.py
|
wonderkun/Penetration_Testing_POC
|
563c356a1ca9f50d8ad336aa061149830c00de1a
|
[
"Apache-2.0"
] | 8
|
2020-07-15T06:22:25.000Z
|
2022-01-26T03:11:33.000Z
|
#!/usr/bin/env python3
# _*_ coding:utf-8 _*_
'''
____ _ _ _ _ __ __ _
| _ \ __ _| |__ | |__ (_) |_| \/ | __ _ ___| | __
| |_) / _` | '_ \| '_ \| | __| |\/| |/ _` / __| |/ /
| _ < (_| | |_) | |_) | | |_| | | | (_| \__ \ <
|_| \_\__,_|_.__/|_.__/|_|\__|_| |_|\__,_|___/_|\_\
'''
import logging
import sys
import requests
logging.basicConfig(filename='Weblogic.log',
format='%(asctime)s %(message)s',
filemode="w", level=logging.INFO)
headers = {'user-agent': 'ceshi/0.0.1'}
def islive(ur,port):
    url='http://' + str(ur)+':'+str(port)+'/console/login/LoginForm.jsp'
    try:
        r = requests.get(url, headers=headers, timeout=10)
        return r.status_code
    except requests.exceptions.RequestException:
        # treat unreachable hosts as "console not found" instead of crashing
        return 0
def run(url,port):
if islive(url,port)==200:
u='http://' + str(url)+':'+str(port)+'/console/login/LoginForm.jsp'
logging.info("[+]{}:{} console address is exposed! The path is: {} Please try weak password blasting!".format(url,port,u))
else:
logging.info('[-]{}:{} console address not found!'.format(url,port))
if __name__=="__main__":
url = sys.argv[1]
port = int(sys.argv[2])
run(url,port)
| 32.27027
| 131
| 0.514238
|
314e5241160a66e3705fc069e2f7b6fafadd3833
| 1,380
|
py
|
Python
|
DQMOffline/CalibTracker/test/SiStripBadComponentsDQMServiceReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
DQMOffline/CalibTracker/test/SiStripBadComponentsDQMServiceReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
DQMOffline/CalibTracker/test/SiStripBadComponentsDQMServiceReader_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
timetype = cms.string('runnumber'),
firstRun = cms.untracked.uint32(110213),
lastRun = cms.untracked.uint32(110213),
interval = cms.uint32(1)
)
process.MessageLogger = cms.Service(
"MessageLogger",
destinations = cms.untracked.vstring("SiStripBadComponentsDQMServiceReader.log"),
threshold = cms.untracked.string('INFO')
)
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('/afs/cern.ch/cms/DB/conddb')
),
timetype = cms.string('runnumber'),
toGet = cms.VPSet(cms.PSet(
record = cms.string("SiStripBadStripRcd"),
tag = cms.string("SiStripBadStrip_FromOnlineDQM_V2")
)),
connect = cms.string('oracle://cms_orcoff_prod/CMS_COND_31X_STRIP')
)
process.prod = cms.EDAnalyzer("SiStripBadComponentsDQMServiceReader",
printDebug = cms.untracked.bool(True)
)
#process.print = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.prod)
#process.ep = cms.EndPath(process.print)
| 31.363636
| 85
| 0.707246
|
4d499c79b063c239d667bdb45a5357bfc6bd04e2
| 4,298
|
py
|
Python
|
share/seeds/generate-seeds.py
|
xuyangcn/opalcoin
|
aafb557b1d80fbe29143c360cb138b83cea2275e
|
[
"MIT"
] | null | null | null |
share/seeds/generate-seeds.py
|
xuyangcn/opalcoin
|
aafb557b1d80fbe29143c360cb138b83cea2275e
|
[
"MIT"
] | null | null | null |
share/seeds/generate-seeds.py
|
xuyangcn/opalcoin
|
aafb557b1d80fbe29143c360cb138b83cea2275e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
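# Illustrative use (paths are hypothetical): point the script at the directory
# holding nodes_main.txt / nodes_test.txt and capture stdout, e.g.
#   python generate-seeds.py ../seeds > ../../src/chainparamsseeds.h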
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
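# Illustration only (not called by the script itself): both forms below map to
# the same IPv4-in-IPv6 16-byte address ::ffff:1.2.3.4
#   name_to_ipv6('1.2.3.4')
#   name_to_ipv6('0x04030201')   # old little-endian pnSeeds form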
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the opalcoin network\n')
g.write(' * AUTOGENERATED by share/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 9999)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 19999)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
| 31.602941
| 98
| 0.585854
|
d94111cc6d5d7da4747a2bfade87491146098588
| 12,408
|
py
|
Python
|
frasco_models/__init__.py
|
frascoweb/frasco-models
|
f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba
|
[
"MIT"
] | 1
|
2015-09-24T10:01:03.000Z
|
2015-09-24T10:01:03.000Z
|
frasco_models/__init__.py
|
frascoweb/frasco-models
|
f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba
|
[
"MIT"
] | null | null | null |
frasco_models/__init__.py
|
frascoweb/frasco-models
|
f7c1e14424cadf3dc07c2bd81cc32b0fd046ccba
|
[
"MIT"
] | null | null | null |
from frasco import Feature, action, current_app, request, abort, listens_to, current_context
from frasco.utils import (AttrDict, import_string, populate_obj, RequirementMissingError,\
find_classes_in_module, slugify)
from frasco.expression import compile_expr, eval_expr
from frasco.templating import FileLoader, FileSystemLoader
from werkzeug.local import LocalProxy
from .backend import *
from .utils import *
from .query import *
from .transaction import *
import inspect
import os
import inflection
_db = None
def get_current_db():
return _db
db = LocalProxy(get_current_db)
form_imported = False
try:
from .form import *
form_imported = True
except ImportError:
pass
class ModelsFeature(Feature):
name = "models"
defaults = {"backend": None,
"pagination_per_page": 10,
"scopes": {},
"import_models": True,
"ensure_schema": True,
"admin_models": []}
def init_app(self, app):
if not self.options["backend"]:
raise Exception("Missing backend")
self.backend_cls = self.get_backend_class(self.options["backend"])
self.backend = self.backend_cls(app, self.options)
self.scopes = compile_expr(self.options["scopes"])
self.models = {}
self.delayed_tx_calls = delayed_tx_calls
global _db
self.db = _db = self.backend.db
if self.options["import_models"]:
models_pkg = self.options['import_models']
if not isinstance(self.options['import_models'], str):
models_pkg = "models"
if app.import_name != "__main__":
models_pkg = app.import_name + "." + models_pkg
try:
__import__(models_pkg)
except ImportError as e:
if "No module named %s" % models_pkg.split('.')[-1] not in e.message:
raise
if form_imported:
app.jinja_env.loader.bottom_loaders.append(FileLoader(
os.path.join(os.path.dirname(__file__), "form_template.html"), "model_form_template.html"))
app.jinja_env.loader.bottom_loaders.append(FileLoader(
os.path.join(os.path.dirname(__file__), "bs_form_template.html"), "model_bs_form_template.html"))
def init_admin(self, admin, app):
from .admin import create_model_admin_blueprint
app.jinja_env.loader.bottom_loaders.append(FileSystemLoader(
os.path.join(os.path.dirname(__file__), "admin/templates")))
for model in self.options['admin_models']:
kwargs = {}
if isinstance(model, dict):
model, kwargs = model.items()[0]
model = self.ensure_model(model)
with_counter = kwargs.pop('with_counter', False)
counter_filters = kwargs.pop('counter_filters', {})
title = inflection.pluralize(inflection.humanize(model.__name__))
kwargs.setdefault('title', title)
kwargs.setdefault('menu', title)
name = inflection.pluralize(inflection.underscore(model.__name__))
admin.register_blueprint(create_model_admin_blueprint(name, __name__, model, **kwargs))
if with_counter:
                admin.register_dashboard_counter(title,
                    lambda model=model, counter_filters=counter_filters: self.query(model).filter(**counter_filters).count(),
                    icon=kwargs.get('icon'))
def get_backend_class(self, name):
try:
backend_cls = import_string("frasco_models.backends.%s" % name)
except ImportError:
backend_cls = import_string(name)
if inspect.ismodule(backend_cls):
# Gives the possibility to reference a module and auto-discover the Backend class
classes = find_classes_in_module(backend_cls, (Backend,))
if not classes:
raise ImportError("Cannot find a Backend class in module '%s'" % name)
if len(classes) > 1:
raise ImportError("Model backend '%s' references a module with multiple backends" % name)
backend_cls = classes[0]
elif not issubclass(backend_cls, Backend):
raise ImportError("Class '%s' is not a subclass of Backend" % name)
return backend_cls
def require_backend(self, name):
if self.backend.name != name:
raise RequirementMissingError("A models backend named '%s' is required but '%s' is used" % (name, self.backend.name))
def ensure_model(self, model_name, **fields):
if inspect.isclass(model_name):
model_name = model_name.__name__
if model_name not in self.models:
self.models[model_name] = self.backend.ensure_model(model_name)
if fields and self.options['ensure_schema']:
for k, v in fields.iteritems():
if not isinstance(v, dict):
fields[k] = dict(type=v)
self.backend.ensure_schema(model_name, fields)
return self.models[model_name]
def __getitem__(self, name):
return self.ensure_model(name)
def __setitem__(self, name, model):
self.models[name] = model
def __contains__(self, name):
return name in self.models
def query(self, model):
return Query(self.ensure_model(model), self.backend)
def transaction(self, *args, **kwargs):
return transaction(*args, **kwargs)
def scoped_query(self, model, scope=None):
q = self.query(model)
if "model_scopes" in current_context.data:
q = q.filter(**current_context.data.model_scopes.get(model.__name__, {}))
if scope:
scopes = scope if isinstance(scope, list) else list([scope])
for s in scopes:
if s not in self.scopes:
raise QueryError("Missing model scope '%s'" % s)
q = q.filter(**eval_expr(self.scopes[s], current_context.vars))
return q
@action("build_model_query")
def build_query(self, model, scope=None, filter_from=None, search_query=None, search_query_default_field=None,
order_by=None, limit=None, offset=None, **kwargs):
q = self.scoped_query(model, scope)
filters = {}
if filter_from == "form":
filters.update(dict([(f.name, f.data) for f in current_context.data.form]))
elif filter_from == "url":
filters.update(dict([(k, v) for k, v in request.values.items()]))
elif filter_from == "args":
filters.update(dict([(k, v) for k, v in request.view_args.items()]))
if 'filters_or' in kwargs:
q = q.filter(or_(*kwargs.pop('filters_or')))
filters.update(kwargs.get("filters", kwargs))
if filters:
q = q.filter(**filters)
if search_query:
q = q.filter(*parse_search_query(search_query, search_query_default_field))
if order_by:
q = q.order_by(order_by)
if limit:
q = q.limit(limit)
if offset:
q = q.offset(offset)
return q
@action("paginate_query")
def paginate(self, query, page=None, per_page=None, check_bounds=True):
if page is None:
page = int(page or request.values.get("page", 1))
if per_page is None:
per_page = self.options["pagination_per_page"]
total = query.order_by(None).offset(None).limit(None).count()
pagination = Pagination(page, per_page, total)
if check_bounds and pagination.nb_pages > 0 and (page < 1 or page > pagination.nb_pages):
raise PageOutOfBoundError()
return query.offset(pagination.offset).limit(per_page), pagination
@action("find_model")
def find_first(self, model, not_found_404=True, **query):
model = self.ensure_model(model)
obj = self.build_query(model, **query).first()
if obj is None and not_found_404:
abort(404)
if not self.find_first.as_:
self.find_first.as_ = as_single_model(model)
current_context.data.model = obj
return obj
@action("find_models", default_option="model")
def find_all(self, model, paginate=False, page=None, pagination_var="pagination", **query):
model = self.ensure_model(model)
q = self.build_query(model, **query)
if paginate:
per_page = paginate if not isinstance(paginate, bool) else None
try:
q, pagination = self.paginate(q, page, per_page)
except PageOutOfBoundError:
abort(404)
current_context.vars[pagination_var] = pagination
if not self.find_all.as_:
self.find_all.as_ = as_many_models(model)
current_context.data.models = q
return q
@action("count_models", default_option="model")
def count(self, model, **query):
model = self.ensure_model(model)
count = self.build_query(model, **query).count()
if not self.count.as_:
self.count.as_ = "%s_count" % as_single_model(model)
return count
@action("create_model", default_option="model")
def create(self, model, **attrs):
obj = self.ensure_model(model)(**clean_kwargs_proxy(attrs))
if not self.create.as_:
self.create.as_ = as_single_model(obj.__class__)
return obj
@action("save_model", default_option="obj")
@as_transaction
def save(self, obj=None, model=None, **attrs):
auto_assign = False
obj = clean_proxy(obj)
if obj is None:
obj = self.ensure_model(model)()
auto_assign = True
if attrs:
populate_obj(obj, clean_kwargs_proxy(attrs))
self.backend.add(obj)
if not self.save.as_ and auto_assign:
self.save.as_ = as_single_model(obj.__class__)
return obj
@action("create_model_from_form", default_option="model", requires=["form"])
def create_from_form(self, model, form=None, **attrs):
form = form or current_context.data.form
obj = self.ensure_model(model)()
form.populate_obj(obj)
populate_obj(obj, clean_kwargs_proxy(attrs))
if not self.create_from_form.as_:
self.create_from_form.as_ = as_single_model(obj.__class__)
return obj
@action("save_form_model", default_option="model", requires=["form"])
@as_transaction
def save_from_form(self, obj=None, model=None, form=None, **attrs):
form = form or current_context.data.form
obj = clean_proxy(obj)
auto_assign = False
if obj is None:
if isinstance(model, str):
obj = self.ensure_model(model)()
auto_assign = True
else:
obj = model()
form.populate_obj(obj)
populate_obj(obj, clean_kwargs_proxy(attrs))
self.backend.add(obj)
if not self.save_from_form.as_ and auto_assign:
self.save_from_form.as_ = as_single_model(obj.__class__)
return obj
@action("delete_model", default_option="obj")
@as_transaction
def delete(self, obj):
self.backend.remove(obj)
@action("create_form_from_model", default_option="model", requires=["form"])
def create_form_from_model(self, model, **kwargs):
return create_form_from_model(model, **kwargs)
@action("check_model_not_exists")
def check_not_exists(self, model, error_message=None, **query):
q = self.build_query(model, **query)
if q.count() > 0:
if error_message:
flash(error_message, "error")
current_context.exit(trigger_action_group="model_exists")
@action("define_model_scope")
def define_scope(self, model, **filters):
current_context.data.setdefault("model_scopes", {})
current_context.data.model_scopes.setdefault(model, {})
current_context.data.model_scopes[model].update(filters.get('filters', filters))
@action(as_="slug")
def create_unique_slug(self, value, model, column="slug", **kwargs):
slug = slugify(value)
return ensure_unique_value(model, column, slug, **kwargs)
def save_model(model):
current_app.features.models.backend.add(model)
def delete_model(model):
current_app.features.models.backend.remove(model)
| 39.018868
| 129
| 0.624758
|
f944e870aeffc54834123f04eb59470a6b6f0d45
| 4,065
|
py
|
Python
|
selfdrive/version.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | 1
|
2020-12-28T01:30:27.000Z
|
2020-12-28T01:30:27.000Z
|
selfdrive/version.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | 16
|
2022-01-02T01:38:29.000Z
|
2022-03-30T13:58:33.000Z
|
selfdrive/version.py
|
GratefulJinx77/comma
|
f16e30a44ff5026f1aee502f44f525db2de31d5b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import subprocess
from typing import List, Optional
from functools import lru_cache
from common.basedir import BASEDIR
from selfdrive.swaglog import cloudlog
TESTED_BRANCHES = ['devel', 'release3-staging', 'dashcam3-staging', 'release3', 'dashcam3']
training_version: bytes = b"0.2.0"
terms_version: bytes = b"2"
def cache(user_function, /):
return lru_cache(maxsize=None)(user_function)
def run_cmd(cmd: List[str]) -> str:
return subprocess.check_output(cmd, encoding='utf8').strip()
def run_cmd_default(cmd: List[str], default: Optional[str] = None) -> Optional[str]:
try:
return run_cmd(cmd)
except subprocess.CalledProcessError:
return default
@cache
def get_commit(branch: str = "HEAD", default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", branch], default=default)
@cache
def get_short_branch(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "HEAD"], default=default)
@cache
def get_branch(default: Optional[str] = None) -> Optional[str]:
return run_cmd_default(["git", "rev-parse", "--abbrev-ref", "--symbolic-full-name", "@{u}"], default=default)
@cache
def get_origin(default: Optional[str] = None) -> Optional[str]:
try:
local_branch = run_cmd(["git", "name-rev", "--name-only", "HEAD"])
tracking_remote = run_cmd(["git", "config", "branch." + local_branch + ".remote"])
return run_cmd(["git", "config", "remote." + tracking_remote + ".url"])
except subprocess.CalledProcessError: # Not on a branch, fallback
return run_cmd_default(["git", "config", "--get", "remote.origin.url"], default=default)
@cache
def get_normalized_origin(default: Optional[str] = None) -> Optional[str]:
origin: Optional[str] = get_origin()
if origin is None:
return default
return origin.replace("git@", "", 1) \
.replace(".git", "", 1) \
.replace("https://", "", 1) \
.replace(":", "/", 1)
@cache
def get_version() -> str:
with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "common", "version.h")) as _versionf:
version = _versionf.read().split('"')[1]
return version
@cache
def get_short_version() -> str:
return get_version().split('-')[0] # type: ignore
@cache
def is_prebuilt() -> bool:
return os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
@cache
def is_comma_remote() -> bool:
# note to fork maintainers, this is used for release metrics. please do not
# touch this to get rid of the orange startup alert. there's better ways to do that
origin: Optional[str] = get_origin()
if origin is None:
return False
return origin.startswith('git@github.com:commaai') or origin.startswith('https://github.com/commaai')
@cache
def is_tested_branch() -> bool:
return get_short_branch() in TESTED_BRANCHES
@cache
def is_dirty() -> bool:
origin = get_origin()
branch = get_branch()
if (origin is None) or (branch is None):
return True
dirty = False
try:
# Actually check dirty files
if not is_prebuilt():
# This is needed otherwise touched files might show up as modified
try:
subprocess.check_call(["git", "update-index", "--refresh"])
except subprocess.CalledProcessError:
pass
dirty = (subprocess.call(["git", "diff-index", "--quiet", branch, "--"]) != 0)
except subprocess.CalledProcessError:
cloudlog.exception("git subprocess failed while checking dirty")
dirty = True
return dirty
if __name__ == "__main__":
from common.params import Params
params = Params()
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
print(f"Dirty: {is_dirty()}")
print(f"Version: {get_version()}")
print(f"Short version: {get_short_version()}")
print(f"Origin: {get_origin()}")
print(f"Normalized origin: {get_normalized_origin()}")
print(f"Branch: {get_branch()}")
print(f"Short branch: {get_short_branch()}")
print(f"Prebuilt: {is_prebuilt()}")
| 29.035714
| 111
| 0.680197
|
4a7699b5f4cd6e22c87166e2a3c86744e1a1aedd
| 3,521
|
py
|
Python
|
mne/decoding/tests/test_csp.py
|
TanayGahlot/mne-python
|
857aa97c201451b82931c5eba50642975afc423d
|
[
"BSD-3-Clause"
] | null | null | null |
mne/decoding/tests/test_csp.py
|
TanayGahlot/mne-python
|
857aa97c201451b82931c5eba50642975afc423d
|
[
"BSD-3-Clause"
] | null | null | null |
mne/decoding/tests/test_csp.py
|
TanayGahlot/mne-python
|
857aa97c201451b82931c5eba50642975afc423d
|
[
"BSD-3-Clause"
] | null | null | null |
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Romain Trachel <romain.trachel@inria.fr>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import assert_array_almost_equal
from mne import io, Epochs, read_events, pick_types
from mne.decoding.csp import CSP
from mne.utils import requires_sklearn
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
start, stop = 0, 8 # if stop is too small pca may fail in some cases, but
# we're okay on this file
def test_csp():
"""Test Common Spatial Patterns algorithm on epochs
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
csp = CSP(n_components=n_components)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).transform(epochs_data),
X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
@requires_sklearn
def test_regularized_csp():
"""Test Common Spatial Patterns algorithm using regularized covariance
"""
raw = io.Raw(raw_fname, preload=False)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs_data = epochs.get_data()
n_channels = epochs_data.shape[1]
n_components = 3
reg_cov = [None, 0.05, 'lws', 'oas']
for reg in reg_cov:
csp = CSP(n_components=n_components, reg=reg)
csp.fit(epochs_data, epochs.events[:, -1])
y = epochs.events[:, -1]
X = csp.fit_transform(epochs_data, y)
assert_true(csp.filters_.shape == (n_channels, n_channels))
assert_true(csp.patterns_.shape == (n_channels, n_channels))
assert_array_almost_equal(csp.fit(epochs_data, y).
transform(epochs_data), X)
# test init exception
assert_raises(ValueError, csp.fit, epochs_data,
np.zeros_like(epochs.events))
assert_raises(ValueError, csp.fit, epochs, y)
assert_raises(ValueError, csp.transform, epochs, y)
csp.n_components = n_components
sources = csp.transform(epochs_data)
assert_true(sources.shape[1] == n_components)
| 36.677083
| 77
| 0.656064
|
7b22bb5e9b6079f2dd4c073596fd4626980bced8
| 2,492
|
py
|
Python
|
EOSS/aws/Service.py
|
seakers/daphne-brain
|
1d703d468cd503a21395f986dd72e67b6e556451
|
[
"MIT"
] | null | null | null |
EOSS/aws/Service.py
|
seakers/daphne-brain
|
1d703d468cd503a21395f986dd72e67b6e556451
|
[
"MIT"
] | null | null | null |
EOSS/aws/Service.py
|
seakers/daphne-brain
|
1d703d468cd503a21395f986dd72e67b6e556451
|
[
"MIT"
] | null | null | null |
import boto3
from EOSS.aws.utils import dev_client, prod_client
from EOSS.aws.utils import eval_subnet, pprint
class Service:
def __init__(self, cluster_arn, dev=False):
if dev:
self.client = dev_client('ecs')
else:
self.client = prod_client('ecs')
self.cluster_arn = str(cluster_arn)
def build_service(self, problem_id, task_definition_arn):
print('\n\n---------- BUILDING NEW SERVICE ----------')
service_arn = self.does_service_exist(problem_id)
if service_arn is not None:
print('--> SERVICE ALREADY EXISTS')
return service_arn
else:
print('--> CREATING SERVICE')
service_name = Service.formulate_service_name(problem_id)
response = self.client.create_service(
cluster=self.cluster_arn,
serviceName=service_name,
taskDefinition=task_definition_arn,
desiredCount=1,
launchType='FARGATE',
networkConfiguration={
'awsvpcConfiguration': {
'subnets': [eval_subnet()],
'assignPublicIp': 'DISABLED'
}
},
schedulingStrategy='REPLICA',
deploymentController={'type': 'ECS'},
tags=[
{'key': 'PROBLEM_ID', 'value': str(problem_id)},
{'key': 'TYPE', 'value': 'EVAL'}
]
)
print('--> CREATE SERVICE RESPONSE', response)
return response['service']['serviceArn']
def does_service_exist(self, problem_id):
service_name = Service.formulate_service_name(problem_id)
service_arns = self.get_cluster_service_arns()
if not service_arns:
return None
response = self.client.describe_services(
cluster=self.cluster_arn,
services=service_arns,
)
for service in response['services']:
if service['serviceName'] == service_name:
return service['serviceArn']
return None
def get_cluster_service_arns(self):
response = self.client.list_services(
cluster=self.cluster_arn,
launchType='FARGATE',
)
return response['serviceArns']
@staticmethod
def formulate_service_name(problem_id):
return 'evaluator-service-' + str(problem_id)
| 32.789474
| 69
| 0.560995
|
a06b0e675447bff2b69e0195c8eeb25b9be068a9
| 11,135
|
py
|
Python
|
tempest/api/compute/admin/test_flavors_negative_xml.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | 3
|
2015-03-03T15:43:06.000Z
|
2016-10-24T06:12:40.000Z
|
tempest/api/compute/admin/test_flavors_negative_xml.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/compute/admin/test_flavors_negative_xml.py
|
rcbops-qe/tempest
|
88960aa32c473b64072671541a136dbae41b1d4c
|
[
"Apache-2.0"
] | 1
|
2018-10-09T06:32:04.000Z
|
2018-10-09T06:32:04.000Z
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute.admin import test_flavors_negative
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
class FlavorsAdminNegativeTestXML(test_flavors_negative.
FlavorsAdminNegativeTestJSON):
"""
Tests Flavors API Create and Delete that require admin privileges
"""
_interface = 'xml'
def flavor_clean_up(self, flavor_id):
resp, body = self.client.delete_flavor(flavor_id)
self.assertEqual(resp.status, 202)
self.client.wait_for_resource_deletion(flavor_id)
@test.attr(type=['negative', 'gate'])
def test_invalid_is_public_string(self):
# the 'is_public' parameter can be 'none/true/false' if it exists
self.assertRaises(exceptions.BadRequest,
self.client.list_flavors_with_detail,
{'is_public': 'invalid'})
@test.attr(type=['negative', 'gate'])
def test_create_flavor_using_invalid_ram(self):
# the 'ram' attribute must be positive integer
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
flavor_name, -1, self.vcpus,
self.disk, new_flavor_id)
@test.attr(type=['negative', 'gate'])
def test_create_flavor_using_invalid_vcpus(self):
# the 'vcpu' attribute must be positive integer
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
flavor_name, self.ram, -1,
self.disk, new_flavor_id)
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_name_length_less_than_1(self):
# ensure name length >= 1
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
'',
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_name_length_exceeds_255(self):
# ensure name do not exceed 255 characters
new_flavor_name = 'a' * 256
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_name(self):
# the regex of flavor_name is '^[\w\.\- ]*$'
invalid_flavor_name = data_utils.rand_name('invalid-!@#$%-')
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
invalid_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_flavor_id(self):
# the regex of flavor_id is '^[\w\.\- ]*$', and it cannot contain
# leading and/or trailing whitespace
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
invalid_flavor_id = '!@#$%'
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
invalid_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_id_length_exceeds_255(self):
# the length of flavor_id should not exceed 255 characters
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
invalid_flavor_id = 'a' * 256
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
invalid_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_root_gb(self):
# root_gb attribute should be non-negative ( >= 0) integer
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
-1,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_ephemeral_gb(self):
# ephemeral_gb attribute should be non-negative ( >= 0) integer
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=-1,
swap=self.swap,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_swap(self):
# swap attribute should be non-negative ( >= 0) integer
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=-1,
rxtx=self.rxtx,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_rxtx_factor(self):
# rxtx_factor attribute should be a positive float
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=-1.5,
is_public='False')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_with_invalid_is_public(self):
# is_public attribute should be boolean
new_flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.BadRequest,
self.client.create_flavor,
new_flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx,
is_public='Invalid')
@test.attr(type=['negative', 'gate'])
def test_create_flavor_already_exists(self):
flavor_name = data_utils.rand_name(self.flavor_name_prefix)
new_flavor_id = str(uuid.uuid4())
resp, flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
self.assertEqual(200, resp.status)
self.addCleanup(self.flavor_clean_up, flavor['id'])
self.assertRaises(exceptions.Conflict,
self.client.create_flavor,
flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
ephemeral=self.ephemeral,
swap=self.swap,
rxtx=self.rxtx)
@test.attr(type=['negative', 'gate'])
def test_delete_nonexistent_flavor(self):
nonexistent_flavor_id = str(uuid.uuid4())
self.assertRaises(exceptions.NotFound,
self.client.delete_flavor,
nonexistent_flavor_id)
| 41.394052
| 78
| 0.523125
|
46c2a8c2ab192d7d6f7dc9d259d4a02f0702b698
| 5,415
|
py
|
Python
|
setup.py
|
anasitomtn/dvc
|
d54aa315e9c5bba18146b339b1076634b43368d3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
anasitomtn/dvc
|
d54aa315e9c5bba18146b339b1076634b43368d3
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
anasitomtn/dvc
|
d54aa315e9c5bba18146b339b1076634b43368d3
|
[
"Apache-2.0"
] | null | null | null |
import importlib.util
import os
from pathlib import Path
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py as _build_py
# Prevents pkg_resources import in entry point script,
# see https://github.com/ninjaaron/fast-entry_points.
# This saves about 200 ms on startup time for non-wheel installs.
try:
import fastentrypoints # noqa: F401, pylint: disable=unused-import
except ImportError:
pass # not able to import when installing through pre-commit
# Read package meta-data from version.py
# see https://packaging.python.org/guides/single-sourcing-package-version/
pkg_dir = os.path.dirname(os.path.abspath(__file__))
version_path = os.path.join(pkg_dir, "dvc", "version.py")
spec = importlib.util.spec_from_file_location("dvc.version", version_path)
dvc_version = importlib.util.module_from_spec(spec)
spec.loader.exec_module(dvc_version)
version = dvc_version.__version__ # noqa: F821
# To achieve consistency between the build version and the one provided
# by your package during runtime, you need to **pin** the build version.
#
# This custom class will replace the version.py module with a **static**
# `__version__` that your package can read at runtime, assuring consistency.
#
# References:
# - https://docs.python.org/3.7/distutils/extending.html
# - https://github.com/python/mypy
class build_py(_build_py):
def pin_version(self):
path = os.path.join(self.build_lib, "dvc")
self.mkpath(path)
with open(os.path.join(path, "version.py"), "w") as fobj:
fobj.write("# AUTOGENERATED at build time by setup.py\n")
fobj.write(f'__version__ = "{version}"\n')
def run(self):
self.execute(self.pin_version, ())
_build_py.run(self)
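# For illustration: the module pin_version writes into <build_lib>/dvc/version.py
# is just two lines, e.g.
#   # AUTOGENERATED at build time by setup.py
#   __version__ = "X.Y.Z"   # whatever `version` evaluated to above
# so an installed wheel reports exactly the version this setup.py was built from.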
install_requires = [
"ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
"colorama>=0.3.9",
"configobj>=5.0.6",
"gitpython>3",
"dulwich>=0.20.23",
"pygit2>=1.5.0",
"setuptools>=34.0.0",
"nanotime>=0.5.2",
"pyasn1>=0.4.1",
"voluptuous>=0.11.7",
"requests>=2.22.0",
"grandalf==0.6",
"distro>=1.3.0",
"appdirs>=1.4.3",
"ruamel.yaml>=0.17.11",
"toml>=0.10.1",
"funcy>=1.14",
"pathspec>=0.6.0,<0.9.0",
"shortuuid>=0.5.0",
"tqdm>=4.45.0,<5",
"packaging>=19.0",
"zc.lockfile>=1.2.1",
"flufl.lock>=3.2,<4",
"win-unicode-console>=0.5; sys_platform == 'win32'",
"pywin32>=225; sys_platform == 'win32' and python_version < '3.10'",
"networkx>=2.5",
"psutil>=5.8.0",
"pydot>=1.2.4",
"speedcopy>=2.0.1; python_version < '3.8' and sys_platform == 'win32'",
"dataclasses==0.7; python_version < '3.7'",
"importlib-metadata>=1.4; python_version < '3.8'",
"flatten_dict>=0.4.1,<1",
"tabulate>=0.8.7",
"pygtrie>=2.3.2",
"dpath>=2.0.1,<3",
"shtab>=1.3.4,<2",
"rich>=10.0.0",
"dictdiffer>=0.8.1",
"python-benedict>=0.21.1",
"pyparsing==2.4.7",
"typing_extensions>=3.7.4",
"fsspec>=2021.7.0",
"diskcache>=5.2.1",
]
# Extra dependencies for remote integrations
gs = ["gcsfs==2021.7.0"]
gdrive = ["pydrive2[fsspec]>=1.9.1"]
s3 = ["s3fs==2021.8.0", "aiobotocore[boto3]>1.0.1"]
azure = ["adlfs==2021.7.1", "azure-identity>=1.4.0", "knack"]
oss = ["ossfs==2021.7.5"]
ssh = ["sshfs>=2021.7.1"]
hdfs = ["pyarrow>=2.0.0; python_version < '3.10'"]
webhdfs = ["hdfs==2.5.8"]
webdav = ["webdav4>=0.9.0"]
# gssapi should not be included in all_remotes, because it doesn't have wheels
# for linux and mac, so it will fail to compile if user doesn't have all the
# requirements, including kerberos itself. Once all the wheels are available,
# we can start shipping it by default.
ssh_gssapi = ["sshfs[gssapi]>=2021.7.1"]
all_remotes = gs + s3 + azure + ssh + oss + gdrive + hdfs + webhdfs + webdav
terraform = ["python-terraform>=0.10.1", "jinja2>=2.0.0"]
tests_requirements = (
Path("test_requirements.txt").read_text().strip().splitlines()
)
setup(
name="dvc",
version=version,
description="Git for data scientists - manage your code and data together",
long_description=open("README.rst", encoding="UTF-8").read(),
author="Dmitry Petrov",
author_email="dmitry@dvc.org",
download_url="https://github.com/iterative/dvc",
license="Apache License 2.0",
install_requires=install_requires,
extras_require={
"all": all_remotes,
"gs": gs,
"gdrive": gdrive,
"s3": s3,
"azure": azure,
"oss": oss,
"ssh": ssh,
"ssh_gssapi": ssh_gssapi,
"hdfs": hdfs,
"webhdfs": webhdfs,
"webdav": webdav,
"terraform": terraform,
"tests": tests_requirements,
},
keywords="data-science data-version-control machine-learning git"
" developer-tools reproducibility collaboration ai",
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
url="http://dvc.org",
entry_points={"console_scripts": ["dvc = dvc.main:main"]},
cmdclass={"build_py": build_py},
zip_safe=False,
)
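# Usage sketch: each key in extras_require above maps to a pip extra, e.g.
#   pip install "dvc[s3]"      # pulls in the pinned s3fs/aiobotocore dependencies
#   pip install "dvc[all]"     # installs every remote listed in all_remotes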
| 33.018293 | 79 | 0.640997 |
c0e822a77f246a9436696cdc1da38792088e4989 | 286,198 | py | Python | tests/test_backend.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | ["Apache-2.0"] | null | null | null | tests/test_backend.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | ["Apache-2.0"] | null | null | null | tests/test_backend.py | garymm/tensorflow-onnx | a8f78ac7903493dee579304b7b1717aa9ec9706f | ["Apache-2.0"] | null | null | null |
# SPDX-License-Identifier: Apache-2.0
"""Unit tests using onnx backends."""
import os
import unittest
from distutils.version import LooseVersion
from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal
import tensorflow as tf
from tensorflow.python.ops import lookup_ops
from backend_test_base import Tf2OnnxBackendTestBase
# pylint reports unused-wildcard-import which is false positive, __all__ is defined in common
from common import * # pylint: disable=wildcard-import,unused-wildcard-import
from tf2onnx import constants, utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.tf_loader import is_tf2, tf_placeholder_with_default, tf_placeholder
from tf2onnx.onnx_opset.signal import make_dft_constant
# pylint: disable=missing-docstring,invalid-name,unused-argument,function-redefined,cell-var-from-loop
NCHW_TO_NHWC = [0, 2, 3, 1]
NHWC_TO_NCHW = [0, 3, 1, 2]
HWCN_TO_NCHW = [3, 2, 0, 1]
_STRIDE1x1 = [1, 1, 1, 1]
_KERNEL3x3 = [3, 3, 1, 1]
_DILATIONS1x1 = [1, 1, 1, 1]
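# For reference: these are axis permutations for np.transpose / tf.transpose,
# e.g. an array of shape (N, C, H, W) transposed with NCHW_TO_NHWC ([0, 2, 3, 1])
# becomes (N, H, W, C).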
# names for input and outputs for tests
_TFINPUT = "input"
_INPUT = "input:0"
_TFINPUT1 = "input1"
_INPUT1 = "input1:0"
_TFINPUT2 = "input2"
_INPUT2 = "input2:0"
_TFINPUT3 = "input3"
_INPUT3 = "input3:0"
_TFOUTPUT = "output"
_OUTPUT = "output:0"
_TFOUTPUT1 = "output1"
_OUTPUT1 = "output1:0"
_TFOUTPUT2 = "output2"
_OUTPUT2 = "output2:0"
_TFOUTPUT3 = "output3"
_OUTPUT3 = "output3:0"
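# The aliases below select the right op for the running TF version, so the tests
# can call a single name (e.g. multinomial, dropout, resize_bilinear) whether the
# tf 1.x or 2.x API surface is available.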
if is_tf2():
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
reverse_v2 = tf.compat.v1.reverse_v2
random_normal = tf.compat.v1.random_normal
random_uniform = tf.compat.v1.random_uniform
fused_batch_norm = tf.compat.v1.nn.fused_batch_norm
dropout = tf.compat.v1.nn.dropout
resize_nearest_neighbor = tf.compat.v1.image.resize_nearest_neighbor
quantize_and_dequantize = tf.quantization.quantize_and_dequantize
resize_bilinear = tf.compat.v1.image.resize_bilinear
resize_bilinear_v2 = tf.compat.v2.image.resize
is_nan = tf.math.is_nan
is_inf = tf.math.is_inf
floormod = tf.math.floormod
matrix_diag_part = tf.compat.v1.matrix_diag_part
fake_quant_with_min_max_args = tf.quantization.fake_quant_with_min_max_args
fake_quant_with_min_max_vars = tf.quantization.fake_quant_with_min_max_vars
elif LooseVersion(tf.__version__) >= "1.13":
conv2d_backprop_input = tf.compat.v1.nn.conv2d_backprop_input
conv3d_transpose = tf.compat.v1.nn.conv3d_transpose
multinomial = tf.compat.v1.random.multinomial
space_to_batch_nd = tf.compat.v1.space_to_batch_nd
batch_to_space_nd = tf.compat.v1.batch_to_space_nd
reverse_v2 = tf.compat.v1.reverse_v2
random_normal = tf.compat.v1.random_normal
random_uniform = tf.compat.v1.random_uniform
fused_batch_norm = tf.compat.v1.nn.fused_batch_norm
dropout = tf.compat.v1.nn.dropout
quantize_and_dequantize = tf.compat.v1.quantization.quantize_and_dequantize
resize_nearest_neighbor = tf.compat.v1.image.resize_nearest_neighbor
resize_bilinear = tf.compat.v1.image.resize_bilinear
if LooseVersion(tf.__version__) >= "1.14":
resize_bilinear_v2 = tf.compat.v2.image.resize
is_nan = tf.math.is_nan
is_inf = tf.math.is_inf
floormod = tf.floormod
matrix_diag_part = tf.compat.v1.matrix_diag_part
fake_quant_with_min_max_args = tf.compat.v1.quantization.fake_quant_with_min_max_args
fake_quant_with_min_max_vars = tf.compat.v1.quantization.fake_quant_with_min_max_vars
else:
conv2d_backprop_input = tf.nn.conv2d_backprop_input
conv3d_transpose = tf.nn.conv3d_transpose
multinomial = tf.multinomial
space_to_batch_nd = tf.space_to_batch_nd
batch_to_space_nd = tf.batch_to_space_nd
reverse_v2 = tf.reverse_v2
random_normal = tf.random_normal
random_uniform = tf.random_uniform
fused_batch_norm = tf.nn.fused_batch_norm
dropout = tf.nn.dropout
resize_nearest_neighbor = tf.image.resize_nearest_neighbor
resize_bilinear = tf.image.resize_bilinear
is_nan = tf.is_nan
is_inf = tf.is_inf
floormod = tf.floormod
matrix_diag_part = tf.matrix_diag_part
def make_xval(shape):
x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
return x_val
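# e.g. make_xval([2, 2]) -> array([[0., 1.], [2., 3.]], dtype=float32)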
def get_conv_getdata(kind=1):
if kind == 0:
# generate all combinations (costly)
dims = [
("padding", ["SAME", "VALID"]),
("input_sizes", [[32, 35, 35, 3], [32, 17, 17, 3], [1, 28, 28, 3], [32, 8, 8, 3]]),
("filter_sizes", [[1, 3, 3, 1], [1, 2, 2, 1], [1, 5, 5, 1], [1, 1, 1, 1], [1, 5, 2, 1], [1, 2, 5, 1]]),
("strides", [[1, 2, 2, 1], [1, 1, 1, 1]]),
]
values = [key_values[1] for key_values in dims]
for idx, v in enumerate(product(*values)):
            if True or idx == 30:  # always true; the idx == 30 filter is a debugging leftover
yield (idx,) + v
elif kind == 1:
        # some combinations that give decent padding coverage
data = [
('SAME', [32, 35, 35, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
('SAME', [32, 35, 35, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
('SAME', [32, 35, 35, 3], [1, 1, 1, 1], [1, 1, 1, 1]),
('SAME', [32, 35, 35, 3], [1, 5, 2, 1], [1, 2, 2, 1]),
('SAME', [32, 35, 35, 3], [1, 2, 5, 1], [1, 2, 2, 1]),
('SAME', [32, 35, 35, 3], [1, 2, 5, 1], [1, 1, 1, 1]),
('SAME', [1, 28, 28, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
('SAME', [1, 28, 28, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
('SAME', [1, 28, 28, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
('SAME', [1, 28, 28, 3], [1, 2, 2, 1], [1, 1, 1, 1]),
('SAME', [1, 28, 28, 3], [1, 5, 5, 1], [1, 2, 2, 1]),
('SAME', [1, 28, 28, 3], [1, 5, 5, 1], [1, 1, 1, 1]),
('SAME', [1, 28, 28, 3], [1, 5, 2, 1], [1, 2, 2, 1]),
('SAME', [32, 8, 8, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
('SAME', [32, 8, 8, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
('VALID', [32, 35, 35, 3], [1, 3, 3, 1], [1, 1, 1, 1]),
('VALID', [32, 35, 35, 3], [1, 2, 2, 1], [1, 2, 2, 1]),
]
for idx, v in enumerate(data):
yield (idx,) + v
else:
raise ValueError("kind not known")
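# Each yielded tuple is (idx, padding, input_sizes, filter_sizes, strides); the
# conv/pool tests below unpack it as `_, padding, x_shape, ksize, strides`.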
def get_maxpoolwithargmax_getdata():
data = [
('SAME', [1, 3, 3, 2], [1, 3, 3, 1], [1, 2, 2, 1]),
('SAME', [2, 5, 5, 3], [1, 4, 4, 1], [1, 2, 2, 1]),
('SAME', [2, 10, 5, 1], [1, 2, 2, 1], [1, 2, 2, 1]),
('SAME', [2, 10, 5, 3], [1, 4, 4, 1], [1, 1, 1, 1]),
('VALID', [2, 3, 3, 3], [1, 3, 3, 1], [1, 2, 2, 1]),
('VALID', [2, 5, 5, 3], [1, 4, 4, 1], [1, 2, 2, 1]),
]
for idx, v in enumerate(data):
yield (idx,) + v
class BackendTests(Tf2OnnxBackendTestBase):
def _run_test_case(self, func, output_names_with_port, feed_dict, **kwargs):
kwargs["convert_var_to_const"] = False
return self.run_test_case(func, feed_dict, [], output_names_with_port, **kwargs)
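    # All tests in this class funnel through the helper above, which disables
    # convert_var_to_const and passes an empty explicit input-name list to the
    # base-class runner.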
def _test_expand_dims_known_rank(self, idx):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, idx)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_known_rank(self):
for i in [-1, 0, 1, -2]:
self._test_expand_dims_known_rank(i)
def test_expand_dims_one_unknown_rank(self):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, 0)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_with_list(self):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, [[0]])
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def _test_expand_dims_more_unknown_rank(self, idx):
x_val = make_xval([3, 4])
def func(x):
op = tf.expand_dims(x, idx)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_expand_dims_more_unknown_rank(self):
for i in [-1, 0, 1, -2]:
self._test_expand_dims_more_unknown_rank(i)
@check_opset_min_version(13, "Unsqueeze")
def test_expand_dims_nonconst_dims(self):
x_val = make_xval([3, 4])
y_val = np.array([-1], dtype=np.int32)
def func(x, y):
op = tf.expand_dims(x, y)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(9, "ConstantOfShape")
def test_layer_normalization(self):
x_val = make_xval([3, 4, 5])
scale_val = make_xval([3, 4, 5]) * 0.2
bias_val = make_xval([3, 4, 5]) * 0.1
def func(x):
mean = tf.reduce_mean(x, axis=[2], keepdims=True)
centered = tf.subtract(x, mean)
variance = tf.add(tf.reduce_mean(tf.square(centered), axis=[2], keepdims=True), 0.001)
inv_std_dev = tf.math.rsqrt(variance)
normalized = tf.multiply(centered, inv_std_dev)
scaled = tf.multiply(normalized, scale_val)
biased = tf.add(scaled, bias_val)
return tf.identity(biased, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
graph_validator=lambda g: (check_op_count(g, "InstanceNormalization", 1)))
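    # The graph_validator above asserts that the hand-built mean/variance/rsqrt
    # pattern is fused by the converter into a single InstanceNormalization node.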
@check_opset_min_version(9, "ConstantOfShape")
def test_eye_non_const1(self):
# tf.eye(num_rows), num_rows is not const here
x_val = np.array(5, dtype=np.int32)
def func(x):
y = tf.eye(x, dtype=tf.int32)
y1 = tf.eye(x, dtype=tf.int64)
y2 = tf.eye(x, dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), tf.identity(y1, name=_TFOUTPUT1), tf.identity(y2, name=_TFOUTPUT2)
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val}, rtol=0)
# tf.eye(num_rows, num_columns), both num_rows and num_columns are not const here
x_val = np.array([5, 10], dtype=np.int32)
def func(x):
y = tf.eye(x[0], x[1], dtype=tf.int32)
y1 = tf.eye(x[0], x[1], dtype=tf.int64)
y2 = tf.eye(x[0], x[1], dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), tf.identity(y1, name=_TFOUTPUT1), tf.identity(y2, name=_TFOUTPUT2)
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val}, rtol=0)
@check_tf_min_version("1.11", "eye has bug when version is below 1.11")
@check_opset_min_version(9, "ConstantOfShape")
def test_eye_non_const2(self):
# tf.eye(num_rows), num_rows is not const here
for np_dtype in [np.int32, np.int64, np.float32, np.float64]:
x_val = np.array(5, dtype=np_dtype)
def func(x):
y = tf.eye(x, dtype=tf.int32)
y1 = tf.eye(x, dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT),\
tf.identity(y1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val}, rtol=0)
# tf.eye(num_rows, num_columns), both num_rows and num_columns are not const here
for np_dtype in [np.int32, np.int64, np.float32, np.float64]:
x_val = np.array([5, 10], dtype=np_dtype)
def func(x):
y = tf.eye(x[0], x[1], dtype=tf.int32)
y1 = tf.eye(x[0], x[1], dtype=tf.float32)
return tf.identity(y, name=_TFOUTPUT), \
tf.identity(y1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val}, rtol=0)
@check_opset_min_version(7, "trig")
def test_trig_ops(self):
for op in [tf.sin, tf.cos, tf.tan, tf.asin, tf.acos, tf.atan]:
x_val = make_xval([3, 4])
def func(x):
op_ = op(x)
return tf.identity(op_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-06)
@check_opset_min_version(9, "trigh")
def test_atrig_ops(self):
for op in [tf.sinh, tf.cosh, tf.atanh, tf.asinh, tf.acosh]:
x_val = make_xval([3, 4])
def func(x):
op_ = op(x)
return tf.identity(op_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_opset_min_version(7, "multinomial")
def test_multinomial(self):
x_val = np.array([[10., 10.]], dtype=np.float32)
def func(x):
op = multinomial(tf.math.log(x), 5, output_dtype=tf.int32)
return tf.identity(op, name=_TFOUTPUT)
# since returned indexes are random we can only check type and shape
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False,
check_shape=True, check_dtype=True)
@skip_caffe2_backend()
@check_opset_min_version(7, "multinomial")
def test_multinomial1(self):
shape = [2, 10]
x_val = np.ones(np.prod(shape)).astype("float32").reshape(shape)
def func(x):
op = multinomial(x, 2, output_dtype=tf.int32)
return tf.identity(op, name=_TFOUTPUT)
# since returned indexes are random we can only check type and shape
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False,
check_shape=True, check_dtype=True)
def test_maxpool(self):
for p in get_conv_getdata():
_, padding, x_shape, ksize, strides = p
x_val = make_xval(x_shape)
def func(x):
mp = tf.nn.max_pool(x, ksize, strides, padding=padding)
return tf.identity(mp, name=_TFOUTPUT)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.15", "required for max_pool args")
def test_maxpool_int(self):
x_shape = [8, 16, 16, 3]
x_val = make_xval(x_shape).astype("int32")
def func(x):
mp = tf.nn.max_pool(x, ksize=[2], strides=[1, 2, 2, 1], padding="SAME")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tf_cpu("only tf_gpu can run maxpool with NCHW format")
def test_maxpool_gpu(self):
        # make sure the converter behaves well when the data format is NCHW;
        # when the data format is NCHW, only the GPU build of tensorflow can run it.
ksize = [1, 1, 2, 2]
strides = [1, 1, 2, 2]
x_val = make_xval([1, 3, 50, 80])
for padding in ["SAME", "VALID"]:
def func(x):
mp = tf.nn.max_pool(x, ksize, strides, padding=padding, data_format="NCHW")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("AveragePool")
def test_avgpool(self):
for p in get_conv_getdata(kind=0):
_, padding, x_shape, ksize, strides = p
x_val = make_xval(x_shape)
def func(x):
mp = tf.nn.avg_pool(x, ksize, strides, padding=padding)
return tf.identity(mp, name=_TFOUTPUT)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-06)
@check_onnxruntime_incompatibility("AveragePool")
@skip_tf_cpu("only tf_gpu can run avgpool with NCHW format")
def test_avgpool_gpu(self):
ksize = [1, 1, 2, 2]
strides = [1, 1, 2, 2]
x_val = make_xval([1, 3, 50, 80])
for padding in ["SAME", "VALID"]:
def func(x):
mp = tf.nn.avg_pool(x, ksize, strides, padding=padding, data_format="NCHW")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def _conv_test(self, x_val, w, strides=None, padding="VALID", dilations=None, rtol=1e-07):
if strides is None:
strides = _STRIDE1x1
if dilations is None:
dilations = _DILATIONS1x1
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv2d(x, kernel, strides=strides, padding=padding, dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=rtol)
def test_conv2d_1(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_2(self):
x_val = np.array([[4, 3, 1, 0],
[2, 1, 0, 1],
[1, 2, 4, 1],
[3, 1, 0, 2]], dtype=np.float32).reshape([1, 4, 4, 1])
w = np.array([[1, 0, 1],
[2, 1, 0],
[0, 0, 1]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_3(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_test(x_val, w)
def test_conv2d_4(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.random.random_sample(_KERNEL3x3).astype(np.float32)
self._conv_test(x_val, w, padding="SAME", rtol=1e-05)
def test_conv2d_5(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
kernel_shape = [3, 3, 1, 2]
w = np.random.random_sample(kernel_shape).astype(np.float32)
self._conv_test(x_val, w, padding="SAME", rtol=1e-05)
def test_conv2d_6(self):
x_shape = [1, 35, 35, 288] # out: [1, 17, 17, 384]
kernel_shape = [3, 3, 288, 384]
strides = [1, 2, 2, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
self._conv_test(x_val, kernel_val, strides=strides, padding="VALID", rtol=1.1e-05)
@check_tf_min_version("1.14", "tf 1.14 needed for explicit padding")
def test_conv2d_explicit_padding(self):
x_shape = [1, 35, 35, 288]
kernel_shape = [3, 3, 288, 384]
pads = [[0, 0], [1, 2], [3, 4], [0, 0]]
strides = [1, 1, 1, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
self._conv_test(x_val, kernel_val, strides=strides, padding=pads, rtol=1.1e-05)
def test_conv2d_dilation_same(self):
x_shape = [1, 35, 35, 288] # NHWC
kernel_shape = [3, 3, 288, 384] # [filter_height, filter_width, in_channels, out_channels]
strides = [1, 1, 1, 1] # NHWC
dilations = [1, 3, 1, 1] # NHWC
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
self._conv_test(x_val, kernel_val, strides=strides, padding="SAME", dilations=dilations, rtol=1.1e-05)
def test_conv2d_dilation_strides_same(self):
x_shape = [1, 35, 35, 288] # NHWC
kernel_shape = [3, 3, 288, 384] # [filter_height, filter_width, in_channels, out_channels]
strides = [1, 2, 4, 1] # NHWC
dilations = [1, 3, 1, 1] # NHWC
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
self._conv_test(x_val, kernel_val, strides=strides, padding="SAME", dilations=dilations, rtol=1e-05)
def test_conv3d_1(self):
strides = [1, 1, 1, 1, 1]
dilations = [1, 1, 1, 1, 1]
x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
w = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)
padding = "VALID"
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv3d(x, kernel, strides=strides, padding=padding, data_format="NDHWC", dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_conv3d_2(self):
strides = [1, 2, 3, 1, 1]
dilations = [1, 1, 1, 1, 1]
x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
w = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)
padding = "VALID"
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv3d(x, kernel, strides=strides, padding=padding, data_format="NDHWC", dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_conv3d_3(self):
strides = [1, 2, 3, 1, 1]
dilations = [1, 1, 1, 1, 1]
x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
w = np.random.random_sample([2, 3, 4, 5, 6]).astype(np.float32)
padding = "SAME"
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv3d(x, kernel, strides=strides, padding=padding, data_format="NDHWC", dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_avgpool3d(self):
strides = [1, 1, 1, 1, 1]
ksize = [1, 2, 2, 3, 1]
x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
padding = "VALID"
def func(x):
mp = tf.nn.avg_pool3d(x, ksize, strides, padding=padding, data_format="NDHWC")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_maxpool3d(self):
strides = [1, 1, 1, 1, 1]
ksize = [1, 2, 2, 3, 1]
x_val = np.random.random_sample([2, 10, 9, 8, 5]).astype(np.float32)
padding = "VALID"
def func(x):
mp = tf.nn.max_pool3d(x, ksize, strides, padding=padding, data_format="NDHWC")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14", "tf.nn.avg_pool2d doesn't exist before tf 1.14")
def test_avgpool2d(self):
strides = [1, 1, 1, 1]
ksize = [1, 2, 3, 1]
x_val = make_xval([2, 10, 12, 3])
padding = "VALID"
def func(x):
mp = tf.nn.avg_pool2d(x, ksize, strides, padding=padding, data_format="NHWC")
return tf.identity(mp, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.7", "tf only support dilation is 1 for now")
def test_conv2d_7(self):
x_shape = [1, 35, 35, 288] # out: [1, 17, 17, 384]
kernel_shape = [3, 3, 288, 384]
strides = [1, 2, 2, 1]
dilations = [1, 3, 3, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
self._conv_test(x_val, kernel_val, strides=strides, padding="VALID",
dilations=dilations, rtol=1e-05)
def test_conv2d_8(self):
for input_shape in [[10, 10], [5, 5]]:
x_val = make_xval((1, 1, *input_shape)).transpose(NCHW_TO_NHWC)
w = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)
strides = [1, 2, 2, 1]
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv2d(x, kernel, strides=strides, padding="SAME")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-5)
def test_conv2d_with_pad_valid(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)
strides = [1, 1, 1, 1]
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
x_pad = tf.pad(x, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]])
conv = tf.nn.conv2d(x_pad, kernel, strides=strides, padding="VALID")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-5)
def test_conv2d_with_pad_same(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w = np.random.random_sample([3, 3, 1, 2]).astype(np.float32)
strides = [1, 1, 1, 1]
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
x_pad = tf.pad(x, paddings=[[0, 0], [2, 2], [2, 2], [0, 0]])
conv = tf.nn.conv2d(x_pad, kernel, strides=strides, padding="SAME")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-5)
def test_conv2d_transpose(self):
x_shape = [2, 6, 4, 3]
output_shape = [2, 13, 9, 2]
kernel_shape = [3, 3, 2, 3]
strides = [1, 2, 2, 1]
x_val = make_xval(x_shape)
kernel_val = make_xval(kernel_shape)
def func(x):
f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
conv = tf.nn.conv2d_transpose(x, f, output_shape, strides=strides, padding="VALID")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_min_version("0.5.0", "conv transpose is added since onnxruntime-0.5.0")
def test_conv2d_transpose2(self):
# output_shape is dynamic
extra_opset = [utils.make_opsetid(constants.MICROSOFT_DOMAIN, 1)]
process_args = {"extra_opset": extra_opset}
x_shape = [2, 6, 4, 3]
output_shape = np.array([2, 13, 9, 2]).astype(np.int32)
kernel_shape = [3, 3, 2, 3]
strides = [1, 2, 2, 1]
x_val = make_xval(x_shape)
kernel_val = make_xval(kernel_shape)
def func(x, output_shape_placeholder):
f = tf.constant(kernel_val, name="kernel", dtype=tf.float32)
conv = tf.nn.conv2d_transpose(x, f, output_shape_placeholder, strides=strides, padding="VALID")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: output_shape},
rtol=1e-05, process_args=process_args)
def test_depthwiseconv_0(self):
x_shape = [1, 3, 4, 3]
kernel_shape = [3, 3, 3, 3]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
def func(x):
kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.08)
def test_depthwiseconv_1(self):
x_shape = [1, 112, 112, 32]
kernel_shape = [3, 3, 32, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
def func(x):
kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
conv = tf.nn.depthwise_conv2d(x, kernel, strides=_STRIDE1x1, padding='VALID')
return tf.identity(conv, name=_TFOUTPUT)
        # rtol is a bit high; a couple of values show a relatively large error. Maybe use different input data.
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.08)
def test_depthwiseconv_3(self):
x_shape = [1, 112, 112, 32]
kernel_shape = [3, 3, 32, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
def func(x):
kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
return tf.identity(conv, name=_TFOUTPUT)
        # rtol is a bit high; a couple of values show a relatively large error. Maybe use different input data.
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
def test_depthwiseconv_shared_kernel(self):
x_shape = [1, 3, 4, 3]
kernel_shape = [3, 3, 3, 3]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
def func(x, y):
kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
conv1 = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
conv2 = tf.nn.depthwise_conv2d(y, kernel, strides=[1, 1, 1, 1], padding='VALID')
conv = tf.add(conv1, conv2)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_val}, rtol=0.08)
@check_tf_min_version("1.14", "tf depthwise_conv2d dilations")
@check_opset_min_version(11, "non-const pads")
def test_depthwiseconv_dilations(self):
x_shape = [1, 32, 32, 1]
kernel_shape = [5, 5, 1, 1]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
kernel_val = np.arange(1, 1 + np.prod(kernel_shape)).astype("float32").reshape(kernel_shape)
def func(x):
kernel = tf.constant(kernel_val, dtype=tf.float32, name='k')
conv = tf.nn.depthwise_conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME', dilations=[3, 4])
return tf.identity(conv, name=_TFOUTPUT)
        # rtol is a bit high; a couple of values show a relatively large error. Maybe use different input data.
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_dropout(self):
x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
# Define a scope for reusing the variables
def func(x):
is_training = tf.constant(False, tf.bool)
x_ = tf.identity(x)
fc1 = tf.layers.dropout(x_, rate=.1, training=is_training)
return tf.identity(fc1, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
check_op_count(g, "RandomUniformLike", 0)))
def test_nn_dropout(self):
x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
# Define a scope for reusing the variables
def func(x, keep_prob):
x_ = tf.identity(x)
fc1 = dropout(x_, keep_prob)
return tf.identity(fc1, name=_TFOUTPUT)
# when constant_fold is enabled, PlaceholderWithDefault will be folded into either a const or a placeholder.
# here we set it False to test PlaceholderWithDefault bug: https://github.com/onnx/tensorflow-onnx/pull/446
# Dropout with ratio 1.0 will be optimized so that only one Identity is left
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: np.array(1., dtype=np.float32)},
graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
check_op_count(g, "RandomUniformLike", 0)))
@check_tf_min_version("1.13")
def test_nn_dropout_with_rate(self):
rate = tf.constant(0., name="rate")
x_val = np.ones([1, 24, 24, 3], dtype=np.float32)
# Define a scope for reusing the variables
def func(x):
x_ = tf.identity(x)
fc1 = tf.nn.dropout(x_, rate=rate)
return tf.identity(fc1, name="output")
feed_dict = {"input_1:0": x_val}
input_names_with_port = ["input_1:0"]
output_names_with_port = ["output:0"]
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port,
graph_validator=lambda g: (check_op_count(g, "RandomUniform", 0) and
check_op_count(g, "RandomUniformLike", 0)))
def test_conv2d_with_input_transpose(self):
x_shape = [2, 32, 32, 3]
kernel_shape = [3, 3, 3, 3]
x_val = make_xval(x_shape)
x_val_for_onnx = x_val.transpose(NHWC_TO_NCHW)
def func(x):
kernel = tf.constant(make_xval(kernel_shape), dtype=tf.float32, name='k')
conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME")
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05,
process_args={"inputs_as_nchw": [_INPUT]},
onnx_feed_dict={_INPUT: x_val_for_onnx})
@skip_tflite("TFlite adds ops that obscure pattern")
@check_tf_min_version("1.15")
def test_conv1d_dilations_rewriter(self):
x_shape = [2, 32, 3]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
t = tf.keras.layers.Conv1D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
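        # The Reshape count check asserts the dilation rewriter fired, presumably
        # because TF's SpaceToBatch-based lowering of dilated convolutions would
        # otherwise leave reshape-style ops in the converted graph.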
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
def test_conv2d_biasadd_rewriter(self):
x_shape = [2, 3, 32, 16]
x_val = make_xval(x_shape)
def func(x):
middles = tf.keras.layers.ZeroPadding2D(
padding=(0, 4),
data_format="channels_first",
name="padding"
)(x)
t = tf.keras.layers.Conv2D(
filters=768,
kernel_size=3,
strides=1,
use_bias=True,
data_format="channels_first",
name="conv2d"
)(middles)
return tf.identity(t, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Add", 0, disabled=False))
@check_tf_min_version("1.15")
def test_conv2d_dilations_rewriter(self):
x_shape = [2, 32, 16, 3]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
t = tf.keras.layers.Conv2D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
def func(x):
t = tf.keras.layers.DepthwiseConv2D(kernel_size=3, dilation_rate=3, padding=p)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
def test_nchw_conv2d_dilations_rewriter(self):
x_shape = [2, 3, 32, 16]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
t = tf.keras.layers.Conv2D(
filters=768,
kernel_size=3,
dilation_rate=3,
padding=p,
data_format='channels_first'
)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
def func(x):
t = tf.keras.layers.DepthwiseConv2D(
kernel_size=3,
dilation_rate=3,
padding=p,
data_format='channels_first'
)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tflite("TFlite adds ops that obscure pattern")
@allow_missing_shapes("Rewriting makes some shapes known")
def test_conv2d_dilations_rewriter_unknown_shape(self):
x_shape = [2, 32, 16, 3]
x_val = make_xval(x_shape)
def func():
x = tf_placeholder(tf.float32, [2, None, None, 3], name=_TFINPUT)
t = tf.keras.layers.Conv2D(filters=768, kernel_size=3, dilation_rate=3, padding="VALID")
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
as_session=True, premade_placeholders=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tflite("TFlite adds ops that obscure pattern")
@skip_tf_cpu("only tf_gpu can run conv2d with NCHW format")
@allow_missing_shapes("Rewriting makes some shapes known")
def test_nchw_conv2d_dilations_rewriter_unknown_shape(self):
x_shape = [2, 3, 32, 16]
x_val = make_xval(x_shape)
def func():
x = tf_placeholder(tf.float32, [2, 3, None, None], name=_TFINPUT)
t = tf.keras.layers.Conv2D(
filters=768,
kernel_size=3,
dilation_rate=3,
padding="VALID",
data_format='channels_first'
)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
as_session=True, premade_placeholders=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
def test_conv3d_dilations_rewriter(self):
x_shape = [2, 32, 16, 8, 3]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
t = tf.keras.layers.Conv3D(filters=768, kernel_size=3, dilation_rate=3, padding=p)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@check_tf_min_version("1.15")
@skip_tf_cpu("only tf_gpu can run conv3d with NCDHW format")
def test_ncdhw_conv3d_dilations_rewriter(self):
x_shape = [2, 3, 32, 16, 8]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
t = tf.keras.layers.Conv3D(
filters=768,
kernel_size=3,
dilation_rate=3,
padding=p,
data_format='channels_first'
)
t.build(x_shape)
y = t.call(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@skip_tf2("Uses tf.layers")
def test_conv1d_tf1_dilations_rewriter(self):
x_shape = [2, 32, 3]
x_val = make_xval(x_shape)
for p in ['SAME', 'VALID']:
def func(x):
y = tf.layers.conv1d(x, filters=768, kernel_size=3, dilation_rate=3, padding=p, name="conv1")
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2, as_session=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
@skip_tf2("Uses tf.layers")
def test_conv1d_tf1_dilations_rewriter_unknown_shape(self):
x_shape = [2, 32, 3]
x_val = make_xval(x_shape)
def func():
x = tf_placeholder(tf.float32, [2, None, 3], name=_TFINPUT)
y = tf.layers.conv1d(x, filters=768, kernel_size=3, dilation_rate=3, padding="VALID", name="conv1")
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04, atol=1e-2,
as_session=True, premade_placeholders=True,
graph_validator=lambda g: check_op_count(g, "Reshape", 0, disabled=False))
def test_lrn_default(self):
x_shape = [1, 3, 4, 3]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
op = tf.nn.local_response_normalization(x)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_lrn(self):
# can't set bias = 0
x_shape = [1, 2, 2, 8]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
op = tf.nn.local_response_normalization(x, depth_radius=4, bias=2, alpha=2, beta=1)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_incompatibility("Abs")
def test_abs(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.abs(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Add")
def test_const(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
y = tf.constant(x_val, name="y")
return tf.add(x, y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Add")
def test_add(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.add(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_placeholder(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_placeholder_with_default_use_default(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func():
x = tf.constant(x_val, name="x")
y = tf_placeholder_with_default(x, x_val.shape, name=_TFINPUT)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, as_session=True, premade_placeholders=True)
def test_placeholder_with_default_use_feed(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func():
x = tf.constant(x_val, name="x")
y = tf_placeholder_with_default(x, x_val.shape, name=_TFINPUT)
return tf.identity(y, name=_TFOUTPUT)
x_feed_val = np.array([11.0, 22.0, -33.0, -44.0], dtype=np.float32).reshape((2, 2))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_feed_val}, as_session=True, premade_placeholders=True)
def test_placeholder_with_default_computed_use_default(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
y_val = np.array([2.0, -4.0, 6.0, -8.0], dtype=np.float32).reshape((2, 2))
def func():
x = tf_placeholder(tf.float32, x_val.shape, name=_TFINPUT)
y = tf_placeholder(tf.float32, y_val.shape, name=_TFINPUT1)
total = tf.add(x, y)
z = tf_placeholder_with_default(total, x_val.shape, name=_TFINPUT2)
total2 = tf.add(total, z)
return tf.identity(total2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, as_session=True,
premade_placeholders=True, process_args={'use_default': [_TFINPUT2]})
def test_placeholder_with_default_computed_ignore_default(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
y_val = np.array([2.0, -4.0, 6.0, -8.0], dtype=np.float32).reshape((2, 2))
z_val = np.array([3.0, 6.0, 9.0, 10.0], dtype=np.float32).reshape((2, 2))
def func():
x = tf_placeholder(tf.float32, x_val.shape, name=_TFINPUT)
y = tf_placeholder(tf.float32, y_val.shape, name=_TFINPUT1)
total = tf.add(x, y)
z = tf_placeholder_with_default(total, x_val.shape, name=_TFINPUT2)
total2 = tf.add(total, z)
return tf.identity(total2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val}, as_session=True,
premade_placeholders=True, process_args={'ignore_default': [_TFINPUT2]})
def test_fold_cond_keras_learning_phase(self):
# keras_learning_phase can slip into frozen graphs and cause huge inefficiencies with If nodes.
# Should be removed and Ifs folded.
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func():
x = tf_placeholder(tf.float32, [None, None], name=_TFINPUT)
learning_phase = tf_placeholder_with_default(False, [], name="keras_learning_phase")
y = tf.cond(learning_phase, lambda: x * 2, lambda: x * 3)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, as_session=True, premade_placeholders=True,
graph_validator=lambda g: check_op_count(g, "If", 0, disabled=False))
@check_onnxruntime_incompatibility("Add")
def test_add_bcast(self):
x1_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
x2_val = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0], dtype=np.float32).reshape((2, 2, 2))
def func(x1, x2):
x_ = tf.add(x1, x2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x1_val, _INPUT1: x2_val})
@check_onnxruntime_incompatibility("Add")
def test_add_bcast1(self):
# example taken from onnx doc
x1_val = np.random.randn(3, 4, 5).astype(np.float32)
x2_val = np.random.randn(5).astype(np.float32)
def func(x1, x2):
x_ = tf.add(x1, x2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x1_val, _INPUT1: x2_val})
def test_matmul0(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.matmul(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("Issue with matmul with 2 copies of same input")
def test_matmul1(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0, 5.0, 6.0], dtype=np.float32).reshape((2, 3))
def func(x):
x_ = tf.matmul(x, x, transpose_a=True)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_matmul2(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
y_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x, y):
x_ = tf.matmul(x, y, transpose_b=True)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@unittest.skipIf(get_test_config().is_mac and get_test_config().is_onnxruntime_backend
and get_test_config().backend_version == "0.2.1", "onnxruntime 0.2.1 has bug on mac")
def test_matmul3(self):
x_shape = [1, 12, 256, 64]
x_val = np.arange(np.prod(x_shape)).astype("float32").reshape((x_shape))
def func(x, y):
x_ = tf.matmul(x, y, transpose_b=True)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_val}, rtol=1e-5)
@check_onnxruntime_incompatibility("Sub")
def test_sub(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.subtract(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Mul")
def test_multiply(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.multiply(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Div")
def test_div(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.realdiv(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "float equality")
def test_div_no_nan(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0, 5.0, 0.0, float("nan"), float("-inf"), float("inf")], dtype=np.float32)
y_val = np.array([1.0, 0.5, 0.0, -4.0, 0.0, 0.0, 0.0, 2.0, 0.0], dtype=np.float32)
def func(x, y):
x_ = tf.math.divide_no_nan(x, y)
return tf.identity(x_, name=_TFOUTPUT)
# TFLite expresses infinity as a value > 1e38
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, mtol=1e38)
@check_onnxruntime_incompatibility("Exp")
def test_exp(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.exp(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_incompatibility("Log")
def test_log(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.log(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Log")
def test_log_double(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float64).reshape((2, 2))
def func(x):
x_ = tf.math.log(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_gather(self):
x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
idx = np.array([1, 0, 2], dtype=np.int32)
idx_flattened = np.array([i * x_val.shape[1] + idx for i in range(0, x_val.shape[0])])
def func(x):
x_ = tf.gather(tf.reshape(x, [-1]), tf.constant(idx_flattened))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_gather_batch_dims_no_trans(self):
x_val = np.arange(2 * 2 * 3 * 5 * 4, dtype=np.float32).reshape((2, 2, 3, 5, 4))
idx_val = np.array([[[1, 0, 2, 0], [1, 1, 1, 0]], [[0, 0, 0, 0], [2, 1, 1, 0]]], dtype=np.int32)
def func(x, idx):
x_ = tf.gather(x, idx, batch_dims=2, axis=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: idx_val})
@check_tf_min_version("1.14")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_gather_batch_dims(self):
x_val = np.arange(2 * 2 * 3 * 5 * 4, dtype=np.float32).reshape((2, 2, 3, 5, 4))
idx_val = np.array([[[1, 0, 2, 0], [1, 1, 1, 0]], [[0, 0, 0, 0], [2, 1, 1, 0]]], dtype=np.int32)
def func(x, idx):
x_ = tf.gather(x, idx, batch_dims=2, axis=3)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: idx_val})
@check_opset_min_version(10, "Slice")
def test_roll_axis_scalar(self):
x_val = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
shift_val = np.array(4, dtype=np.int64)
axes_val = np.array(2, dtype=np.int32)
def func(x, shift):
x_ = tf.roll(x, shift, axes_val)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shift_val})
@check_opset_min_version(10, "Slice")
def test_roll_axis_vector(self):
x_val = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
shift_val = np.array([2, 3, 4], dtype=np.int32)
axes_val = np.array([1, 2, 1], dtype=np.int32)
def func(x, shift):
x_ = tf.roll(x, shift, axes_val)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shift_val})
@check_opset_min_version(10, "Slice")
def test_roll_neg_axis(self):
def func(input_ids):
shifted_input_ids = tf.cast(input_ids, tf.int32)
shifted_input_ids = tf.roll(shifted_input_ids, 1, axis=-1)
return tf.identity(shifted_input_ids, name=_TFOUTPUT)
x_val = np.array([[0, 1, 2, 3, 4, 5, 6, 7], [1, 2, 3, 4, 5, 6, 7, 8]], dtype=np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
def test_roll_neg_shift(self):
x_val = np.arange(4 * 3 * 5 * 2, dtype=np.float32).reshape((4, 3, 5, 2))
shift_val = np.array([-2, 13, -3], dtype=np.int32)
axes_val = np.array([1, 2, -1], dtype=np.int32)
def func(x, shift):
x_ = tf.roll(x, shift, axes_val)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shift_val})
@check_tf_min_version("2.2")
def test_large_model_format(self):
x_val = np.array([2.0], dtype=np.float32)
y_const = np.arange(2000, dtype=np.float32)
def func(x):
x_ = tf.multiply(x, tf.constant(y_const))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, large_model=True)
@check_target('rs6', 'GatherNd')
def test_gathernd(self):
x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[[0, 1], [1, 1]], [[1, 2], [0, 2]]], dtype=np.int32)
def func(x):
x_ = tf.gather_nd(x, tf.constant(indices))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float32)
indices = np.array([[[0], [2]], [[4], [7]], [[6], [1]]], dtype=np.int32)
def func(x):
x_ = tf.gather_nd(x, tf.constant(indices))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_target('rs6', 'GatherNd')
def test_gathernd_less_index(self):
x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
indices = np.array([[[0], [1]], [[2], [0]]], dtype=np.int32)
def func(x):
x_ = tf.gather_nd(x, tf.constant(indices))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
# shape: 2*2*2
x_val = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], dtype=np.float32)
indices = np.array([[[0, 0], [0, 1]], [[1, 0], [1, 1]]], dtype=np.int32)
def func(x):
x_ = tf.gather_nd(x, tf.constant(indices))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_opset_min_version(7, "tile")
def test_tile(self):
x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
def func(x):
multiple = tf.constant([2, 2])
x_ = tf.tile(x, multiple)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "tile")
def test_tile_const(self):
# Should be folded
x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
def func():
multiple = tf.constant([1000, 2])
x_ = tf.tile(tf.constant(x_val), multiple)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, graph_validator=lambda g: check_op_count(g, "Tile", 0, disabled=False))
@check_opset_min_version(7, "tile")
def test_tile_large_const(self):
# Should not be folded since it is so large
x_val = np.array([[0, 1], [2, 3]], dtype=np.float32)
def func():
multiple = tf.constant([1000000, 2])
x_ = tf.tile(tf.constant(x_val), multiple)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, graph_validator=lambda g: check_op_count(g, "Tile", 1, disabled=False))
@check_onnxruntime_incompatibility("Neg")
def test_neg(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.negative(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Mul")
def test_square(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.square(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Min")
def test_min(self):
x_val1 = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([4.0, 4.0, 4.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x1, x2):
mi = tf.minimum(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
x_val1 = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.int32).reshape((2, 2))
x_val2 = np.array([4.0, 4.0, 4.0, 4.0], dtype=np.int32).reshape((2, 2))
def func(x1, x2):
mi = tf.minimum(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@skip_caffe2_backend("issue with broadcasting scalar")
@check_onnxruntime_incompatibility("Sub")
def test_min_broadcast(self):
# tests if the broadcast for min/max is working
x_val1 = np.array([2.0, 16.0, 5.0, 1.6], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([4.0], dtype=np.float32)
def func(x1):
x2 = tf.constant(x_val2, dtype=tf.float32, name='x2')
mi = tf.minimum(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
@check_onnxruntime_incompatibility("Add")
def test_logicaland(self):
x_val1 = np.array([1, 0, 1, 1], dtype=np.bool).reshape((2, 2))
x_val2 = np.array([0, 1, 1, 1], dtype=np.bool).reshape((2, 2))
def func(x1, x2):
mi = tf.logical_and(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@check_onnxruntime_incompatibility("Greater")
def test_greater(self):
for op in [tf.greater, tf.greater_equal]:
x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
def func(x1, x2):
mi = op(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@check_onnxruntime_incompatibility("Greater")
def test_greater_unsupport_type(self):
for op in [tf.greater, tf.greater_equal]:
x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
def func(x1, x2):
mi = op(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@check_onnxruntime_incompatibility("Less")
def test_less(self):
x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
def func(x1, x2):
mi = tf.less(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@check_onnxruntime_incompatibility("Less")
def test_less_unsupport_type(self):
x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
def func(x1, x2):
mi = tf.less(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
@check_opset_min_version(11, "Equal")
def test_equal_float(self):
x_val1 = np.array([0., 1., 2., 3., 4., -1., -2], dtype=np.float32)
x_val2 = np.array([0., 1., 2.1, 3.5, 4.6, -1.1, -2.9], dtype=np.float32)
def func(x1, x2):
mi = tf.equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
def test_equal(self):
x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
def func(x1, x2):
mi = tf.equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
def func(x1, x2):
mi = tf.equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
def test_not_equal(self):
x_val1 = np.array([4, 2, 4, 1], dtype=np.int32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.int32).reshape((2, 2))
def func(x1, x2):
mi = tf.not_equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
x_val1 = np.array([4, 2, 4, 1], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([2, 4, 4, 1], dtype=np.float32).reshape((2, 2))
def func(x1, x2):
mi = tf.not_equal(x1, x2)
return tf.identity(mi, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2})
def test_sequeeze_no_axis_specified(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2, 1, 1))
def func(x):
x_ = tf.squeeze(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_sequeeze_no_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.squeeze(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Pad")
def test_sequeeze_no_axis_specified_unknown_rank(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
y_val = np.array([2, 1, 2, 1, 1], dtype=np.int64)
z_val = np.zeros((1, 2), dtype=np.int64)
def func(x, y, z):
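            # reshaping with a padded shape tensor hides the static rank from the
            # converter, forcing the unknown-rank squeeze handling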
y_ = tf.pad(y, z)
x_ = tf.reshape(x, y_)
x_ = tf.squeeze(x_)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
def test_sequeeze_positive_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2, 1))
def func(x):
x_ = tf.squeeze(x, [2])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_sequeeze_negative_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2, 1))
def func(x):
x_ = tf.squeeze(x, [-1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_sequeeze_mixed_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((1, 2, 2, 1))
def func(x):
x_ = tf.squeeze(x, [0, -1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Squeeze")
def test_sequeeze_mixed_axis_unknown_rank(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)
y_val = np.array([2, 1, 2, 1, 1], dtype=np.int64)
z_val = np.zeros((1, 2), dtype=np.int64)
def func(x, y, z):
y_ = tf.pad(y, z)
x_ = tf.reshape(x, y_)
x_ = tf.squeeze(x_, [1, -1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
def test_transpose(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=np.float32).reshape((2, 3))
def func(x):
x_ = tf.transpose(x) # perm=[1,0])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_reshape(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
shape = tf.constant([1, 4])
x_ = tf.reshape(x, shape)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_shape=True)
def test_reshape_reshape(self):
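        # back-to-back reshapes should be merged into a single Reshape
        # (the graph_validator below checks the final Reshape count)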
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
shape = tf.constant([1, 4])
shape_2 = tf.constant([4, 1])
x_ = tf.reshape(x, shape)
x_ = tf.reshape(x_, shape_2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
graph_validator=lambda g: check_op_count(g, "Reshape", 1, disabled=False))
@check_opset_min_version(6, "cast")
def test_reshape_int(self):
x_val = np.array([1, 2, 3, 4], dtype=np.int32).reshape((2, 2))
def func(x):
shape = tf.constant([1, 4])
x_ = tf.reshape(x, shape)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_shape=True)
@check_opset_min_version(6, "cast")
def test_reshape_dynamic(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
shape_val = np.array([4, 1], dtype=np.int32)
def func(x, shape):
x_ = tf.reshape(x, shape)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: shape_val}, check_shape=True)
@check_onnxruntime_incompatibility("Relu")
def test_relu(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.nn.relu(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("fails on caffe2 with dim issue")
@check_onnxruntime_incompatibility("Mul")
@check_tf_min_version("1.6")
def test_leaky_relu_int(self):
        # starting from tf 1.6, leaky_relu supports a `features` input x of int type
x_types = [np.int32, np.int64]
for x_type in x_types:
            x_val = (1000 * np.random.random_sample([1000, 100])).astype(x_type)  # scale before casting so int inputs are not all zero
for alpha in [0.1, -0.1, 1.0, -1.0]:
def func(x):
x_ = tf.nn.leaky_relu(x, alpha)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("fails on caffe2 with dim issue")
@check_onnxruntime_incompatibility("Mul")
def test_leaky_relu_with_dependency(self):
x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
def func(x):
# simulate leaky_relu
alpha = tf.constant(0.5)
y = alpha * x
x_ = tf.maximum(y, x)
dependency = y - 1
return tf.identity(x_, name=_TFOUTPUT), tf.identity(dependency, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
@skip_caffe2_backend("fails on caffe2 with dim issue")
@check_onnxruntime_incompatibility("Mul")
def test_leaky_relu_float(self):
x_val = 1000 * np.random.random_sample([1000, 100]).astype(np.float32)
for alpha in [0.1, -0.1, 1.0, -1.0]:
def func(x):
x_ = tf.nn.leaky_relu(x, alpha)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Elu")
def test_elu(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.nn.elu(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Tanh")
def test_tanh(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.tanh(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_relu6(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0, 6, 7], dtype=np.float32).reshape((2, 3))
def func(x):
x_ = tf.nn.relu6(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_incompatibility("Sub")
def test_relu6_dynamic(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.nn.relu6(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_concat(self):
x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.float32)
x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.float32)
def func(x1, x2, x3):
x_ = tf.concat([x1, x2, x3], 0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
def test_concat_empty_const_input(self):
x_val1 = np.array([1, 2, 3], dtype=np.float32)
x_val2 = np.array([], dtype=np.float32)
def func(x1):
x2 = tf.constant(x_val2, dtype=tf.float32)
x_ = tf.concat([x1, x2], 0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
x_val1 = np.array([[1, 2, 3]], dtype=np.float32)
x_val2 = np.array([[]], dtype=np.float32)
def func(x1):
x2 = tf.constant(x_val2, dtype=tf.float32)
x_ = tf.concat([x1, x2], 1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
x_val1 = np.array([1, 2, 3], dtype=np.float32)
x_val2 = np.array([], dtype=np.float32)
x_val3 = np.array([13, 14, 15], dtype=np.float32)
def func(x1, x3):
x2 = tf.constant(x_val2, dtype=tf.float32)
x_ = tf.concat([x1, x2, x3], 0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val3})
@check_opset_min_version(6, "cast")
def test_concat_int64(self):
x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.int64)
x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.int64)
x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.int64)
def func(x1, x2, x3):
x_ = tf.concat([x1, x2, x3], 0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
def test_concat_negative_axis(self):
x_val1 = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
x_val2 = np.array([[7, 8, 9], [10, 11, 12]], dtype=np.float32)
x_val3 = np.array([[13, 14, 15], [16, 17, 18]], dtype=np.float32)
def func(x1, x2, x3):
x_ = tf.concat([x1, x2, x3], -1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, "input3:0": x_val3})
def test_concat_const_string(self):
        x_val1 = np.array([["Hello world", "abc"], ["def", "♦♥♠♣"]], dtype=str)
        const_val = np.array([["Hello there", "wxyz"], ["", "π"]], dtype=str)
def func(x1):
x_ = tf.concat([x1, const_val], 0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
@check_onnxruntime_incompatibility("Pow")
def test_pow(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32)
e = np.array([2.0, 2.0, 2.0, 2.0], dtype=np.float32)
def func(x):
x_ = tf.pow(x, tf.constant(e))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_embedding_lookup(self):
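        # embedding_lookup on a constant table is expected to convert to a simple Gather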
x_val1 = np.array([[1]], dtype=np.int32)
x_val2 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]], dtype=np.float32)
def func(x):
t = tf.constant(x_val2)
x_ = tf.nn.embedding_lookup(t, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1})
@skip_tflite("Advanced constant shape folding not implemented for tflite")
@skip_tfjs("Advanced constant folding not implemented for tfjs")
def test_slice_from_shape_const_fold(self):
x_val = np.array([4, 3], dtype=np.int64)
x_shape = np.array([-1, 3], dtype=np.int64)
def func(x):
z = tf.zeros(x)
x = tf.reshape(z, tf.constant(x_shape))
s = tf.shape(x)
t1 = tf.constant([1], dtype=tf.int32)
t2 = tf.constant([2], dtype=tf.int32)
y = tf.strided_slice(s, t1, t2, shrink_axis_mask=1)
return tf.identity(y, name=_TFOUTPUT)
def graph_validator(g):
# After constant folding just an input and const output node remain
return len(g.get_nodes()) == 2
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, graph_validator=graph_validator)
def test_slice(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
def func(x):
t1 = tf.constant([0, 1], dtype=tf.int32)
t2 = tf.constant([2, 2], dtype=tf.int32)
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_slice_neg_size(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
def func(x):
t1 = tf.constant([0, 1], dtype=tf.int32)
t2 = tf.constant([-1, 2], dtype=tf.int32)
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
    @check_opset_min_version(10, "Slice in opset 10 can accept dynamic 'starts' and 'ends'")
def test_slice_with_non_const(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
t1 = np.array([0, 1], dtype=np.int32)
t2 = np.array([2, 2], dtype=np.int32)
def func(x, t1, t2):
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: t1, _INPUT2: t2})
    @check_opset_min_version(10, "Slice in opset 10 can accept dynamic 'starts' and 'ends'")
def test_slice_with_size_is_negative_one(self):
x_val = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.float32)
t1 = np.array([0, 1], dtype=np.int32)
# input "size" contains -1
t2 = np.array([2, -1], dtype=np.int32)
def func(x, t1, t2):
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: t1, _INPUT2: t2})
@skip_caffe2_backend()
def test_slice1(self):
# FIXME: only 1 dimension supported by caffe2
x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]], [[5, 5, 5], [6, 6, 6]]], dtype=np.float32)
def func(x):
t1 = tf.constant([1, 0, 0], dtype=tf.int32)
t2 = tf.constant([1, 1, 3], dtype=tf.int32)
x_ = tf.slice(x, t1, t2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_split(self):
x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
def func(x):
x_, _, _ = tf.split(x, [4, 15, 11], 1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(13, "Split")
def test_split_nonconst(self):
x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
y_val = np.array([4, 15, 11], np.int32)
def func(x, y):
x_, _, _ = tf.split(x, y, 1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tfjs("TFJS executes model incorrectly")
def test_split_with_more_outputs(self):
x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
def func(x):
return tf.split(x, [4, 15, 11], 1, name="split_test")
self._run_test_case(func, ["split_test:0", "split_test:1", "split_test:2"], {_INPUT: x_val})
def test_negative_split(self):
x_val = np.linspace(1.0, 5 * 30.0, 5 * 30).astype(np.float32).reshape((5, 30))
def func(x):
x_, _, _ = tf.split(x, [4, 15, -1], 1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_reducesum(self):
# not supported by onnx-caffe2
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_sum(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(13, "ReduceSum")
def test_reducesum_nonconst_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
y_val = np.array([1, 2], dtype=np.int32)
def func(x, y):
x_ = tf.reduce_sum(x, axis=y)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(13, "ReduceSum")
def test_reducesum_empty_axis(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 1, 2))
y_val = np.array([], dtype=np.int32)
def func(x, y):
x_ = tf.reduce_sum(x, axis=y)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11, "ScatterND")
def test_segment_sum_data_vector(self):
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
data_val = np.array([5, 1, 7, 2, 3, 4, 1, 3], dtype=np.float32)
def func(data, segments):
x_ = tf.math.segment_sum(data, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
@check_opset_min_version(11, "ScatterND")
def test_segment_sum_unknown_rank(self):
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
data_shape_val = np.array([8, 2, 3, 1], dtype=np.int64)
shape_pad_val = np.zeros((1, 2), dtype=np.int64)
def func(data, segments, data_shape, shape_pad):
# Some hackery to make the rank unknown
data_shape_ = tf.pad(data_shape, shape_pad, constant_values=0)
data = tf.reshape(data, data_shape_)
x_ = tf.math.segment_sum(data, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT],
{_INPUT: data_val, _INPUT1: segs_val, _INPUT2: data_shape_val, _INPUT3: shape_pad_val})
@check_opset_min_version(11, "ScatterND")
def test_segment_ops_data_tensor(self):
for tf_op in [tf.math.segment_sum, tf.math.segment_prod, tf.math.segment_min, tf.math.segment_max]:
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
def func(data, segments):
x_ = tf_op(data, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
@check_opset_min_version(11, "ScatterND")
@skip_tflite("unknown rank")
def test_segment_mean_unknown_rank(self):
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3], dtype=np.int32)
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
data_shape_val = np.array([8, 2, 3, 1], dtype=np.int64)
shape_pad_val = np.zeros((1, 2), dtype=np.int64)
def func(data, segments, data_shape, shape_pad):
# Some hackery to make the rank unknown
data_shape_ = tf.pad(data_shape, shape_pad, constant_values=0)
data = tf.reshape(data, data_shape_)
x_ = tf.math.segment_mean(data, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT],
{_INPUT: data_val, _INPUT1: segs_val, _INPUT2: data_shape_val, _INPUT3: shape_pad_val})
@check_opset_min_version(11, "ScatterND")
def test_sparse_segment_sum(self):
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
def func(data, indices, segments):
x_ = tf.sparse.segment_sum(data, indices, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
@check_opset_min_version(11, "ScatterND")
def test_sparse_segment_mean(self):
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
def func(data, indices, segments):
x_ = tf.sparse.segment_mean(data, indices, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
@check_opset_min_version(11, "ScatterND")
def test_sparse_segment_sqrtn(self):
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
segs_val = np.array([0, 0, 0, 1, 2, 2, 3, 3, 3], dtype=np.int32)
def func(data, indices, segments):
x_ = tf.sparse.segment_sqrt_n(data, indices, segments)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
@check_opset_min_version(11, "ScatterND")
def test_sparse_segment_ops_with_num_segments(self):
for tf_op in [tf.sparse.segment_sum, tf.sparse.segment_mean, tf.sparse.segment_sqrt_n]:
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=np.int32)
segs_val = np.array([0, 0, 0, 1, 3, 3, 4, 4, 4], dtype=np.int32)
def func(data, indices, segments):
x_ = tf_op(data, indices, segments, num_segments=6)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
@check_opset_min_version(11, "ScatterND")
@check_tf_min_version("2.3", "needs tf 2.3")
def test_unsorted_segment_ops(self):
tf_ops = [
tf.math.unsorted_segment_max,
tf.math.unsorted_segment_min,
tf.math.unsorted_segment_sum,
tf.math.unsorted_segment_prod,
tf.math.unsorted_segment_mean,
tf.math.unsorted_segment_sqrt_n,
]
for tf_op in tf_ops:
segs_val = np.array([1, 3, 0, 1, 2, 4, 2, 1], dtype=np.int32)
data_val = np.arange(8 * 2 * 3, dtype=np.float32).reshape([8, 2, 3])
def func(data, segments):
x_ = tf_op(data, segments, num_segments=5)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: segs_val})
@check_opset_min_version(11, "ScatterND")
@check_tf_min_version("2.3", "num_segments can be int64 in tf 2.3")
def test_segment_op_types(self):
data_dtypes = [np.int32, np.float32]
seg_dtypes = [np.int32, np.int64]
for dtypes in product(data_dtypes, seg_dtypes, seg_dtypes, seg_dtypes):
data_val = np.arange(8 * 2 * 3, dtype=dtypes[0]).reshape([8, 2, 3])
indices_val = np.array([2, 0, 1, 3, 5, 4, 3, 5, 5], dtype=dtypes[1])
segs_val = np.array([0, 0, 0, 1, 3, 3, 4, 4, 4], dtype=dtypes[2])
def func(data, indices, segments):
x_ = tf.sparse.segment_sum(data, indices, segments, num_segments=np.array(6, dtype=dtypes[3]))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: data_val, _INPUT1: indices_val, _INPUT2: segs_val})
@check_opset_min_version(11, "CumSum")
@check_tf_min_version("1.14")
def test_set_union(self):
a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
def func(a, b):
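            # tf.sets ops return a SparseTensor; check indices, values and dense_shape separately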
s = tf.sets.union(a, b)
indices, values, shape = s.indices, s.values, s.dense_shape
indices = tf.identity(indices, name=_TFOUTPUT)
values = tf.identity(values, name=_TFOUTPUT1)
shape = tf.identity(shape, name=_TFOUTPUT2)
return indices, values, shape
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
@check_opset_min_version(11, "CumSum")
@check_tf_min_version("1.14")
def test_set_intersection(self):
a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
def func(a, b):
s = tf.sets.intersection(a, b)
indices, values, shape = s.indices, s.values, s.dense_shape
indices = tf.identity(indices, name=_TFOUTPUT)
values = tf.identity(values, name=_TFOUTPUT1)
shape = tf.identity(shape, name=_TFOUTPUT2)
return indices, values, shape
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
@check_opset_min_version(11, "CumSum")
@check_tf_min_version("1.14")
def test_set_difference(self):
a_val = np.array([[10, 2, 30, 2, 5], [10, 9, 1, 9, 3]], np.int32)
b_val = np.array([[4, 5, 10, 8, 9], [1, 4, 1, 1, 5]], np.int32)
for aminusb in [True, False]:
def func(a, b):
s = tf.sets.difference(a, b, aminusb)
indices, values, shape = s.indices, s.values, s.dense_shape
indices = tf.identity(indices, name=_TFOUTPUT)
values = tf.identity(values, name=_TFOUTPUT1)
shape = tf.identity(shape, name=_TFOUTPUT2)
return indices, values, shape
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: a_val, _INPUT1: b_val})
@check_onnxruntime_incompatibility("Sqrt")
def test_sqrt(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.sqrt(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def _test_range_const(self, extra_opset=None):
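        # covers constant ranges: default start, an empty range, negative deltas and float
        # steps; extra_opset lets the ms-domain variants below reuse this helper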
process_args = {}
if extra_opset is not None:
process_args["extra_opset"] = [extra_opset]
def func():
x = tf.range(5)
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
def func():
x = tf.range(3, 3, 5)
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
def func():
x = tf.range(0, -5, -2)
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
def func():
x = tf.range(-5.0, 5.0, 1.5)
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
def func():
x = tf.range(2.5, 5.0, 10.0)
return tf.identity(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
def _test_range_non_const(self, extra_opset=None):
process_args = {}
if extra_opset is not None:
process_args["extra_opset"] = [extra_opset]
def func():
x = tf.range(5.0)
return tf.identity(x, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        # TODO: the tf-2.0 optimizer will most likely fold the range into a constant, which is not what we want to test here
# self.assertTrue(extra_opset is None
# or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
def func():
x = tf.range(0, -5.0, -2)
return tf.identity(x*x, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
        # TODO: the tf-2.0 optimizer will most likely fold the range into a constant, which is not what we want to test here
# self.assertTrue(extra_opset is None
# or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
# disable this case due to onnxruntime loop issue
# https://github.com/microsoft/onnxruntime/issues/1272
# x = tf.range(3.0, 3.0, 5)
# return tf.identity(x, name=_TFOUTPUT)
# g = self._run_test_case(func, [_OUTPUT], {}, process_args=process_args)
# self.assertTrue(extra_opset is None
# or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
delta_val = np.array(1.5, dtype=np.float32)
def func(delta):
x = tf.range(-5.0, 5.0, delta)
return tf.identity(x, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {_INPUT: delta_val}, process_args=process_args)
self.assertTrue(extra_opset is None
or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
start_val = np.array(2.5, dtype=np.float32)
def func(start):
x = tf.range(start, 5.0, 10.0)
return tf.identity(x, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {_INPUT: start_val}, process_args=process_args)
self.assertTrue(extra_opset is None
or check_node_domain(group_nodes_by_type(g)["Range"][0], extra_opset.domain))
@check_opset_min_version(7, "cast")
def test_range_const(self):
self._test_range_const()
def test_range_non_const(self):
self._test_range_non_const()
@test_ms_domain()
def test_ms_range_const(self, extra_opset):
self._test_range_const(extra_opset)
@test_ms_domain()
def test_ms_range_non_const(self, extra_opset):
self._test_range_non_const(extra_opset)
@check_onnxruntime_incompatibility("Sqrt")
def test_rsqrt(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.rsqrt(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
@check_onnxruntime_incompatibility("Reciprocal")
def test_reciprocal(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.math.reciprocal(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
def test_reducemax(self):
# not supported by onnx-caffe2
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_max(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05)
def test_reducemax_global_max_pool(self):
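        # reducing over all trailing spatial axes is expected to be rewritten to a
        # GlobalMaxPool-style pattern; adding 0 likely keeps the reduction from being
        # the graph output directly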
for keepdims in [True, False]:
x_val = make_xval((2, 3, 4, 5, 6))
def func(x):
x_ = tf.reduce_max(x, axis=[2, 3, 4], keepdims=keepdims)
return tf.add(x_, 0, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_reduceprod(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_prod(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_reducemean(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.reduce_mean(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_reducemean_global_avg_pool(self):
for keepdims in [True, False]:
x_val = make_xval((2, 3, 4, 5))
def func(x):
x_ = tf.reduce_mean(x, axis=[2, 3], keepdims=keepdims)
return tf.add(x_, 0, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_onnxruntime_incompatibility("Pow")
def test_pow_scalar(self):
x_val = np.array([4.0, 16.0, 4.0, 1.6], dtype=np.float32)
e = np.array(2.0, dtype=np.float32)
def func(x):
x_ = tf.pow(x, tf.constant(e))
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_pad_const_default_val(self):
params = [
("CONSTANT", [[1, 1], [2, 2]], [[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]]),
("CONSTANT", [[0, 0], [3, 3], [3, 3], [0, 0]], np.random.randn(1, 3, 4, 5).astype(np.float32)),
]
for p in params:
mode, pad, xv = p
x_val = np.array(xv, dtype=np.float32)
def func(x):
paddings = tf.constant(pad)
op = tf.pad(x, paddings, mode)
return tf.identity(op, name=_TFOUTPUT)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_pad_const(self):
x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
def func(x):
paddings = tf.constant([[1, 1], [2, 2]], name="paddings")
op = tf.pad(x, paddings, mode="CONSTANT", name="const_with_val", constant_values=999)
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
def test_pad_reflect(self):
x_val = np.array([[1, 2, 3], [4, 5, 6]], dtype=np.float32)
def func(x):
paddings = tf.constant([[1, 1], [2, 2]], name="paddings")
op = tf.pad(x, paddings, mode="REFLECT", name="reflect")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "Compress")
def test_pad_symmetric(self):
x_val = make_xval([4, 1, 5])
def func(x):
paddings = tf.constant([[1, 3], [0, 0], [2, 4]], name="paddings")
op = tf.pad(x, paddings, mode="SYMMETRIC", name="symmetric")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Pad")
def test_dynamic_pad_symmetric(self):
x_val = make_xval([4, 1, 5])
y_val = np.array([[1, 3], [0, 0], [2, 4]], np.int32)
def func(x, y):
op = tf.pad(x, y, mode="SYMMETRIC", name="symmetric")
return tf.identity(op, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_caffe2_backend()
def test_randomuniform(self):
def func():
shape = tf.constant([2, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.float32)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
def test_random_std_normal(self):
def func():
shape = tf.constant([20, 10, 50], name="shape")
x_ = tf.random.normal(shape)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})[0]
self.assertTrue(-0.1 < np.mean(results) < 0.1)
self.assertTrue(0.9 < np.std(results) < 1.1)
def test_randomnormal(self):
def func():
shape = tf.constant([20, 10, 50], name="shape")
x_ = tf.random.normal(shape, mean=10, stddev=2)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})[0]
self.assertTrue(9.8 < np.mean(results) < 10.2)
self.assertTrue(1.9 < np.std(results) < 2.1)
@check_opset_min_version(9, "RandomNormalLike")
def test_randomnormal_unknown_shape(self):
shape_val = np.array([20, 10, 50], np.int32)
def func(shape):
x_ = tf.random.normal(shape)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
feed_dict = {_INPUT: shape_val}
g = self._run_test_case(func, [_OUTPUT], feed_dict, check_value=False, check_shape=True)
if "input" in g.input_names:
# TFLite inputs don't have port numbers
feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
results = self.run_backend(g, g.outputs, feed_dict)[0]
self.assertTrue(-0.1 < np.mean(results) < 0.1)
self.assertTrue(0.9 < np.std(results) < 1.1)
@check_opset_min_version(10, "TopK")
def test_random_shuffle(self):
x_val = make_xval([5, 4, 3])
def func(x):
x_ = tf.random.shuffle(x)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
feed_dict = {_INPUT: x_val}
if "input" in g.input_names:
# TFLite inputs don't have port numbers
feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
results = self.run_backend(g, g.outputs, feed_dict)
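        # shuffle only permutes along axis 0, so sorting the output along axis 0 should
        # recover the input (assuming make_xval yields values sorted along axis 0)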
np.testing.assert_allclose(x_val, np.sort(results[0], axis=0))
def test_randomuniform_int(self):
def func():
shape = tf.constant([100, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=2, maxval=10)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})
numbers = set(results[0].flatten())
self.assertEqual(sorted(numbers), list(range(2, 10)))
def test_randomuniform_int_scalar(self):
def func():
shape = tf.constant(np.array([], np.int32), name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=2, maxval=10)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
g = self._run_test_case(func, [_OUTPUT], {}, check_value=False, check_shape=True)
results = self.run_backend(g, g.outputs, {})
self.assertTrue(2 <= results[0] < 10)
def test_randomuniform_int_nonconst_max(self):
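        # with 300 samples, every value in [0, maxval) should appear, so comparing the
        # set of drawn integers against range(8) is a (statistically) safe check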
m_val = np.array(8, dtype=np.int32)
def func(m):
shape = tf.constant([100, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=0, maxval=m)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {_INPUT: m_val}, check_value=False, check_shape=True)
feed_dict = {_INPUT: m_val}
if "input" in g.input_names:
# TFLite inputs don't have port numbers
feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
results = self.run_backend(g, g.outputs, feed_dict)
numbers = set(results[0].flatten())
self.assertEqual(sorted(numbers), list(range(8)))
def test_randomuniform_int_nonconst_min_max(self):
n_val = np.array(2, dtype=np.int32)
m_val = np.array(10, dtype=np.int32)
def func(n, m):
shape = tf.constant([100, 3], name="shape")
x_ = random_uniform(shape, name="rand", dtype=tf.int32, minval=n, maxval=m)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {_INPUT: n_val, _INPUT1: m_val}, check_value=False, check_shape=True)
feed_dict = {_INPUT: n_val, _INPUT1: m_val}
if "input" in g.input_names:
# TFLite inputs don't have port numbers
feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
results = self.run_backend(g, g.outputs, feed_dict)
numbers = set(results[0].flatten())
self.assertEqual(sorted(numbers), list(range(2, 10)))
@check_opset_min_version(9, "RandomUniformLike")
def test_randomuniform_int_nonconst_min_max_shape(self):
n_val = np.array(2, dtype=np.int32)
m_val = np.array(10, dtype=np.int32)
s_val = np.array([100, 3], dtype=np.int64)
def func(n, m, s):
x_ = random_uniform(s, name="rand", dtype=tf.int32, minval=n, maxval=m)
x_ = tf.identity(x_, name="output1")
x_ = tf.identity(x_, name="output2")
return tf.identity(x_, name=_TFOUTPUT)
g = self._run_test_case(func, [_OUTPUT], {_INPUT: n_val, _INPUT1: m_val, _INPUT2: s_val},
check_value=False, check_shape=True)
feed_dict = {_INPUT: n_val, _INPUT1: m_val, _INPUT2: s_val}
if "input" in g.input_names:
# TFLite inputs don't have port numbers
feed_dict = {k.split(":")[0]: v for k, v in feed_dict.items()}
results = self.run_backend(g, g.outputs, feed_dict)
numbers = set(results[0].flatten())
self.assertEqual(sorted(numbers), list(range(2, 10)))
@skip_caffe2_backend()
@check_opset_after_tf_version("2.2", 9, "RandomUniform")
def test_randomuniform_dyn_shape(self):
# test for dynamic shape coming from a shape op
x_val = np.array([0, 1, 2, 3, 5], dtype=np.int64)
def func(x):
ret = random_uniform(x[3:], dtype=tf.float32)
return tf.identity(ret, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
@skip_caffe2_backend()
def test_randomuniform_calc_shape(self):
# test for dynamic shape coming from some subgraph
x_val = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.float32)
def func(x):
x_ = tf.identity(x)
x_ = tf.shape(x_, name="shape")[1:]
x_ = random_uniform(x_, name="rand", dtype=tf.float32)
x_ = tf.identity(x_)
return tf.identity(x_, name=_TFOUTPUT)
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False, check_shape=True)
@check_opset_min_version(9, "Compress")
@skip_onnx_checker("Checker fails type inference for Compress")
def test_sample_distorted_bounding_box_v2(self):
x_val = np.array([200, 300, 3], dtype=np.int32)
        # use fixed bounding boxes so the test input is deterministic
        y_val = np.array([[0, 0, 0.1, 0.1], [0.9, 0.9, 1, 1]], np.float32).reshape([1, 2, 4])
def func(image_size, bounding_boxes):
begin, size, bboxes = tf.image.sample_distorted_bounding_box(
image_size, bounding_boxes, seed=42, min_object_covered=0.8,
aspect_ratio_range=[0.05, 3], area_range=[0.05, 1], max_attempts=100,
use_image_if_no_bounding_boxes=False)
begin_ = tf.identity(begin, name=_TFOUTPUT)
size_ = tf.identity(size, name=_TFOUTPUT1)
bboxes_ = tf.identity(bboxes, name=_TFOUTPUT2)
return begin_, size_, bboxes_
# since results are random, compare the shapes only
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: x_val, _INPUT1: y_val},
check_value=False, check_shape=True)
@skip_caffe2_backend()
def test_argminmax(self):
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.argmin(x, axis=0)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([1, 2, -2, -1], dtype=np.int32).reshape((2, 2))
def func(x):
x_ = tf.argmax(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([1, 2, -2, -1], dtype=np.int32).reshape((2, 2))
def func(x):
x_ = tf.argmax(x, output_type=x_val.dtype)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(6, "cast")
def test_cast(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.cast(x, tf.int32)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite does not support uint32 if tf version <= 2.3.0")
@check_opset_min_version(6, "cast")
def test_cast_unit32(self):
x_val = np.array([1, 2, 3, 4], dtype=np.uint32).reshape((2, 2))
def func(x):
x_ = tf.cast(x, tf.uint64)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "sign")
def test_sign(self):
x_vals = [np.array([1.0, 2.0, 0.0, -1.0, 0.0, -2.0], dtype=np.float32).reshape((2, 3)),
np.array([1, 2, 0, -1, 0, -2], dtype=np.int32).reshape((2, 3)),
np.array([1, 2, 0, -1, 0, -2], dtype=np.int64).reshape((2, 3))]
for x_val in x_vals:
def func(x):
x_ = tf.math.sign(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tfjs("tfjs produces incorrect results")
def test_onehot0(self):
x_val = np.array([0, 1, 2], dtype=np.int32)
depth = 5
for dtype, axis in [(tf.float32, -1), (tf.int64, 0), (tf.float64, 1)]:
def func(x):
val1 = tf.constant(5, dtype)
val2 = tf.constant(1, dtype)
x_ = tf.one_hot(x, depth, on_value=val1, axis=axis, off_value=val2, dtype=dtype)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@unittest.skip("only rank 1 is currently implemented")
def test_onehot1(self):
# only rank 1 is currently implemented
x_val = np.array([[0, 2], [1, -1]], dtype=np.int32)
depth = 3
def func(x):
x_ = tf.one_hot(x, depth, on_value=5.0, axis=-1, off_value=0.0, dtype=tf.float32)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_target("rs6", "onehot")
def test_onehot2(self):
for axis in [-1, 0, 1]:
x_val = np.array([0, 1, 2, 1, 2, 0, 1, 2, 1, 2], dtype=np.int32)
depth = 20
def func(x):
x_ = tf.one_hot(x, depth, on_value=5.0, axis=axis, off_value=1.0, dtype=tf.float32)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_target("rs6", "onehot")
@check_opset_min_version(9, "onehot")
def test_onehot3(self):
# rank 1
for np_dtype in [np.int32, np.int64]:
x_val = np.array([0, 1, 2, 1, 2, 0, 1, 2, 1, 2], dtype=np_dtype)
depth = np.array(20).astype(np.int64)
def func(x):
on_off = np.array([5.6, 1.2]).astype(np_dtype)
x_ = tf.one_hot(x, depth, on_value=on_off[0], axis=-1, off_value=on_off[1])
return tf.identity(x_, name=_TFOUTPUT)
graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
self.assertTrue(len(group_nodes_by_type(graph)["OneHot"]) == 1, "onnx onehot should be used")
# rank 2
        for axis in [-1, 0, 1, 2]:
            for np_dtype in [np.int32, np.int64]:
                x_val = np.arange(0, 50, dtype=np_dtype).reshape([-1, 10])
                depth = np.array(20).astype(np.int64)
                def func(x):
                    on_off = np.array([5.6, 1.2]).astype(np_dtype)
                    x_ = tf.one_hot(x, depth, on_value=on_off[0], axis=axis, off_value=on_off[1])
return tf.identity(x_, name=_TFOUTPUT)
graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
self.assertTrue(len(group_nodes_by_type(graph)["OneHot"]) == 1, "onnx onehot should be used")
@check_opset_min_version(9, "onehot")
@skip_tfjs("tfjs produces incorrect results")
def test_onehot_rank0(self):
depth = 5
for np_dtype in [np.int32, np.int64]:
x_val = np.array(3, dtype=np_dtype)
for axis in [-1, 0]:
def func(x):
x_ = tf.one_hot(x, depth, axis=axis)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("issue undefined dim 1")
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten0(self):
x_val = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("issue undefined dim 1")
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten1(self):
x_val = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_max_version("1.15", "not supported in tf-2.0")
def test_flatten2(self):
x_val = np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]], dtype=np.float32)
def func(x):
x_ = tf.contrib.layers.flatten(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_cancel_transpose(self):
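        # the NHWC->NCHW and NCHW->NHWC transposes are inverses and should be removed
        # by the transpose optimizer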
x_val = np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]], dtype=np.float32)
def func(x):
x_ = tf.identity(x, _TFINPUT)
x_ = tf.transpose(x_, perm=NHWC_TO_NCHW)
x_ = tf.transpose(x_, perm=NCHW_TO_NHWC)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_onnxruntime_min_version("0.5.0", "topk-10's shape inference function has a bug")
@check_opset_min_version(6, "cast")
def test_topk1(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
values, _ = tf.nn.top_k(x, 5, sorted=True)
return tf.identity(values, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "TopK with dynamic K")
def test_topk2(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
k_val = np.array(10).astype(np.int32)
def func(x, k):
values, _ = tf.nn.top_k(x, k, sorted=True)
return tf.identity(values, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: k_val})
@check_onnxruntime_min_version("0.5.0", "topk-10's shape inference function has a bug")
def test_topk3(self):
# test topk index output
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
_, idx = tf.nn.top_k(x, 5, sorted=True)
return tf.identity(idx, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_stack_axis(self):
for axis in [0, 1]:
x_val = [np.random.randn(3, 4).astype("float32") for _ in range(10)]
def func():
x = [tf.constant(x_val[i], dtype=tf.float32) for i in range(10)]
x_ = tf.stack(x, axis=axis)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {})
def test_unstack_axis(self):
for axis in [0, 1]:
x_val = np.random.randn(10, 3, 4).astype("float32")
def func():
x = tf.constant(x_val, dtype=tf.float32)
x_ = tf.unstack(x, axis=axis)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {})
def _test_reorganize_data(self, op, shape):
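        # shared helper for space_to_depth/depth_to_space with block_size=2 (NHWC layout)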
x_val = make_xval(shape)
def func(x):
x_ = op(x, block_size=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("Space2Depth not implemented")
def test_space_to_depth(self):
self._test_reorganize_data(tf.nn.space_to_depth, [1, 28, 28, 3])
@skip_caffe2_backend("Depth2Space not implemented")
def test_depth_to_space(self):
self._test_reorganize_data(tf.nn.depth_to_space, [1, 14, 14, 12])
def _test_reorganize_data_gpu(self, op, shape):
x_val = make_xval(shape)
def func(x):
x_ = op(x, block_size=2, data_format="NCHW")
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tf_cpu("only tf_gpu can run Space2Depth with NCHW format")
@skip_caffe2_backend("Space2Depth not implemented")
def test_space_to_depth_gpu(self):
self._test_reorganize_data_gpu(tf.nn.space_to_depth, [1, 3, 50, 80])
@skip_tf_cpu("only tf_gpu can run Depth2Space with NCHW format")
@skip_caffe2_backend("Depth2Space not implemented")
def test_depth_to_space_gpu(self):
self._test_reorganize_data_gpu(tf.nn.depth_to_space, [1, 120, 25, 40])
@check_opset_min_version(6, "addn")
def test_addn(self):
x_val = np.arange(3 * 2 * 3).astype("float32")
def func(x):
x_ = tf.add_n([x, x, x])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice1(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = tf.strided_slice(x, [1, 0, 0], [2, 1, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice2(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = tf.strided_slice(x, [1, 0, 0], [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice3(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[1:]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_strided_slice4(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[:2]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice5(self):
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
def func(x):
x_ = x[:2, 0:1, 1:]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice6(self):
# example from here:
# https://www.tensorflow.org/versions/r1.0/api_docs/cc/class/tensorflow/ops/strided-slice
x_val = np.arange(5 * 6).astype("float32").reshape((5, 6))
def func(x):
x_ = x[2, :]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice7(self):
x_val = np.arange(5 * 6).astype("float32").reshape((5, 6))
def func(x):
x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], begin_mask=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def func(x):
x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], end_mask=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def func(x):
x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], shrink_axis_mask=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def func(x):
x_ = tf.strided_slice(x, [0, 1], [3, 4], [1, 1], ellipsis_mask=2)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice8(self):
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
def func(x):
x_ = x[0:1, ..., 1, 2:, :6]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
def func(x):
x_ = x[0:1, 1, 2:, :6, ...]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
def func(x):
x_ = x[..., 0:1, 1, 2:, :6]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_1(self):
# simple case
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array([0, 1, 2], dtype=np.int32)
def func(x, y):
x_ = tf.strided_slice(x, y, [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_2(self):
# int32
x_val = np.arange(3 * 2 * 3).astype("int32").reshape((3, 2, 3))
y_val = np.array([0, 1, 2], dtype=np.int32)
def func(x, y):
x_ = tf.strided_slice(x, y, [2, 2, 3], [1, 1, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_3(self):
# common usage, ellipsis_mask
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:2, :, :]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_4(self):
# begin_mask, end_mask
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:, :y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_5(self):
# only slice the first axis
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y:2]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_6(self):
# shrink mask
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
x_val = np.arange(3 * 2 * 3).astype("float32").reshape((3, 2, 3))
y_val = np.array(-1, dtype=np.int32)
def func(x, y):
x_ = x[y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_caffe2_backend("multiple dims not supported")
def test_strided_slice_dynamic_7(self):
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[0:y, ..., y, y:, :y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[0:y, y, y:, :y, ...]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
x_val = np.arange(1 * 2 * 3 * 4 * 5 * 6).astype("float32").reshape((1, 2, 3, 4, 5, 6))
y_val = np.array(1, dtype=np.int32)
def func(x, y):
x_ = x[..., 0:y, y, y:, :y]
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
def test_strided_slice_reverse_1(self):
x_val = np.arange(16 * 32).astype(np.float32).reshape((1, 16, 32, 1))
def func(x):
return tf.concat([x[:, :, :10], x[:, :, :21:-1]], axis=0, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
def test_strided_slice_reverse_2(self):
x_val = np.arange(16 * 32).astype(np.float32).reshape((1, 16, 32, 1))
def func(x):
return tf.concat([x[:, :, :10], x[:, :, 9::-1]], axis=0, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite converts strided slice incorrectly (steps 1 dim larger than starts/stops)")
@check_opset_min_version(10, "Slice")
def test_strided_slice_reverse_3(self):
x_val = np.zeros((1, 16, 32, 1)).astype(np.float32)
y_val = np.array(9).astype(np.int32)
z_val = np.array(-1).astype(np.int32)
def func(x, y, z):
return tf.concat([x[:, :, :10], x[:, :, y::z]], axis=0, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_opset_min_version(10, "Slice")
@skip_tfjs("TFJS executes model incorrectly")
def test_new_axis_mask(self):
def func(x, y):
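            # mixes tf.newaxis, an ellipsis, dynamic begin/end values and a shrink index (9)
            # to exercise new_axis_mask handling together with the other strided-slice masks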
x_ = x[tf.newaxis, 0:y, y::2, tf.newaxis, :, tf.newaxis, :y, tf.newaxis, ..., 9]
return tf.identity(x_, name=_TFOUTPUT)
x_val = np.arange(5*10*10*10*10*20*30).astype("float32").reshape((5, 10, 10, 10, 10, 20, 30))
y_val = np.array(9, dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(10, "Slice")
@skip_tflite("not supported in tflite")
def test_strided_slice_ellipse(self):
def func1(x):
x_ = x[..., tf.newaxis]
return tf.identity(x_, name=_TFOUTPUT)
shape = [1, 8, 64]
x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
def func2(x):
x_ = x[:, tf.newaxis, ..., :, tf.newaxis]
return tf.identity(x_, name=_TFOUTPUT)
shape = [2, 3, 4, 5]
x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(10, "Slice")
@skip_tflite("not supported in tflite")
def test_strided_slice_only_ellipsis(self):
def func1(x):
x_ = x[...]
return tf.identity(x_, name=_TFOUTPUT)
shape = [1, 8, 64]
x_val = np.arange(np.prod(shape)).astype("float32").reshape(shape)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "batchnorm")
def test_fused_batchnorm(self):
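        # Inference-mode fused batch norm computes, per channel,
        # y = scale * (x - mean) / sqrt(variance + epsilon) + offset.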
x_shape = [1, 28, 28, 2]
x_dtype = np.float32
scale_dtype = np.float32
scale_shape = [2]
        # only NHWC is supported on CPU for TensorFlow
data_format = "NHWC"
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func(x):
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
mean = tf.constant(mean_val, name='mean')
var = tf.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = fused_batch_norm(
x, scale, offset, mean=mean, variance=var,
epsilon=epsilon, data_format=data_format, is_training=False)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
@check_opset_min_version(7, "batchnorm")
@check_tf_min_version("2.4", "tf version above 2.4 supports NDHWC")
def test_fused_batchnorm_3d(self):
x_shape = [1, 28, 28, 2, 2]
x_dtype = np.float32
scale_dtype = np.float32
scale_shape = [2]
data_format = "NDHWC"
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func(x):
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
mean = tf.constant(mean_val, name='mean')
var = tf.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = fused_batch_norm(
x, scale, offset, mean=mean, variance=var,
epsilon=epsilon, data_format=data_format, is_training=False)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
@check_opset_min_version(7, "batchnorm")
@skip_tfjs("TFJS executes model incorrectly")
def test_fused_batchnorm_training(self):
x_shape = [1, 28, 28, 2]
x_dtype = np.float32
scale_dtype = np.float32
scale_shape = [2]
        # only NHWC is supported on CPU for TensorFlow
data_format = "NHWC"
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func(x):
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
epsilon = 0.001
y, _, _ = fused_batch_norm(
x, scale, offset, mean=None, variance=None,
epsilon=epsilon, data_format=data_format, is_training=True)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-04)
@skip_tflite("tflite converts aborts")
@skip_tfjs("TFJS executes model incorrectly")
@check_opset_min_version(11, "batchnorm")
@check_tf_min_version("2.4")
def test_batchnorm_mixed(self):
x_shape = [1, 32, 32, 2]
x_dtype = np.float16
scale_dtype = np.float32
scale_shape = [2]
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func(x, mean, offset, var):
scale = tf.constant(scale_val, name='scale')
y = tf.raw_ops.FusedBatchNormV3(x=x, scale=scale, offset=offset, mean=mean, variance=var,
is_training=False, name=_TFOUTPUT)
return y
self._run_test_case(func, [_OUTPUT],
{_INPUT: x_val, _INPUT1: mean_val, _INPUT2: offset_val, _INPUT3: var_val})
@check_opset_min_version(7, "batchnorm")
@check_tf_min_version("1.13")
def test_batchnorm(self):
x_shape = [1, 128, 128, 2]
x_dtype = np.float32
scale_dtype = np.float32
scale_shape = [2]
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func(x, mean, offset, var):
scale = tf.constant(scale_val, name='scale')
epsilon = 0.001
y = tf.nn.batch_normalization(x, mean, var, offset, scale, epsilon)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: mean_val, _INPUT2: offset_val, _INPUT3: var_val})
@check_opset_min_version(7, "batchnorm")
def test_conv2d_batchnorm_fusion(self):
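        # The converter is expected to fold the inference-mode batch norm into the
        # preceding Conv's weights and bias, so no BatchNormalization node should
        # remain in the ONNX graph (checked by graph_validator below).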
x_shape = [1, 28, 28, 2]
x_val = np.random.random_sample(x_shape).astype(np.float32)
w = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
# 2 channels for input and output
w = np.concatenate([w, w, w, w]).reshape([3, 3, 2, 2])
scale_dtype = np.float32
scale_shape = x_shape[-1:]
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
def func_conv2d(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='VALID')
return conv
def func_fusedbn(x):
scale = tf.constant(scale_val, name='scale')
offset = tf.constant(offset_val, name='offset')
mean = tf.constant(mean_val, name='mean')
var = tf.constant(var_val, name='variance')
epsilon = 0.1234
y, _, _ = fused_batch_norm(
func_conv2d(x), scale, offset, mean=mean, variance=var,
epsilon=epsilon, data_format='NHWC', is_training=False)
return tf.identity(y, name=_TFOUTPUT)
def graph_validator(g):
if 'BatchNormalization' in [n.type for n in g.get_nodes()]:
return False
return True
self._run_test_case(func_fusedbn, [_OUTPUT], {_INPUT: x_val}, rtol=1e-05, graph_validator=graph_validator)
@check_tf_min_version("1.15")
@check_opset_min_version(10, "quantize_and_dequantize")
def test_qdq_unsigned_input(self):
x_shape = [3, 3, 2]
x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=True)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.15")
@check_opset_min_version(10, "quantize_and_dequantize")
def test_qdq_optimizer(self):
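        # The QDQ optimizer is expected to push Transpose/Reshape through the
        # quantize/dequantize pairs and merge them, leaving a single
        # DequantizeLinear node (checked by the graph_validator below).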
x_shape = [3, 3, 2]
x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=True)
x_ = tf.transpose(x_, [1, 2, 0])
x_ = tf.reshape(x_, tf.constant([9, 2]))
x_ = quantize_and_dequantize(x_, 1.0, 6.0, signed_input=False, range_given=True)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
graph_validator=lambda g: check_op_count(g, "DequantizeLinear", 1, disabled=False))
@check_tf_min_version("1.15")
@check_opset_min_version(10, "quantize_and_dequantize")
def test_qdq_optimizer_split_concat(self):
x_shape = [7, 3, 5]
y_shape = [7, 2, 5]
x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape)
y_val = np.arange(1, 1+np.prod(y_shape)).astype("float32").reshape(y_shape)
def func(x, y):
x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
a, _, c = tf.unstack(x_, axis=1)
ac = tf.stack([a, c], axis=1)
y_ = quantize_and_dequantize(y, 1.0, 30.0, signed_input=False, range_given=True)
m = tf.matmul(ac, tf.transpose(y_, [0, 2, 1]))
m_ = m[2:, :, :]
m_ = quantize_and_dequantize(m_, 1.0, 30.0, signed_input=False, range_given=True)
return tf.identity(m_, name=_TFOUTPUT)
def validate_graph(g):
# MatMul should be wrapped in Dq/Q
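            # i.e. every MatMul input comes from a DequantizeLinear and every
            # consumer of its output is a QuantizeLinear, so the MatMul stays
            # inside a quantized region.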
for n in g.get_nodes():
if n.type == "MatMul":
if not all(inp.type == "DequantizeLinear" for inp in n.inputs):
return False
if not all(c.type == "QuantizeLinear" for c in g.find_output_consumers(n.output[0])):
return False
return True
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, graph_validator=validate_graph)
@check_tf_min_version("1.15")
@check_opset_min_version(11, "ScatterND")
@skip_tflite("TFLite uses a pattern for ScatterND so number of DequantizeLinear won't match")
def test_qdq_optimizer_scatter(self):
x_val = np.array([10, 20, 30, 40], dtype=np.float32).reshape((4))
y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
z_val = np.array([8, 11], dtype=np.float32).reshape((2))
def func(x, y, z):
x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
z_ = quantize_and_dequantize(z, 1.0, 30.0, signed_input=False, range_given=True)
w = tf.tensor_scatter_nd_update(x_, y, z_)
w_ = quantize_and_dequantize(w, 1.0, 30.0, signed_input=False, range_given=True)
return tf.identity(w_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val},
graph_validator=lambda g: check_op_count(g, "DequantizeLinear", 1, disabled=False))
def func(x, y, z):
x_ = quantize_and_dequantize(x, 1.0, 30.0, signed_input=False, range_given=True)
w = tf.tensor_scatter_nd_update(x_, y, z)
w_ = quantize_and_dequantize(w, 1.0, 30.0, signed_input=False, range_given=True)
return tf.identity(w_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_tf_min_version("1.15")
@check_opset_min_version(10, "quantize_and_dequantize")
def test_qdq_dyn_range_unsigned_input(self):
x_shape = [3, 3, 2]
x_val = np.arange(1, 1+np.prod(x_shape)).astype("float32").reshape(x_shape) + 0.1
def func(x):
x_ = quantize_and_dequantize(x, 1.0, 6.0, signed_input=False, range_given=False)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite converter mistranslates quantize op")
@check_tf_min_version("1.15")
@check_opset_min_version(10, "quantize_and_dequantize")
def test_qdq_signed_input(self):
x_shape = [3, 3, 2]
x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
def func(x):
x_ = quantize_and_dequantize(x, -6.0, 6.0, signed_input=True, narrow_range=False, range_given=True)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite converter crashes")
@check_tf_min_version("2.0")
@check_opset_min_version(13, "quantize_and_dequantize")
def test_qdq_per_channel_signed_input(self):
x_shape = [3, 3, 2]
x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
def func(x):
x_ = quantize_and_dequantize(x, np.array([-1.72, -3.89]).astype(np.float32), \
np.array([5.12, 2.36]).astype(np.float32), \
signed_input=True, narrow_range=False, \
range_given=True, axis=-1)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite converter crashes")
@check_tf_min_version("2.0")
@check_opset_min_version(13, "quantize_and_dequantize")
def test_qdq_dyn_range_per_channel_signed_input(self):
x_shape = [3, 3, 2]
x_val = np.arange(-np.prod(x_shape)/2, np.prod(x_shape)/2).astype("float32").reshape(x_shape)
def func(x):
x_ = quantize_and_dequantize(x, np.array([-1.72, -3.89]).astype(np.float32), \
np.array([5.12, 2.36]).astype(np.float32), \
signed_input=True, narrow_range=False, \
range_given=False, axis=-1)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_opset_min_version(7, "resize_nearest_neighbor")
def test_resize_nearest_neighbor(self):
x_shape = [1, 15, 20, 2]
x_new_size = [30, 40]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_new_size_ = tf.constant(x_new_size)
x_ = resize_nearest_neighbor(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "resize_nearest_neighbor")
def test_resize_nearest_neighbor_with_non_const(self):
x_shape = [3, 10, 8, 5]
x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
x_new_size = np.array([20, 16]).astype(np.int32)
def func(x, x_new_size_):
x_ = resize_nearest_neighbor(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
@skip_caffe2_backend()
@check_opset_min_version(7, "resize_bilinear")
def test_resize_bilinear(self):
x_shape = [1, 15, 20, 2]
x_new_size = [30, 40]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_new_size_ = tf.constant(x_new_size)
x_ = resize_bilinear(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_caffe2_backend()
@check_tf_min_version("1.14")
@check_opset_min_version(11, "coordinate_transformation_mode attr")
def test_resize_bilinear_half_pixel_centers(self):
x_shape = [1, 15, 20, 2]
x_new_size = [30, 40]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_new_size_ = tf.constant(x_new_size)
x_ = resize_bilinear(x, x_new_size_, half_pixel_centers=True)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "resize_bilinear")
def test_resize_bilinear_with_non_const(self):
x_shape = [3, 10, 8, 5]
x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
x_new_size = np.array([20, 16]).astype(np.int32)
def func(x, x_new_size_):
x_ = resize_bilinear(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
@check_opset_min_version(10, "resize scale can less than 1")
def test_resize_bilinear_with_non_const2(self):
        # scales has an element larger than 1 and also an element less than 1
x_shape = [3, 100, 8, 5]
x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
x_new_size = np.array([20, 16]).astype(np.int32)
def func(x, x_new_size_):
x_ = resize_bilinear(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "resize_bilinear_v2")
def test_resize_bilinear_v2_with_non_const(self):
x_shape = [3, 10, 8, 5]
x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
x_new_size = np.array([20, 16]).astype(np.int32)
def func(x, x_new_size_):
x_ = resize_bilinear_v2(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_new_size})
def test_adjust_contrast(self):
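        # adjust_contrast rescales each channel about its per-channel mean:
        # y = (x - mean(x)) * contrast_factor + mean(x).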
x_shape = [4, 3, 2]
x_val = np.arange(1, 1 + np.prod(x_shape), dtype=np.float32).reshape(x_shape)
y_val = np.array(2.1, np.float32)
def func(x, y):
x_ = tf.image.adjust_contrast(x, y)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11, "GatherElements")
def test_adjust_saturation(self):
x_val = np.array([[1, 2, 3], [4, 4, 4], [3, 2, 3], [3, 2, 2]], dtype=np.float32).reshape([2, 2, 3])
y_val = np.array(2.1, np.float32)
def func(x, y):
x_ = tf.image.adjust_saturation(x, y)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
y_val = np.array(0.5, np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11, "GatherND")
def test_adjust_hue(self):
x_val = np.array([[1, 2, 3], [4, 4, 4], [10, 2, 1], [10, 1, 2],
[4, 6, 5], [5, 6, 4], [1, 3, 2], [3, 5, 3]], dtype=np.float32).reshape([2, 4, 3])
def func(x, y):
x_ = tf.image.adjust_hue(x, y)
return tf.identity(x_, name=_TFOUTPUT)
for i in range(-10, 10, 2):
y_val = np.array(i / 10, np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val}, rtol=1e-6, atol=2e-5)
@check_tf_min_version("2.0", "Results are slightly different in tf1")
@check_opset_min_version(11, "resize bicubic")
def test_resize_bicubic(self):
x_shape = [1, 15, 20, 2]
new_size_val = np.array([30, 40], dtype=np.int32)
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x, new_size):
y = tf.image.resize(x, new_size, method=tf.image.ResizeMethod.BICUBIC)
return tf.identity(y, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: new_size_val}, rtol=1e-6, atol=1e-5)
@check_opset_min_version(10, "resize scale can less than 1")
def test_resize_nearest_neighbor2(self):
x_shape = [1, 300, 20, 2]
x_new_size = [30, 40]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_new_size_ = tf.constant(x_new_size)
x_ = resize_nearest_neighbor(x, x_new_size_)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "coordinate_transformation_mode attr")
def test_resize_nearest_neighbor_half_pixel_centers(self):
x_shape = [1, 10, 20, 2]
x_new_size = [20, 40]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x):
x_new_size_ = tf.constant(x_new_size)
x_ = resize_nearest_neighbor(x, x_new_size_, half_pixel_centers=True)
return tf.identity(x_, name=_TFOUTPUT)
_ = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "fill")
def test_fill_float32(self):
x_shape = [1, 15, 20, 2]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x0):
x1 = tf.fill(x_val.shape, 9.0)
x2 = tf.add(x0, x1)
return tf.identity(x2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "fill")
def test_fill_int32(self):
x_shape = [1, 15, 20, 2]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("int32").reshape(x_shape)
def func(x0):
x1 = tf.fill(x_val.shape, 9)
x2 = tf.add(x0, x1)
return tf.identity(x2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "fill")
def test_fill7_float32(self):
x_shape = [1, 15, 20, 2]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("float32").reshape(x_shape)
def func(x0):
x1 = tf.fill(x_val.shape, 9.0)
x2 = tf.add(x0, x1)
return tf.identity(x2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "fill")
def test_fill7_int32(self):
x_shape = [1, 15, 20, 2]
x_val = np.arange(1, 1 + np.prod(x_shape)).astype("int32").reshape(x_shape)
def func(x0):
x1 = tf.fill(x_val.shape, 9)
x2 = tf.add(x0, x1)
return tf.identity(x2, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "div")
def test_tf_div(self):
# pylint: disable=E0001,C0415
from tensorflow.python.ops.gen_math_ops import div
shape = 1000
# test floating data
x_val = (np.random.sample(shape) + 1e-6).astype(np.float32)
y_val = (np.random.sample(shape) + 1e-6).astype(np.float32)
def func(x, y):
output = div(x, y, name=_TFOUTPUT)
# assert output.op.type == "Div"
return output
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
# test integer data
x_val = (100 * np.random.sample(shape) + 1).astype(np.int32)
y_val = (100 * np.random.sample(shape) + 1).astype(np.int32)
def func(x, y):
output = div(x, y, name=_TFOUTPUT)
# assert output.op.type == "Div"
return output
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(7, "erf")
def test_erf(self):
x_shape = [2, 2]
x_val0 = np.random.random(np.prod(x_shape)).astype(np.float32).reshape(x_shape)
x_val1 = np.array([[-1, -0.5], [1, 0.5]]).astype(np.float32)
for x_val in [x_val0, x_val1]:
def func(x):
x_ = tf.math.erf(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=0.01)
@check_opset_min_version(8, "Scan")
@skip_opset(9, "ReverseSequence")
def test_reverse_sequence_batch_major(self):
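        # reverse_sequence reverses the first seq_lengths[i] elements along
        # seq_axis for each element i along batch_axis and leaves the rest untouched.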
x_val = np.array([[[1, 2, 3], [4, 5, 6], [0, 0, 0]],
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[1, 2, 3], [0, 0, 0], [0, 0, 0]]],
dtype=np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[2, 3, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3],
[4, 5, 6], [4, 5, 6], [1, 1, 1],
[0, 0, 0], [7, 8, 9], [0, 0, 0]
],
dtype=np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[3] * 9)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val_shape = [5, 5, 7, 8, 9]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=1, batch_axis=0, seq_lengths=[5, 5, 5, 5, 5])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(8, "Scan")
@skip_opset(9, "ReverseSequence")
def test_reverse_sequence_time_major(self):
x_val = np.array([[[1, 2, 3], [1, 2, 3], [1, 2, 3]],
[[4, 5, 6], [4, 5, 6], [0, 0, 0]],
[[0, 0, 0], [7, 8, 9], [0, 0, 0]]],
dtype=np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[2, 3, 1])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([[1, 2, 3], [1, 2, 3], [1, 2, 3],
[4, 5, 6], [4, 5, 6], [1, 1, 1],
[0, 0, 0], [7, 8, 9], [0, 0, 0]],
dtype=np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[9, 9, 9])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val_shape = [5, 5, 7, 8, 9]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = tf.reverse_sequence(x, seq_axis=0, batch_axis=1, seq_lengths=[5, 5, 5, 5, 5])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite interpreter crashes on empty axis")
@check_opset_min_version(10, "ReverseSequence")
def test_reversev2_constant_axis(self):
# Tests for constant axis.
x_val_shape = [1, 2, 3, 4]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[3])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
# Empty axis vector.
x_val_shape = [2, 3, 4]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite reverse_v2 does not support multiple axes")
@check_opset_min_version(10, "ReverseSequence")
def test_reversev2_vector_axis(self):
x_val_shape = [1, 2, 3, 4]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[0, -3, 2, 3])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val_shape = [2, 3, 4]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[-3, 1, 2])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val_shape = [5, 5, 9, 7, 8, 9]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[0, 1, -2, 3, 5])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("tflite interpreter crashes on empty axis")
@check_opset_min_version(10, "ReverseSequence")
def test_reversev2_1D_tensor(self):
        # For tensors with 1 dimension and for an empty axis list,
        # the conversion adds an Identity node.
x_val_shape = [4]
x_val = np.random.randint(0, 100, x_val_shape).astype(np.float32)
def func(x):
x_ = reverse_v2(x, axis=[0])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def func(x):
x_ = reverse_v2(x, axis=[])
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
def test_where(self):
x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
dtype=np.float32)
false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
dtype=np.float32)
def func(x):
picks = tf.where(x > -1, true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array(1, dtype=np.float32)
true_result = np.array(100, dtype=np.float32)
false_result = np.array(-111, dtype=np.float32)
def func(x):
picks = tf.where(x > -1, true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "IsNaN")
def test_where_isnan(self):
x_val = np.array([1, 2, -3, float('nan'), -5, -6, float('nan'), 8, 9, 0], dtype=np.float32)
true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
dtype=np.float32)
def func(x):
picks = tf.where(is_nan(x), true_result, x)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "Where for strings needs opset 9")
@skip_tfjs("Technically tf where doesn't support strings and tfjs doesn't like it")
def test_where_string(self):
x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
dtype=np.str)
false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
dtype=np.str)
def func(x):
picks = tf.where(x > -1, true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
def test_where_bool(self):
x_val = np.array([1, 2, -3, 4, -5], dtype=np.float32)
true_result = np.array([True, False, True, False, True],
dtype=np.bool)
false_result = np.array([False, True, False, True, True],
dtype=np.bool)
def func(x):
picks = tf.where(x > -1, true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
#@check_target("rs6", "onnxruntime Where type limitation")
def test_where_int32(self):
x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.int32)
true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
dtype=np.int32)
false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
dtype=np.int32)
def func(x):
picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
@check_tf_max_version("1.15", "issues in tf-2.0, fix later")
def test_where_with_two_rank_input(self):
x_val = np.array([1, 2, -3, 4, -5, -6, -7, 8, 9, 0], dtype=np.float32)
true_result = np.array([[111, 111], [222, 222], [333, 333], [444, 444], [555, 555],
[666, 666], [777, 777], [888, 888], [999, 999], [1000, 1000]],
dtype=np.float32)
false_result = np.array([[-111, -111], [-222, -222], [-333, -333], [-444, -444], [-555, -555],
[-666, -666], [-777, -777], [-888, -888], [-999, -999], [-1000, -1000]],
dtype=np.float32)
def func(x):
cond = tf.greater_equal(x, 0)
picks = tf.where(cond, true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
def test_where_with_two_rank_condition(self):
x_val = np.array([[1, 2, -3, 4, -5, -6, -7, 8, 9, 0]], dtype=np.float32)
true_result = np.array([[111, 222, 333, 444, 555, 666, 777, 888, 999, 1000]],
dtype=np.float32)
false_result = np.array([[-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000]],
dtype=np.float32)
def func(x):
picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
def test_where_with_three_rank_condition(self):
x_val = np.array([[[1, 2, -3, 4, -5, -6, -7, 8, 9, 0]]], dtype=np.float32)
true_result = np.array([[[111, 222, 333, 444, 555, 666, 777, 888, 999, 1000]]],
dtype=np.float32)
false_result = np.array([[[-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000]]],
dtype=np.float32)
def func(x):
picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(7, "GreaterEqual")
def test_where_scalar(self):
x_val = np.array(6, dtype=np.float32)
true_result = np.array([111, 222, 333, 444, 555, 666, 777, 888, 999, 1000],
dtype=np.float32)
false_result = np.array([-111, -222, -333, -444, -555, -666, -777, -888, -999, -1000],
dtype=np.float32)
def func(x):
picks = tf.where(tf.greater_equal(x, 0), true_result, false_result)
return tf.identity(picks, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(9, "NonZero")
#@check_target("rs6", "onnxruntime Transpose type limitation")
def test_where_with_cond_only(self):
for np_type in [np.int32, np.float32]:
x_val = np.random.randint(0, 2, size=[10, 20, 30]).astype(np_type)
def func(x):
# FIXME: was tf_placeholder(tf_type, shape=[None] * x_val.ndim, name=_TFINPUT)
res = tf.where(x)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14", "tf.strings.lower")
@check_opset_min_version(10, "StringNormalizer")
def test_string_lower(self):
text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
def func(text1):
x = tf.strings.lower(text1)
x_ = tf.identity(x, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
@check_tf_min_version("1.14", "tf.strings.lower")
@check_opset_min_version(10, "StringNormalizer")
def test_string_lower_flat(self):
text_val1 = np.array(["a", "Test 1 2 3", "♠♣", "Hi there", "test test", "♥♦"], dtype=np.str)
def func(text1):
x = tf.strings.lower(text1)
x_ = tf.identity(x, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
@check_tf_min_version("1.14", "tf.strings.lower")
@check_opset_min_version(10, "StringNormalizer")
def test_string_upper(self):
text_val1 = np.array([["a", "Test 1 2 3", "♠♣"], ["Hi there", "test test", "♥♦"]], dtype=np.str)
def func(text1):
x = tf.strings.upper(text1)
x_ = tf.identity(x, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: text_val1})
@check_opset_min_version(6, "cast")
def test_shape_int32(self):
x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]], dtype=np.float32)
def func(x):
x_ = tf.multiply(x, x)
x_ = tf.shape(x_, out_type=tf.int32)
return tf.identity(x_, name=_TFOUTPUT)
kwargs = {"check_dtype": True}
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, **kwargs)
@unittest.skipIf(get_test_config().is_onnxruntime_backend and get_test_config().opset < 7,
"mul-1, mul-6 not supported in onnxruntime. conversion is covered since opset6")
def test_shape_int64(self):
x_val = np.array([[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]], dtype=np.float32)
def func(x):
x_ = tf.multiply(x, x)
x_ = tf.shape(x_, out_type=tf.int64)
return tf.identity(x_, name=_TFOUTPUT)
kwargs = {"check_dtype": True}
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, **kwargs)
# @check_opset_min_version(7, "broadcasting op")
@unittest.skip("disable it for now, since fold const has bug")
def test_softmax_cross_entropy_with_logits(self):
num_class = 5
data_shape = [100, num_class]
for np_dtype in [np.int32, np.int64]:
label_val = np.random.randint(0, num_class - 1, data_shape).astype(np_dtype)
logits_val = np.random.random(data_shape).astype(np.float32)
def func(label, logits):
res1 = tf.nn.softmax_cross_entropy_with_logits_v2(labels=label, logits=logits)
return tf.identity(res1, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val}, atol=1e-5)
@check_opset_min_version(9, "sparse_softmax_cross_entropy_with_logits")
def test_sparse_softmax_cross_entropy_with_logits(self):
# FIXME: fails for opset 8 on onnxruntime-1.0, disable for now
num_class = 5
label_val = np.array([3, 2, 0, 4]).astype(np.int32)
logits_val = np.random.random((len(label_val), num_class)).astype(np.float32)
def func(label, logits):
res1 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)
return tf.identity(res1, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val})
@check_target('rs6', 'SparseSoftmaxCrossEntropyWithLogits')
def test_sparse_softmax_cross_entropy_with_logits_large_class(self):
num_class = 30000
label_val = np.array([3374, 2127, 10002, 48]).astype(np.int32)
logits_val = np.random.random((len(label_val), num_class)).astype(np.float32)
def func(label, logits):
res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: label_val, _INPUT1: logits_val}, rtol=1e-6)
def test_matrix_band_part(self):
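        # band_part(x, -1, 0) keeps the lower triangular part and band_part(x, 0, -1)
        # keeps the upper triangular part; a negative bound means "keep everything"
        # on that side of the diagonal.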
input_val = np.random.randint(0, 666, (10, 15)).astype(np.int32)
def func(input_x):
res = tf.linalg.band_part(input_x, -1, 0)
res1 = tf.linalg.band_part(input_x, 0, -1)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
def test_matrix_band_part_2(self):
input_val = np.random.randint(0, 666, (1, 1)).astype(np.int32)
def func(input_x):
res = tf.linalg.band_part(input_x, -1, 0)
res1 = tf.linalg.band_part(input_x, 0, -1)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
@check_opset_min_version(11, "CumSum")
def test_matrix_band_part_3(self):
for low, high in [(-1, 3), (2, 3), (4, 3), (0, -1), (0, 0), (-1, -1)]:
input_val = np.random.randint(0, 666, (10, 15)).astype(np.int32)
def func(input_x):
res = tf.linalg.band_part(input_x, low, high)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(11, "CumSum")
def test_matrix_band_part_4(self):
for low, high in [(-1, 3), (2, 3), (4, 3), (0, -1), (0, 0)]:
input_val = np.random.randint(0, 666, (2, 3, 10, 15)).astype(np.int32)
def func(input_x):
res = tf.linalg.band_part(input_x, low, high)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(11, "CumSum")
def test_matrix_band_part_5(self):
for low_val, high_val in [(2, 3), (4, 3), (0, 0), (2, 0)]:
low_val = np.array(low_val, np.int32)
high_val = np.array(high_val, np.int32)
input_val = np.random.randint(0, 666, (2, 3, 10, 15)).astype(np.int32)
def func(input_x, low, high):
res = tf.linalg.band_part(input_x, low, high)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: low_val, _INPUT2: high_val})
def test_floordiv(self):
input_val_1 = np.random.random_sample(100).astype(np.int32)
input_val_2 = (np.random.random_sample(100) + 1).astype(np.int32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
input_val_1 = np.random.random_sample(100).astype(np.float32)
input_val_2 = (np.random.random_sample(100) + 1).astype(np.float32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
# test broadcasting
input_val_1 = np.random.random_sample((10, 50)).astype(np.float32)
input_val_2 = (np.random.random_sample(50) + 1).astype(np.float32)
def func(input_1, input_2):
res = tf.math.floordiv(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
def test_floormod(self):
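        # floormod(x, y) = x - floor(x / y) * y; the result takes the sign of the
        # divisor, unlike a truncated modulo.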
input_val_1 = 100 * np.random.random_sample(100).astype(np.int32)
input_val_2 = (100 * np.random.random_sample(100) + 1).astype(np.int32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2})
input_val_1 = 100 * np.random.random_sample(100).astype(np.float32)
input_val_2 = (100 * np.random.random_sample(100) + 1).astype(np.float32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2}, rtol=1e-5)
# test broadcasting case
input_val_1 = (50 * np.random.random_sample((10, 50)) + 1).astype(np.float32)
input_val_2 = (50 * np.random.random_sample(50) + 1).astype(np.float32)
def func(input_1, input_2):
res = floormod(input_1, input_2)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val_1, _INPUT1: input_val_2}, rtol=1e-4)
def test_logical_not(self):
input_val = np.random.randint(0, 2, (10, 20)).astype(np.bool)
def func(x):
res = tf.logical_not(x)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
def test_reduce_all(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_all(input_tensor=x, keepdims=False)
res1 = tf.reduce_all(input_tensor=x, axis=[0], keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(input_x):
res = tf.reduce_all(input_tensor=input_x, keepdims=True)
res1 = tf.reduce_all(input_tensor=input_x, axis=[0], keepdims=True)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
def test_reduce_any(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_any(input_tensor=x, keepdims=False)
res1 = tf.reduce_any(input_tensor=x, axis=[0], keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_any(input_tensor=x, keepdims=True)
res1 = tf.reduce_any(input_tensor=x, axis=[0], keepdims=True)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
@check_opset_min_version(11, "ReduceMin")
def test_reduce_all_negative_axis(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_all(input_tensor=x, keepdims=False)
res1 = tf.reduce_all(input_tensor=x, axis=[-1], keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(input_x):
res = tf.reduce_all(input_tensor=input_x, keepdims=True)
res1 = tf.reduce_all(input_tensor=input_x, axis=[-1], keepdims=True)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
@check_opset_min_version(11, "ReduceSum")
def test_reduce_any_negative_axis(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_any(input_tensor=x, keepdims=False)
res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_any(input_tensor=x, keepdims=True)
res1 = tf.reduce_any(input_tensor=x, axis=[-1], keepdims=True)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
@check_opset_min_version(11, "ReduceSum")
@check_tf_min_version("1.15")
def test_reduce_any_empty_axis(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_any(input_tensor=x, keepdims=False)
res1 = tf.reduce_any(input_tensor=x, axis=[], keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
def test_reduce_all_scalar_axis(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
def func(x):
res = tf.reduce_all(input_tensor=x, keepdims=False)
res1 = tf.reduce_all(input_tensor=x, axis=0, keepdims=False)
return tf.identity(res, name=_TFOUTPUT), tf.identity(res1, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: input_val})
@check_opset_min_version(13, "ReduceSum")
@check_tf_min_version("1.15")
def test_reduce_any_nonconst_axis(self):
input_val = np.random.randint(0, 2, (2, 20)).astype(np.bool)
y_val = np.array([1], np.int32)
def func(x, y):
res = tf.reduce_any(input_tensor=x, axis=y, keepdims=False)
return tf.identity(res, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: y_val})
@check_opset_min_version(7, "fill")
def test_zeros_like(self):
input_x = np.random.random_sample([10, 20]).astype(np.float32)
input_y = np.array([20, 10]).astype(np.int64)
def func(x, y):
z = tf.reshape(x, y)
return tf.zeros_like(z, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x, _INPUT1: input_y})
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x.astype(np.int32), _INPUT1: input_y})
@check_opset_min_version(8, "BroadcastTo")
def test_zeros_like_bool(self):
input_x = np.random.random_sample([10, 20]).astype(np.float32)
input_y = np.array([20, 10]).astype(np.int64)
def func(x, y):
z = tf.reshape(x, y)
return tf.zeros_like(z, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x > 0.5, _INPUT1: input_y})
@check_opset_min_version(9, "is_nan")
def test_isnan(self):
# only compatible with dtype `float32`
x_val1 = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
x_val2 = np.array([np.nan, np.nan, np.nan, np.nan], dtype=np.float32).reshape((2, 2))
x_val3 = np.array([1.0, np.nan, -3.0, np.nan], dtype=np.float32).reshape((2, 2))
for x_val in [x_val1, x_val2, x_val3]:
def func(x):
x_ = is_nan(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_ceil(self):
x_val = np.array([-1.5, 1.2], dtype=np.float32)
def func(x):
x_ = tf.math.ceil(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_softplus(self):
x_val = np.array([-1, 0, 1], dtype=np.float32)
def func(x):
x_ = tf.math.softplus(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_softsign(self):
x_val = np.array([-1, 0, 1], dtype=np.float32)
def func(x):
x_ = tf.math.softsign(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_batch_to_spacend(self):
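        # batch_to_space_nd splits the batch dim by prod(block_size), interleaves
        # those blocks back into the spatial dims and then applies the crops;
        # the batch size (40) must be divisible by prod(block_size) (4).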
block_size = [2, 2]
crop = [[1, 0], [2, 1]]
input_val = np.random.random_sample([40, 3, 5, 100]).astype(np.float32)
def func(x):
return batch_to_space_nd(x, block_size, crop, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(11, "BatchToSpaceND")
@unittest.skip("this was recently removed - but don't we want this to work ?")
def test_batch_to_spacend_non_const(self):
def func(input_x, block_shape, crops):
return batch_to_space_nd(input_x, block_shape, crops, name=_TFOUTPUT)
input_x_val = np.random.random_sample([40, 3, 5, 100]).astype(np.float32) # NHWC
block_shape_val = np.array([2, 2]).astype(np.int64)
crops_val = np.array([[1, 0], [2, 1]]).astype(np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_shape_val, _INPUT2: crops_val})
@check_opset_min_version(11, "SpaceToBatchND")
@unittest.skip("this was recently removed - but don't we want this to work ?")
def test_space_to_batchnd_non_const(self):
input_x_val = np.random.random_sample([40, 5, 7, 66]).astype(np.float32) # NHWC
def func(input_x, block_size, pad):
return batch_to_space_nd(input_x, block_size, pad, name=_TFOUTPUT)
block_size_val = np.array([2, 2]).astype(np.int64)
pad_val = np.array([[0, 1], [2, 1]]).astype(np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT1: block_size_val, _INPUT2: pad_val})
@check_opset_min_version(11, "BatchToSpaceND")
def test_batch_to_spacend_non_const_7d(self):
x_type, y_type, z_type = np.float32, np.int64, np.int64
        # test 3D up to 7D input tensors
for x_shape in [[12, 4, 4], [12, 4, 8, 3], [12, 4, 8, 3, 2], [12, 4, 8, 3, 2, 3], [12, 4, 8, 3, 2, 1, 3]]:
            # test 1D up to 2D block shapes
for block_shape in [[2, 3], [2, 2], [2]]:
# crop 1 layer at end of each dim
# x and z can be dynamic.
# y = block_shape cannot be dynamic without change to Transpose op spec
crops = [[0, 1] for dim in block_shape]
y_val = np.array(block_shape).astype(y_type)
x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
z_val = np.array(crops).astype(z_type)
def func(x, z):
y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
return batch_to_space_nd(x, y, z, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})
def test_depthwise_dilations_pattern(self):
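        # SpaceToBatchND -> DepthwiseConv2d -> BatchToSpaceND is the pattern TF
        # uses to express a dilated (atrous) depthwise convolution; the converter
        # is expected to recognize and handle this pattern.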
x_val = np.random.random_sample([1, 33, 34, 960]).astype(np.float32)
kernel = np.random.random_sample([3, 3, 960, 1]).astype(np.float32)
block_size = np.array([3, 3], np.int64)
pad = np.array([[2, 4], [5, 3]])
crop = np.array([[0, 0], [0, 0]])
def func(x):
y = space_to_batch_nd(x, block_size, pad)
z = tf.nn.depthwise_conv2d(y, kernel, strides=[1, 1, 1, 1], padding='VALID')
return batch_to_space_nd(z, block_size, crop, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "SpaceToBatchND")
def test_space_to_batchnd_non_const_7d(self):
x_type, y_type, z_type = np.float32, np.int64, np.int64
        # test 3D up to 7D input tensors
for x_shape in [[2, 4, 4], [1, 4, 8, 3], [1, 4, 8, 3, 2], [1, 4, 8, 3, 2, 3], [1, 4, 8, 3, 2, 1, 3]]:
            # test 1D up to 2D block shapes
for block_shape in [[2], [2, 2]]:
# pad 1 layer at begin and end of each dim
pads = [[1, 1] for dim in block_shape]
y_val = np.array(block_shape).astype(y_type)
x_val = np.array([x + 1 for x in range(0, np.prod(x_shape))], dtype=x_type).reshape(x_shape)
z_val = np.array(pads).astype(z_type)
# x and z can be dynamic.
# y = block_shape cannot be dynamic without change to Transpose op spec
def func(x, z):
y = tf.constant(dtype=y_type, value=y_val, shape=y_val.shape, name=_TFINPUT1)
return space_to_batch_nd(x, y, z, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: z_val})
@check_opset_min_version(10, "CropAndResize")
def test_crop_and_resize(self):
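        # crop_and_resize extracts crops defined by normalized box coordinates
        # [y1, x1, y2, x2] from the batch images selected by box_ind and resizes
        # each crop to the requested size with the given method.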
boxes_val = [[0.5, 0.7, 0.7, 0.9], [0.2, 0.4, 0.4, 0.6]]
def func(input_x, box_ind):
boxes = tf.constant(boxes_val, dtype=tf.float32)
corp_size = tf.constant(np.array([20, 20]).astype(np.int32))
return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='bilinear')
input_x_val = np.random.randint(low=0, high=256, size=[2, 36, 36, 3]).astype(np.float32) # NHWC
box_ind_val = np.array([1, 0]).astype(np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_x_val, _INPUT2: box_ind_val},
rtol=1e-04, atol=1e-03)
@check_opset_min_version(11, "CropAndResize")
def test_crop_and_resize_linear(self):
def func(input_x, boxes, box_ind, corp_size):
return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='bilinear')
input_x_val = np.random.randint(low=0, high=256, size=[2, 36, 36, 3]).astype(np.float32) # NHWC
boxes_val = np.array([[0.5, 0.7, 0.7, 0.9], [0.2, 0.4, 0.4, 0.6]]).astype(np.float32)
box_ind_val = np.array([1, 0]).astype(np.int32)
corp_size_val = np.array([20, 20]).astype(np.int32)
self._run_test_case(func, [_OUTPUT],
{_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
rtol=1e-05, atol=1e-04)
@check_tf_min_version("1.9")
@check_opset_min_version(11, "CropAndResize")
def test_crop_and_resize_nearest(self):
def func(input_x, boxes, box_ind, corp_size):
return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, method='nearest')
input_x_val = np.random.randint(low=0, high=256, size=[1, 36, 36, 3]).astype(np.float32) # NHWC
boxes_val = np.array([[0.2, 0.4, 0.6, 0.8]]).astype(np.float32)
box_ind_val = np.array([0]).astype(np.int32)
corp_size_val = np.array([30, 30]).astype(np.int32)
self._run_test_case(func, [_OUTPUT],
{_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
rtol=1e-05, atol=1e-04)
@check_opset_min_version(11, "CropAndResize")
def test_crop_and_resize_extrapolation(self):
def func(input_x, boxes, box_ind, corp_size):
return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, extrapolation_value=1.0)
input_x_val = np.random.randint(low=0, high=256, size=[1, 36, 36, 3]).astype(np.float32) # NHWC
boxes_val = np.array([[0.2, 0.4, 1.2, 1.4]]).astype(np.float32)
box_ind_val = np.array([0]).astype(np.int32)
corp_size_val = np.array([40, 40]).astype(np.int32)
self._run_test_case(func, [_OUTPUT],
{_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
rtol=1e-04, atol=1e-03)
@check_opset_min_version(11, "CropAndResize")
def test_crop_and_resize_empty_tensor(self):
def func(input_x, boxes, box_ind, corp_size):
return tf.image.crop_and_resize(input_x, boxes, box_ind, corp_size, name=_TFOUTPUT, extrapolation_value=1.0)
input_x_val = np.random.randint(low=0, high=256, size=[0, 36, 36, 3]).astype(np.float32) # NHWC
boxes_val = np.array([]).astype(np.float32).reshape([0, 4])
box_ind_val = np.array([]).astype(np.int32)
corp_size_val = np.array([40, 40]).astype(np.int32)
self._run_test_case(func, [_OUTPUT],
{_INPUT: input_x_val, _INPUT1: boxes_val, _INPUT2: box_ind_val, _INPUT3: corp_size_val},
rtol=1e-04, atol=1e-03)
def test_batch_to_space3d(self):
block_size = [2, 2]
crop = [[0, 1], [2, 1]]
input_val = np.random.random_sample([40, 3, 100]).astype(np.float32)
def func(x):
return batch_to_space_nd(x, block_size, crop, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
def test_space_to_batchnd(self):
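        # space_to_batch_nd is the inverse direction: it pads the spatial dims
        # according to pad, then moves spatial blocks of size block_size into the
        # batch dimension (batch grows by prod(block_size)).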
block_size = [2, 2]
pad = [[0, 1], [2, 1]]
input_val = np.random.random_sample([40, 5, 7, 66]).astype(np.float32)
def func(x):
return space_to_batch_nd(x, block_size, pad, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
pad = [[0, 0], [1, 2]]
input_val = np.random.random_sample([10, 6, 7, 66]).astype(np.float32)
def func(x):
return space_to_batch_nd(x, block_size, pad, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(10, "is_inf")
def test_isinf(self):
x_types = [np.float32, np.float64]
for x_type in x_types:
x_val1 = np.array([1.0, -2.0, 3.0, -4.0], dtype=x_type)
x_val2 = np.array([np.inf, np.inf, np.inf, np.inf], dtype=x_type).reshape((2, 2))
x_val3 = np.array([1.0, np.inf, -3.0, np.inf, 5.0, np.inf, -7.0, np.inf], dtype=x_type).reshape((2, 2, 2))
for x_val in [x_val1, x_val2, x_val3]:
def func(x):
x_ = is_inf(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("2.3")
@check_opset_min_version(10, "NonMaxSuppression")
def test_non_max_suppression_v2(self):
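        # NMS greedily keeps boxes in descending score order and drops any box
        # whose IoU with an already selected box exceeds iou_threshold, up to
        # max_output_size results.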
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
res1 = tf.raw_ops.NonMaxSuppressionV2(boxes=boxes, scores=scores,
max_output_size=int(box_num / 2), iou_threshold=0.5)
res2 = tf.raw_ops.NonMaxSuppressionV2(boxes=boxes, scores=scores,
max_output_size=0, iou_threshold=0.5)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("2.3")
@check_opset_min_version(10, "NonMaxSuppression")
def test_non_max_suppression_v3(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
res1 = tf.raw_ops.NonMaxSuppressionV3(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=int(box_num / 2), iou_threshold=0.5)
res2 = tf.raw_ops.NonMaxSuppressionV3(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=0, iou_threshold=0.5)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("2.3")
@check_opset_min_version(10, "NonMaxSuppression")
@skip_tfjs("TFJS executes model incorrectly")
def test_non_max_suppression_v4(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func1(boxes, scores):
res1, res2 = tf.raw_ops.NonMaxSuppressionV4(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=int(box_num / 2), iou_threshold=0.5)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
self._run_test_case(func1, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
def func2(boxes, scores):
res1, res2 = tf.raw_ops.NonMaxSuppressionV4(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=2 * box_num, iou_threshold=0.5,
pad_to_max_output_size=True)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
self._run_test_case(func2, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("2.3")
@check_opset_min_version(10, "NonMaxSuppression")
def test_non_max_suppression_v5(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func1(boxes, scores):
res1, res2, res3 = tf.raw_ops.NonMaxSuppressionV5(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=int(box_num / 2), iou_threshold=0.5,
soft_nms_sigma=0)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1), \
tf.identity(res3, name=_TFOUTPUT2)
self._run_test_case(func1, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: boxes_val, _INPUT1: scores_val})
def func2(boxes, scores):
res1, res2, res3 = tf.raw_ops.NonMaxSuppressionV5(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size=2 * box_num, iou_threshold=0.5,
soft_nms_sigma=0, pad_to_max_output_size=True)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1), \
tf.identity(res3, name=_TFOUTPUT2)
self._run_test_case(func2, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_opset_min_version(10, "NonMaxSuppression")
def test_non_max_suppression(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
res1 = tf.image.non_max_suppression(boxes, scores, max_output_size=int(box_num / 2))
res2 = tf.image.non_max_suppression(boxes, scores, max_output_size=0)
return tf.identity(res1, name=_TFOUTPUT), tf.identity(res2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_opset_min_version(10, "NonMaxSuppression")
@allow_missing_shapes("TF shape inference misses reshape to scalar")
@skip_tfjs("TFJS executes model incorrectly")
def test_non_max_suppression_v4_padded(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
ret1, ret2 = tf.image.non_max_suppression_padded(boxes, scores, max_output_size=int(box_num * 2),
pad_to_max_output_size=True)
return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_opset_min_version(10, "NonMaxSuppression")
@allow_missing_shapes("TF shape inference misses reshape to scalar")
@skip_tfjs("TFJS executes model incorrectly")
def test_non_max_suppression_v4_no_padding(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
ret1, ret2 = tf.image.non_max_suppression_padded(boxes, scores, max_output_size=int(box_num),
pad_to_max_output_size=False)
return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("1.15")
@check_opset_min_version(10, "NonMaxSuppression")
    def test_non_max_suppression_with_scores(self):
box_num = 10
boxes_val = np.random.random_sample([box_num, 4]).astype(np.float32)
scores_val = np.random.random_sample([box_num]).astype(np.float32)
def func(boxes, scores):
ret1, ret2 = tf.image.non_max_suppression_with_scores(boxes, scores, max_output_size=int(box_num / 2),
soft_nms_sigma=0.0)
return tf.identity(ret1, name=_TFOUTPUT), tf.identity(ret2, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("2.3")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_combined_non_max_suppression_pad_and_clip(self):
batch_size = 8
box_num = 10
classes_num = 2
max_total_size = 9
boxes_val = np.random.random_sample([batch_size, box_num, 1, 4]).astype(np.float32) * 2 - 0.5
scores_val = np.random.random_sample([batch_size, box_num, classes_num]).astype(np.float32)
def func(boxes, scores):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = \
tf.image.combined_non_max_suppression(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size_per_class=3, max_total_size=max_total_size,
iou_threshold=0.5, pad_per_class=True, clip_boxes=True)
out1 = tf.identity(nmsed_boxes, name=_TFOUTPUT)
out2 = tf.identity(nmsed_scores, name=_TFOUTPUT1)
out3 = tf.identity(nmsed_classes, name=_TFOUTPUT2)
out4 = tf.identity(valid_detections, name=_TFOUTPUT3)
return out1, out2, out3, out4
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: boxes_val, _INPUT1: scores_val})
@check_tf_min_version("2.3")
@check_opset_min_version(12, "GatherND with batch_dims")
def test_combined_non_max_suppression_no_pad_no_clip(self):
batch_size = 8
box_num = 10
classes_num = 2
max_total_size = 9
boxes_val = np.random.random_sample([batch_size, box_num, 1, 4]).astype(np.float32) * 2 - 0.5
scores_val = np.random.random_sample([batch_size, box_num, classes_num]).astype(np.float32)
def func(boxes, scores):
nmsed_boxes, nmsed_scores, nmsed_classes, valid_detections = \
tf.image.combined_non_max_suppression(boxes=boxes, scores=scores, score_threshold=0.1,
max_output_size_per_class=3, max_total_size=max_total_size,
iou_threshold=0.5, pad_per_class=False, clip_boxes=False)
out1 = tf.identity(nmsed_boxes, name=_TFOUTPUT)
out2 = tf.identity(nmsed_scores, name=_TFOUTPUT1)
out3 = tf.identity(nmsed_classes, name=_TFOUTPUT2)
out4 = tf.identity(valid_detections, name=_TFOUTPUT3)
return out1, out2, out3, out4
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: boxes_val, _INPUT1: scores_val})
def _conv1d_test(self, x_val, w, stride=None, padding="VALID", rtol=1e-07):
if stride is None:
stride = 1
def func(x):
kernel = tf.constant(w, dtype=tf.float32, name='k')
conv = tf.nn.conv1d(x, kernel, stride=stride, padding=padding)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=rtol)
def test_conv1d_1(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w)
def test_conv1d_2(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, stride=2)
def test_conv1d_3(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, padding="SAME")
def test_conv1d_4(self):
x_val = make_xval((1, 7, 1))
w = np.array([2., 1., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w, rtol=1e-05)
def test_conv1d_5(self):
x_val = make_xval((1, 7, 1))
w = np.array([3., 3., 3.], dtype=np.float32).reshape(3, 1, 1)
self._conv1d_test(x_val, w)
@check_opset_min_version(10, "ThresholdedRelu")
def test_thresholded_relu(self):
# tf.keras.layers.ThresholdedReLU only supports `float32` for x
x_val = np.array([0.0, 1.0, -1.0, 2.0, -2.0, 0.5, -0.5, 1.5, -1.5], dtype=np.float32).reshape((3, 3))
theta_vals = [0.0, 0.5, 1.0, 2.0]
for theta_val in theta_vals:
def func(x):
t = tf.keras.layers.ThresholdedReLU(theta=theta_val)
x_ = t.call(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val},
graph_validator=lambda g: check_op_count(g, "ThresholdedRelu", 1))
@check_tf_min_version("1.13")
@check_opset_min_version(11, "MaxPoolWithArgmax")
def test_maxpoolwithargmax(self):
for p in get_maxpoolwithargmax_getdata():
_, padding, x_shape, ksize, strides = p
x_val = np.random.uniform(0, 10, x_shape)
def func(x):
mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding)
return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
self.logger.debug(str(p))
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
@check_tf_min_version("1.15")
@check_opset_min_version(11, "MaxPoolWithArgmax")
def test_maxpoolwithargmax_batch_in_index(self):
padding = 'SAME'
x_shape = [2, 10, 5, 3]
ksize = [1, 4, 4, 1]
strides = [1, 1, 1, 1]
x_val = np.random.uniform(0, 10, x_shape)
def func(x):
mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding, include_batch_in_index=True)
return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
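    # With include_batch_in_index=True the argmax output is a flat index into the whole input,
    # ((b * height + y) * width + x) * channels + c; with False the batch term is dropped.  A
    # sketch of that flattening (illustrative only, helper name is ours):
    def _argmax_flat_index_sketch(b, y, x, c, height, width, channels, include_batch_in_index):
        idx = (y * width + x) * channels + c
        if include_batch_in_index:
            idx += b * height * width * channels
        return idx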
@check_tf_min_version("1.15")
@check_opset_min_version(11, "MaxPoolWithArgmax")
def test_maxpoolwithargmax_unknown_c(self):
padding = 'SAME'
x_shape = [2, 10, 5, 1]
ksize = [1, 4, 4, 1]
strides = [1, 1, 1, 1]
x_val = np.random.uniform(0, 10, x_shape)
s_val = np.array([2, 10, 5, 4], np.int64)
def func(x, s):
x = tf.broadcast_to(x, s)
mp = tf.nn.max_pool_with_argmax(x, ksize, strides, padding=padding, include_batch_in_index=True)
return tf.identity(mp[0], name=_TFOUTPUT), tf.identity(mp[1], name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: s_val})
@check_opset_min_version(10, "Selu")
def test_selu(self):
x_val = np.random.random_sample([3]).astype(np.float32)
def func(x):
y = tf.nn.selu(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(8, "ClipByValue (needs broadcast)")
def test_clip_by_value(self):
# float32, dynamic min/max
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 8])
x_minval = np.array(8.5, dtype=np.float32)
x_maxval = np.array(16.5, dtype=np.float32)
def func(x, x_min, x_max):
y = tf.clip_by_value(x, x_min, x_max)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: x_minval, _INPUT2: x_maxval})
# float32, const min/max
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 8])
def func(x):
y = tf.clip_by_value(x, 8.5, 16.5)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
# int32, converter needs to cast, const min/max
x_val = np.arange(0, 24, dtype=np.int32).reshape([3, 8])
def func(x):
y = tf.clip_by_value(x, 8, 16)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_softmax(self):
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
def func(x):
y = tf.nn.softmax(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_log_softmax(self):
x_val = np.arange(0, 24, dtype=np.float32).reshape([3, 1, 8])
def func(x):
y = tf.nn.log_softmax(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Range")
def test_ctc_greedy_decoder(self):
x_val = np.random.uniform(size=(3, 4, 5)).astype(np.float32)
s_val = np.array([3, 3, 2, 3], np.int32)
def func(x, s):
[decoded], logits = tf.nn.ctc_greedy_decoder(x, s, merge_repeated=False)
r1 = tf.identity(decoded.indices, name=_TFOUTPUT)
r2 = tf.identity(decoded.values, name=_TFOUTPUT1)
r3 = tf.identity(decoded.dense_shape, name=_TFOUTPUT2)
r4 = tf.identity(logits, name=_TFOUTPUT3)
return r1, r2, r3, r4
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: x_val, _INPUT1: s_val})
@check_opset_min_version(11, "Range")
def test_ctc_greedy_decoder_merge_repeated(self):
x_val = np.random.uniform(size=(6, 4, 5)).astype(np.float32)
s_val = np.array([5, 6, 4, 6], np.int32)
def func(x, s):
[decoded], logits = tf.nn.ctc_greedy_decoder(x, s, merge_repeated=True)
r1 = tf.identity(decoded.indices, name=_TFOUTPUT)
r2 = tf.identity(decoded.values, name=_TFOUTPUT1)
r3 = tf.identity(decoded.dense_shape, name=_TFOUTPUT2)
r4 = tf.identity(logits, name=_TFOUTPUT3)
return r1, r2, r3, r4
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2, _OUTPUT3], {_INPUT: x_val, _INPUT1: s_val})
# test for gemm pattern0: alpha*A*B + beta*C
def test_gemm_pattern0(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
alpha = tf.constant(1.0, dtype=tf.float32)
beta = tf.constant(2.0, dtype=tf.float32)
mul1 = tf.multiply(alpha, tf.matmul(a, b))
mul2 = tf.multiply(beta, c)
x_ = mul1 + mul2
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
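    # The gemm pattern tests check that MatMul/Mul/Add subgraphs are fused into a single ONNX
    # Gemm node.  A minimal NumPy sketch of the identity being relied on (Gemm is spelled out by
    # hand here, illustrative only):
    def _gemm_identity_sketch(a, b, c, alpha=1.0, beta=2.0):
        fused = alpha * (a @ b) + beta * c  # what a single Gemm node computes
        unfused = np.add(np.multiply(alpha, np.matmul(a, b)), np.multiply(beta, c))
        assert np.allclose(fused, unfused)  # both graphs produce the same values
        return fused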
# test for gemm pattern1: alpha*A*B + C
def test_gemm_pattern1(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
alpha = tf.constant(1.0, dtype=tf.float32)
x_ = tf.multiply(alpha, tf.matmul(a, b)) + c
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern2: A*B + beta*C
def test_gemm_pattern2(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
beta = tf.constant(2.0, dtype=tf.float32)
x_ = tf.matmul(a, b) + tf.multiply(beta, c)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern3: A*B + C
def test_gemm_pattern3(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number)
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(m, k).astype("float32")
def func(a, b, c):
x_ = tf.matmul(a, b) + c
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
    # test for gemm pattern4: A*B + C via bias_add (C is a 1-D bias)
def test_gemm_pattern4(self):
max_number = 10
m = np.random.randint(max_number)
n = np.random.randint(max_number)
k = np.random.randint(max_number) # bias add requires 1D tensor
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(k).astype("float32")
def func(a, b, c):
x_ = tf.nn.bias_add(tf.matmul(a, b), c)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=lambda g: check_op_count(g, "Gemm", 1))
# test for gemm pattern0: alpha*A*B + beta*C
@check_opset_min_version(12, "Optimizer bug in ORT 1.2")
def test_gemm_pattern0_fail_broadcast(self):
# shapes (3, 3) * (3, 1) + (1, 4) => (3, 1) + (1, 4)
# c not uni-broadcastable to a * b, so should not use GEMM
m, n, k = 3, 3, 1
x_val1 = np.random.rand(m, n).astype("float32")
x_val2 = np.random.rand(n, k).astype("float32")
x_val3 = np.random.rand(k, 4).astype("float32")
def func(a, b, c):
alpha = tf.constant(1.0, dtype=tf.float32)
beta = tf.constant(2.0, dtype=tf.float32)
mul1 = tf.multiply(alpha, tf.matmul(a, b))
mul2 = tf.multiply(beta, c)
x_ = mul1 + mul2
return tf.identity(x_, name=_TFOUTPUT)
def graph_validator(g):
            if 'Gemm' in [n.type for n in g.get_nodes()]:
                return False
return True
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val1, _INPUT1: x_val2, _INPUT2: x_val3},
graph_validator=graph_validator)
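    # Gemm only allows C to be unidirectionally broadcast to the shape of A*B: aligned from the
    # trailing dimension, every dim of C must equal the target dim or be 1.  A small sketch of
    # that check under our reading of the ONNX broadcasting rules (helper name is ours):
    def _is_unidirectionally_broadcastable_sketch(c_shape, target_shape):
        if len(c_shape) > len(target_shape):
            return False
        for c_dim, t_dim in zip(reversed(c_shape), reversed(target_shape)):
            if c_dim not in (1, t_dim):
                return False
        return True
    # e.g. (1, 4) is not uni-broadcastable to (3, 1), so the test above expects no Gemm node.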
def test_graph_matcher(self):
shape = [2, 6]
x_val = np.random.random(shape).astype(np.float32)
y_val = np.random.random(shape).astype(np.float32)
z_val = np.random.random(shape).astype(np.float32)
def func(x, y, z):
tmp1 = x + y
tmp2 = x - y
tmp3 = tf.multiply(tmp1, z)
tmp4 = tf.multiply(tmp2, z)
return tf.add(tmp4, tmp3, name=_TFOUTPUT)
onnx_graph = self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
pattern = \
OpTypePattern('Add', name='output', inputs=[
OpTypePattern('Mul', inputs=[
OpTypePattern('Add', name='input1'),
OpTypePattern('*', name='input2')]),
OpTypePattern('Mul', inputs=[
OpTypePattern('Sub', name='input1'),
OpTypePattern('*', name='input2')])])
matcher = GraphMatcher(pattern, allow_reorder=False)
match_results = list(matcher.match_ops(onnx_graph.get_nodes()))
self.assertTrue(len(match_results) == 0)
matcher = GraphMatcher(pattern, allow_reorder=True)
match_results = list(matcher.match_ops(onnx_graph.get_nodes()))
self.assertTrue(len(match_results) == 1)
def test_add2(self):
x_val = np.array([1.0, 2.0, -3.0, -4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.add(x, x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "CumSum")
def test_cumsum(self):
x_val = np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32).reshape((2, 2))
def func(x):
x_ = tf.cumsum(x, axis=1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "CumSum")
def test_cumsum_axis1_reverse_exclusive(self):
x_val = np.array([1., 2., 3., 4.,
5., 6., 7., 8.,
9., 10., 11., 12.,
13., 14., 15., 16.,
17., 18., 19., 20.,
21., 22., 23., 24.], dtype=np.float32).reshape((2, 3, 4))
def func(x):
            x_ = tf.cumsum(x, axis=1, exclusive=True, reverse=True)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Round")
def test_round(self):
x_val = np.array([-0.7, -0.5, -0.0, 0.0, +0.0, 0.3, 0.5, 0.7, float('nan')], dtype=np.float32)
def func(x):
x_ = tf.round(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
def test_round_approx(self):
        # Lower opsets have no Round op, so we approximate it, forgoing round-half-to-even behavior
x_val = np.array([-0.7, -0.5, -0.0, 0.0, +0.0, 0.3, 1.5, 0.7, float('nan')], dtype=np.float32)
def func(x):
x_ = tf.round(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
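    # One way to approximate Round without the opset-11 op is floor(x + 0.5); it agrees with
    # round-half-to-even except at .5 ties whose nearest even value lies below (0.5, 2.5, -1.5,
    # ...), which is presumably why the test above uses 1.5 where test_round uses 0.5.  NumPy
    # sketch of that approximation (illustrative only, not necessarily the converter's graph):
    def _round_approx_sketch(x):
        return np.floor(x + 0.5)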
@check_opset_min_version(11, "Round")
def test_rint(self):
x_val = np.array([-2.7, -1.5, -0.0, +0.0, 0.3, 0.5, 1.5, 2.5, 3.4, 3.5, float('nan')], dtype=np.float32)
def func(x):
x_ = tf.math.rint(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Det")
@unittest.skip("unclear how this is called in tf-2, fix later")
def test_determinant(self):
x_val = np.array([1., 2., 3., 4., 1., 2.,
2., 1., 1., 3., 3., 1.,
1., 2., 3., 4., 1., 2.,
2., 1., 1., 3., 3., 1.],
dtype=np.float32).reshape((1, 2, 3, 2, 2))
def func(x):
x_ = tf.matrix_determinant(x)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "BitShift")
def test_bitshift_left(self):
x_val = np.array([16, 4, 1], dtype=np.int32)
y_val = np.array([1, 2, 3], dtype=np.int32)
def func(x, y):
x_ = tf.bitwise.left_shift(x, y)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11, "BitShift")
def test_bitshift_right(self):
info = np.iinfo(np.int32)
x_val = np.array([-1, 0, 1, info.max, info.min], dtype=np.int32)
def func(x):
x_ = tf.bitwise.right_shift(x, 1)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14", "tensor_scatter_nd_update needs tf 1.14")
@check_opset_min_version(11, "ScatterND")
def test_tensor_scatter_update(self):
x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
z_val = np.array([8, 11], dtype=np.int32).reshape((2))
def func(x, y, z):
x_ = tf.tensor_scatter_nd_update(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
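    # A minimal NumPy sketch of what tensor_scatter_nd_update / ONNX ScatterND compute for the
    # cases exercised here (illustrative helper, not a TF or ONNX API):
    def _scatter_nd_update_sketch(data, indices, updates):
        out = np.array(data)
        for idx, upd in zip(indices, updates):
            out[tuple(idx)] = upd  # each row of `indices` addresses one element or slice
        return out
    # e.g. _scatter_nd_update_sketch([10, 20, 30, 40], [[0], [2]], [8, 11]) -> [8, 20, 11, 40]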
@check_tf_min_version("1.15", "tensor_scatter_nd_update for strings needs tf 1.15")
@check_opset_min_version(11, "ScatterND")
@skip_tflite("Conversion crashes")
def test_tensor_scatter_update_str(self):
        x_val = np.array(['A', '♠♣♥♦', 'B', 'C'], dtype=str).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array(['☺', '11'], dtype=str).reshape((2))
def func(x, y, z):
x_ = tf.tensor_scatter_nd_update(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_tf_min_version("1.15", "tensor_scatter_nd_update for strings needs tf 1.15")
@check_opset_min_version(11, "ScatterND")
@skip_tflite("Conversion crashes")
def test_tensor_scatter_update_str_const(self):
        x_val = np.array(['A', '♠♣♥♦', 'B', 'C'], dtype=str).reshape((4))
        y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
        z_val = np.array(['☺', '11'], dtype=str).reshape((2))
def func(x, y):
z = tf.constant(z_val)
x_ = tf.tensor_scatter_nd_update(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_tf_min_version("1.14", "tensor_scatter_nd_update needs tf 1.14")
@check_opset_min_version(11, "ScatterND")
def test_tensor_scatter_update_cast_indices(self):
x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
y_val = np.array([0, 2], dtype=np.int32).reshape((2, 1))
z_val = np.array([8, 11], dtype=np.int32).reshape((2))
def func(x, y, z):
x_ = tf.tensor_scatter_nd_update(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_opset_min_version(16, "ScatterND")
def test_scatternd_add(self):
x_val = np.array([10, 20, 30, 40], dtype=np.int32).reshape((4))
y_val = np.array([0, 2], dtype=np.int64).reshape((2, 1))
z_val = np.array([20, 30], dtype=np.int32).reshape((2))
def func(x, y, z):
x_ = tf.tensor_scatter_nd_add(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_opset_min_version(11, "ScatterND")
def test_scatternd_1d(self):
x_val = np.array([4, 3, 1, 7], dtype=np.int32).reshape((4, 1))
y_val = np.array([9, 10, 11, 12], dtype=np.int64).reshape((4))
z_val = np.array([8], dtype=np.int32).reshape(1)
def func(x, y, z):
x_ = tf.scatter_nd(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_opset_min_version(11, "ScatterND")
def test_scatternd_3d(self):
x_val = np.array([0, 2], dtype=np.int32).reshape((2, 1))
y_val = np.array([[[5, 5, 5, 5], [6, 6, 6, 6],
[7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6],
[7, 7, 7, 7], [8, 8, 8, 8]]], dtype=np.float32).reshape((2, 4, 4))
z_val = np.array([4, 4, 4], dtype=np.int32).reshape(3)
def func(x, y, z):
x_ = tf.scatter_nd(x, y, z)
return tf.identity(x_, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val, _INPUT2: z_val})
@check_opset_min_version(11, "Unique")
def test_unique(self):
x_val = np.array([1, 2, 8, 1, 2, 2, 7, 7, 7, 1], dtype=np.float32)
def func(x):
x1_, _ = tf.unique(x)
y1 = tf.identity(x1_, name=_TFOUTPUT)
return y1
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "Unique")
def test_unique_indices_int64(self):
x_val = np.array([2, 3, 3, 6, 4, 1, 1], dtype=np.float32)
def func(x):
x1_, x2_ = tf.unique(x, out_idx=tf.int64)
y1 = tf.identity(x1_, name=_TFOUTPUT)
y2 = tf.identity(x2_, name=_TFOUTPUT1)
return y1, y2
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
@check_opset_min_version(11, "Unique")
def test_unique_indices_int32(self):
x_val = np.array([2, 3, 3, 6, 4, 1, 1], dtype=np.float32)
def func(x):
x1_, x2_ = tf.unique(x, out_idx=tf.int32)
y1 = tf.identity(x1_, name=_TFOUTPUT)
y2 = tf.identity(x2_, name=_TFOUTPUT1)
return y1, y2
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val})
@check_opset_min_version(11, "Unique")
def test_bincount(self):
x_val = np.array([5, 2, 3, 1, 3, 2, 7, 5, 9, 10], dtype=np.int32)
def func(x):
x_ = tf.math.bincount(x)
y_ = tf.identity(x_, name=_TFOUTPUT)
return y_
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@skip_tflite("Bug in tflite output shapes")
@skip_tfjs("TFJS executes model incorrectly")
@check_opset_min_version(11, "Unique")
@check_tf_min_version("2.3", "needs tf.math.bincount with axis attr")
def test_dense_bincount(self):
x_val = np.array([[5, 2, 3, 1, 3], [2, 7, 5, 9, 10]], dtype=np.int32)
y_val = np.array([[2.0, 1.5, 3.5, 4.5, 5.5], [6.5, 7.5, 8.5, 9.5, 10.5]], dtype=np.float32)
for a in [0, -1]:
for b in [True, False]:
def func(x, y):
x_ = tf.math.bincount(x, axis=a, binary_output=b)
y_ = tf.identity(x_, name=_TFOUTPUT)
return y_
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11, "ScatterND")
def test_sparse_to_dense(self):
i_val = np.array([[0, 0, 0], [0, 0, 2], [0, 1, 3], [1, 2, 2], [1, 2, 3]], dtype=np.int64)
v_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.float32)
ds_val = np.array([2, 3, 4], dtype=np.int64)
d_val = np.array(2.5, dtype=np.float32)
def func(indices, values, dense_shape, default):
st = tf.SparseTensor(indices, values, dense_shape)
dense = tf.sparse.to_dense(st, default, validate_indices=True)
x_ = tf.identity(dense, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: i_val, _INPUT1: v_val, _INPUT2: ds_val, _INPUT3: d_val})
@check_opset_min_version(11, "Unique")
def test_sparse_fill_empty_rows(self):
i_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
v_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.float32)
ds_val = np.array([5, 3, 4], dtype=np.int64)
d_val = np.array(2.5, dtype=np.float32)
def func(indices, values, dense_shape, default):
st = tf.SparseTensor(indices, values, dense_shape)
st_, indicator = tf.sparse.fill_empty_rows(st, default)
dense = tf.sparse.to_dense(st_, 0, validate_indices=False)
dense_ = tf.identity(dense, name=_TFOUTPUT)
indicator_ = tf.identity(indicator, name=_TFOUTPUT1)
return dense_, indicator_
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: i_val, _INPUT1: v_val, _INPUT2: ds_val, _INPUT3: d_val})
@check_opset_min_version(11, "CumSum")
def test_sparse_reshape(self):
indices_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
values_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.int64)
dense_shape_val = np.array([5, 3, 4], dtype=np.int64)
new_shape_val = np.array([2, -1, 1, 3], dtype=np.int64)
def func(indices, values, dense_shape, new_shape):
st = tf.SparseTensor(indices, values, dense_shape)
st_ = tf.sparse.reshape(st, new_shape)
indices_ = st_.indices
dense_shape_ = st_.dense_shape
indices_ = tf.identity(indices_, name=_TFOUTPUT)
dense_shape_ = tf.identity(dense_shape_, name=_TFOUTPUT1)
return indices_, dense_shape_
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: indices_val, _INPUT1: values_val,
_INPUT2: dense_shape_val, _INPUT3: new_shape_val})
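    # tf.sparse.reshape only rewrites indices and dense_shape: every old index is linearised with
    # the old shape and re-expressed in the new shape after resolving a single -1.  NumPy sketch
    # of that mapping (illustrative only, helper name is ours):
    def _sparse_reshape_indices_sketch(indices, old_shape, new_shape):
        new_shape = np.array(new_shape)
        if (new_shape == -1).any():  # infer the -1 dimension from the total element count
            known = np.prod(new_shape[new_shape != -1])
            new_shape[new_shape == -1] = np.prod(old_shape) // known
        flat = np.ravel_multi_index(np.asarray(indices).T, old_shape)
        new_indices = np.stack(np.unravel_index(flat, new_shape), axis=1)
        return new_indices, new_shape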
@check_opset_min_version(11, "CumSum")
def test_sparse_reshape_unknown_rank(self):
indices_val = np.array([[1, 0, 0], [1, 0, 2], [1, 1, 3], [3, 2, 2], [3, 2, 3]], dtype=np.int64)
values_val = np.array([1.5, 1.6, 1.7, 1.8, 1.9], dtype=np.int64)
dense_shape_val = np.array([5, 3, 4], dtype=np.int64)
new_shape_val = np.array([2, 10, 1, 3], dtype=np.int64)
shape_pad_val = np.zeros((1, 2), dtype=np.int64)
def func(indices, dense_shape, new_shape, shape_pad):
st = tf.SparseTensor(indices, values_val, dense_shape)
# Some hackery to make the rank unknown
new_shape_ = tf.pad(new_shape, shape_pad, constant_values=0)
st_ = tf.sparse.reshape(st, new_shape_)
indices_ = st_.indices
dense_shape_ = st_.dense_shape
indices_ = tf.identity(indices_, name=_TFOUTPUT)
dense_shape_ = tf.identity(dense_shape_, name=_TFOUTPUT1)
return indices_, dense_shape_
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: indices_val, _INPUT1: dense_shape_val,
_INPUT2: new_shape_val, _INPUT3: shape_pad_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "CumSum")
def test_ragged_tensor_to_sparse(self):
splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
def func(splits1, splits2, rt_dense_values):
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
s = x.to_sparse()
indices, values, shape = s.indices, s.values, s.dense_shape
indices = tf.identity(indices, name=_TFOUTPUT)
values = tf.identity(values, name=_TFOUTPUT1)
shape = tf.identity(shape, name=_TFOUTPUT2)
return indices, values, shape
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2],
{_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
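    # row_splits encode where each ragged row starts and ends in the flat values, e.g. splits
    # [0, 3, 3, 5] mean rows values[0:3], values[3:3] (empty) and values[3:5].  A sketch of how a
    # single splits level maps to the (row, col) sparse indices checked above (illustrative only):
    def _row_splits_to_sparse_indices_sketch(row_splits):
        indices = []
        for row, (start, stop) in enumerate(zip(row_splits[:-1], row_splits[1:])):
            for col in range(stop - start):
                indices.append([row, col])
        return np.array(indices, dtype=np.int64).reshape([-1, 2])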
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "CumSum")
def test_ragged_gather(self):
splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
indices_val = np.array([1, 3, 2, 0, 1, 1, 4, 3, 3], dtype=np.int32)
def func(splits, rt_dense_values, indices):
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits], validate=True)
g = tf.gather(x, indices)
rt_nested_splits = tf.identity(g.row_splits, name=_TFOUTPUT)
rt_dense_values = tf.identity(g.flat_values, name=_TFOUTPUT1)
return rt_nested_splits, rt_dense_values
self._run_test_case(func, [_OUTPUT, _OUTPUT1],
{_INPUT: splits_val, _INPUT1: dense_vals_val, _INPUT2: indices_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "CumSum")
@skip_tflite("unknown rank")
def test_ragged_tensor_to_tensor(self):
splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
dense_vals_val = np.array([10, 11, 12, 13, 14, 15, 16, 17, 18, 19], dtype=np.float32)
def func(splits1, splits2, rt_dense_values):
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
y = x.to_tensor(default_value=7)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "CumSum")
@skip_tflite("unknown rank")
def test_ragged_tensor_to_tensor_row_ids(self):
ids_val1 = np.array([0, 0, 0, 2, 2], dtype=np.int32)
ids_val2 = np.array([0, 0, 2, 2, 2, 3, 3, 4], dtype=np.int32)
dense_vals_val = make_xval([8, 2, 3])
def func(ids1, ids2, rt_dense_values):
x = tf.RaggedTensor.from_nested_value_rowids(rt_dense_values, [ids1, ids2], [4, 5])
y = x.to_tensor(default_value=7)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: ids_val1, _INPUT1: ids_val2, _INPUT2: dense_vals_val})
@check_tf_min_version("2.2", "ragged to_tensor with constrained shape")
@check_opset_min_version(11, "CumSum")
def test_ragged_tensor_to_tensor_constrain_shape(self):
splits_val1 = np.array([0, 1, 1, 5], dtype=np.int32)
splits_val2 = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
        dense_vals_val = make_xval([10, 2, 3])
def func(splits1, splits2, rt_dense_values):
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits1, splits2], validate=True)
y = x.to_tensor(default_value=7, shape=[20, None, 2, None, 3])
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: splits_val1, _INPUT1: splits_val2, _INPUT2: dense_vals_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "Range")
def test_ragged_range_float(self):
starts_val = np.array([0, 0, 1, 10, 0.5, 0.5], dtype=np.float32)
limits_val = np.array([-5, -2, 7, 100, 1, 1], dtype=np.float32)
deltas_val = np.array([-1, 1, 2, 20, 1, 1.1], dtype=np.float32)
def func(starts, limits, deltas):
x = tf.ragged.range(starts, limits, deltas)
rt_nested_splits = tf.identity(x.row_splits, name=_TFOUTPUT)
rt_dense_values = tf.identity(x.flat_values, name=_TFOUTPUT1)
return rt_nested_splits, rt_dense_values
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: starts_val, _INPUT1: limits_val,
_INPUT2: deltas_val})
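    # tf.ragged.range builds row i as start[i], start[i] + delta[i], ... while staying short of
    # limit[i]; the values are per-row ranges and row_splits are a cumulative sum of the row
    # lengths, hence the opset-11 Range requirement above.  NumPy sketch (illustrative only,
    # assumes inputs broadcast to 1-D):
    def _ragged_range_sketch(starts, limits, deltas):
        starts, limits, deltas = np.broadcast_arrays(starts, limits, deltas)
        lengths = np.maximum(np.ceil((limits - starts) / deltas), 0).astype(np.int64)
        row_splits = np.concatenate([[0], np.cumsum(lengths)])
        values = np.concatenate(
            [s + d * np.arange(n) for s, d, n in zip(starts, deltas, lengths)])
        return row_splits, values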
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "Range")
def test_ragged_range_int(self):
starts_val = np.array([0, 1, 3, 0, 0, 0, 0, 0, 0, 0, 0], dtype=np.int32)
limits_val = np.array([-6, -5, -4, -1, 0, 1, 4, 5, 6, 2, -2], dtype=np.int32)
deltas_val = np.array([-5, -5, -5, -5, 5, 5, 5, 5, 5, 1, -1], dtype=np.int32)
def func(starts, limits, deltas):
x = tf.ragged.range(starts, limits, deltas)
rt_nested_splits = tf.identity(x.row_splits, name=_TFOUTPUT)
rt_dense_values = tf.identity(x.flat_values, name=_TFOUTPUT1)
return rt_nested_splits, rt_dense_values
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: starts_val, _INPUT1: limits_val,
_INPUT2: deltas_val})
@check_tf_min_version("1.14", "ragged needs tf 1.14")
@check_opset_min_version(11, "Range")
def test_ragged_range_scalar(self):
starts_val = np.array(0, dtype=np.int32)
limits_val = np.array([5, -1, -1, 2, 7, 100, 4, 5, 6], dtype=np.int32)
deltas_val = np.array(1, dtype=np.int32)
def func(starts, limits, deltas):
x = tf.ragged.range(starts, limits, deltas)
rt_nested_splits = tf.identity(x.row_splits, name=_TFOUTPUT)
rt_dense_values = tf.identity(x.flat_values, name=_TFOUTPUT1)
return rt_nested_splits, rt_dense_values
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: starts_val, _INPUT1: limits_val,
_INPUT2: deltas_val})
@check_tf_min_version("2.2", "ragged variant needs tf 2.2")
@check_opset_min_version(13, "Loop over tensor sequences")
def test_ragged_to_variant(self):
splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int32)
dense_vals_val = np.arange(10 * 3 * 2, dtype=np.float32).reshape([10, 3, 2])
def fn(elem):
res = elem + elem * elem
return res
def func(splits, rt_dense_values):
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits], validate=True)
y = tf.map_fn(fn, x)
return tf.identity(y.row_splits, name=_TFOUTPUT), tf.identity(y.flat_values, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: splits_val, _INPUT1: dense_vals_val})
@check_tf_min_version("2.2", "ragged variant needs tf 2.2")
@check_opset_min_version(13, "Loop over tensor sequences")
def test_ragged_to_variant_unknown_shape(self):
splits_val = np.array([0, 3, 3, 5, 9, 10], dtype=np.int64)
dense_vals_shape = np.array([10, 3, 2], dtype=np.int32)
splits_pads_val = np.array([[0, 0]], dtype=np.int32)
def fn(elem):
res = elem + elem * elem
return res
def func(splits, rt_dense_values_shape, splits_pads):
rt_dense_values = tf.ones(rt_dense_values_shape, dtype=tf.int32)
splits = tf.pad(splits, splits_pads)
x = tf.RaggedTensor.from_nested_row_splits(rt_dense_values, [splits], validate=True)
y = tf.map_fn(fn, x)
return tf.identity(y.row_splits, name=_TFOUTPUT), tf.identity(y.flat_values, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1],
{_INPUT: splits_val, _INPUT1: dense_vals_shape, _INPUT2: splits_pads_val})
@check_opset_min_version(9, "Compress")
def test_dynamic_partition_both_vector(self):
data_val = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float32)
part_val = np.array([0, 0, 1, 1, 0, 2, 1, 0], dtype=np.int32)
def func(data, partitions):
p1, p2, p3 = tf.dynamic_partition(data, partitions, num_partitions=3)
p1_ = tf.identity(p1, name=_TFOUTPUT)
p2_ = tf.identity(p2, name=_TFOUTPUT1)
p3_ = tf.identity(p3, name=_TFOUTPUT2)
return p1_, p2_, p3_
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: data_val, _INPUT1: part_val})
@check_opset_min_version(9, "Compress")
def test_dynamic_partition_data_tensor(self):
data_val = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], dtype=np.float32)
part_val = np.array([0, 2, 1, 0, 1], dtype=np.int32)
def func(data, partitions):
p1, p2, p3 = tf.dynamic_partition(data, partitions, num_partitions=3)
p1_ = tf.identity(p1, name=_TFOUTPUT)
p2_ = tf.identity(p2, name=_TFOUTPUT1)
p3_ = tf.identity(p3, name=_TFOUTPUT2)
return p1_, p2_, p3_
self._run_test_case(func, [_OUTPUT, _OUTPUT1, _OUTPUT2], {_INPUT: data_val, _INPUT1: part_val})
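    # tf.dynamic_partition routes data[i] to output partitions[i], preserving order within each
    # partition; there is no single ONNX equivalent, which is why these tests require the opset-9
    # Compress op.  NumPy sketch of the semantics (illustrative only):
    def _dynamic_partition_sketch(data, partitions, num_partitions):
        data, partitions = np.asarray(data), np.asarray(partitions)
        return [data[partitions == p] for p in range(num_partitions)]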
@check_opset_min_version(11, "ScatterElements")
@unittest.skip("this test is failing for some opsets, disabled until fixed")
def test_dynamic_stitch_both_vector(self):
data_val = np.array([[5, 1, 3], [7, 2, 4]], dtype=np.float32)
indices_val = np.array([[0, 1, 4], [2, 3, 5]], dtype=np.int32)
def func(indices, data):
x = tf.dynamic_stitch(tf.unstack(indices), tf.unstack(data))
x_ = tf.identity(x, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: indices_val, _INPUT1: data_val})
@check_opset_min_version(11, "ScatterElements")
def test_dynamic_stitch_data_tensor(self):
data_val = np.arange(2 * 3 * 2 * 4, dtype=np.float32).reshape((2, 3, 2, 4))
indices_val = np.array([[0, 1, 4], [2, 3, 5]], dtype=np.int32)
def func(indices, data):
x = tf.dynamic_stitch(tf.unstack(indices), tf.unstack(data))
x_ = tf.identity(x, name=_TFOUTPUT)
return x_
self._run_test_case(func, [_OUTPUT], {_INPUT: indices_val, _INPUT1: data_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const(self):
input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
padding='SAME', name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_strided(self):
input_sizes_val_ = np.array([1, 10, 10, 3], dtype=np.int32)
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 2, 2, 1],
padding='SAME', name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})
@check_tf_min_version("1.15", "tf.repeat needs tf 1.15")
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_shape_implied(self):
batch_dim_val = np.array(1, dtype=np.int32)
def func(filter_val, out_backprop_val, batch_dim):
out_backprop_val = tf.repeat(out_backprop_val, batch_dim, axis=0)
s = tf.shape(out_backprop_val)
t1 = tf.constant([0], dtype=tf.int32)
t2 = tf.constant([1], dtype=tf.int32)
batch_dim = tf.strided_slice(s, t1, t2, shrink_axis_mask=1)
# Sometimes the size given is a stack of constants with unknown batch dim
input_sizes_val = tf.stack([batch_dim, 10, 10, 3])
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 2, 2, 1],
padding='SAME', name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
def graph_validator(g):
for n in g.get_nodes():
if n.type == 'ConvTranspose':
return "pads" in n.attr or "output_shape" in n.attr
return False
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val, _INPUT2: batch_dim_val},
graph_validator=graph_validator)
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_const_valid(self):
input_sizes_val_ = np.array([1, 12, 12, 3], dtype=np.int32)
def func(filter_val, out_backprop_val):
input_sizes_val = tf.constant(input_sizes_val_, dtype=tf.int32)
return conv2d_backprop_input(input_sizes=input_sizes_val, filter=filter_val,
out_backprop=out_backprop_val, strides=[1, 1, 1, 1],
padding='VALID', name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: filters_val, _INPUT1: out_backprop_val})
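    # conv2d_backprop_input is the gradient of a forward convolution, so input_sizes has to be
    # consistent with out_backprop's spatial dims.  Under our reading of the TF padding rules,
    # SAME gives in = out * stride and VALID gives in = (out - 1) * stride + kernel; a sketch
    # consistent with the shapes chosen in these tests (illustrative only):
    def _conv_backprop_input_spatial_dim_sketch(out_dim, stride, kernel, padding):
        if padding == "SAME":
            return out_dim * stride
        return (out_dim - 1) * stride + kernel  # VALID
    # e.g. VALID with out=10, stride=1, kernel=3 gives 12, matching input_sizes [1, 12, 12, 3].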
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput(self):
def func(input_sizes, filters, out_backprop):
return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 1, 1, 1],
padding='SAME', name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
input_sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_strided(self):
def func(input_sizes, filters, out_backprop):
return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 2, 2, 1], padding='SAME',
name=_TFOUTPUT)
input_sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})
@check_opset_min_version(10, "Conv2DBackpropInput")
def test_Conv2DBackpropInput_valid(self):
def func(input_sizes, filters, out_backprop):
return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 1, 1, 1],
padding='VALID', name=_TFOUTPUT)
input_sizes_val = np.array([1, 12, 12, 3], dtype=np.int32)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})
@check_opset_min_version(12, "Conv2DBackpropInput with strided workaround")
def test_Conv2DBackpropInput_strided_same(self):
def func(input_sizes, filters, out_backprop):
return conv2d_backprop_input(input_sizes, filters, out_backprop, strides=[1, 5, 10, 1], padding='SAME',
name=_TFOUTPUT)
input_sizes_val = np.array([1, 10, 10, 3], dtype=np.int32)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 5]).astype(np.float32)
out_backprop_val = np.random.randint(low=0, high=256, size=[1, 2, 1, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_sizes_val, _INPUT1: filters_val, _INPUT2: out_backprop_val})
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const(self):
output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 1, 1, 1, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_strided(self):
output_shape_val_ = np.array([1, 10, 10, 10, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 2, 2, 2, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_const_valid(self):
output_shape_val_ = np.array([1, 12, 12, 12, 3], dtype=np.int32)
def func(value, filters):
output_shape_val = tf.constant(output_shape_val_, dtype=tf.int32)
return conv3d_transpose(value, filters, output_shape_val, strides=[1, 1, 1, 1, 1],
padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val}, rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[2, 3, 4, 4, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[2, 7, 8, 9, 5]).astype(np.float32)
output_shape_val = np.array([2, 7, 8, 9, 4], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_strided(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 2, 2, 2, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 5, 5, 5, 5]).astype(np.float32)
output_shape_val = np.array([1, 10, 10, 10, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)
@check_opset_min_version(10, "Conv3DBackpropInputV2")
def test_Conv3DBackpropInputV2_valid(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 1, 1, 1, 1],
padding='VALID', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=0, high=256, size=[3, 3, 3, 3, 5]).astype(np.float32)
value_val = np.random.randint(low=0, high=256, size=[1, 10, 10, 10, 5]).astype(np.float32)
output_shape_val = np.array([1, 12, 12, 12, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)
@check_opset_min_version(12, "Conv3DBackpropInputV2 with strided workaround")
def test_Conv3DBackpropInputV2_strided_same(self):
def func(value, filters, output_shape):
return conv3d_transpose(value, filters, output_shape, strides=[1, 10, 4, 3, 1],
padding='SAME', data_format="NDHWC", name=_TFOUTPUT)
filters_val = np.random.randint(low=1, high=256, size=[1, 1, 1, 1, 1]).astype(np.float32)
value_val = np.random.randint(low=1, high=256, size=[1, 3, 2, 5, 1]).astype(np.float32)
output_shape_val = np.array([1, 30, 8, 15, 1], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: value_val, _INPUT1: filters_val, _INPUT2: output_shape_val},
rtol=1e-6)
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
@skip_onnx_checker("ONNX can't do type inference on CategoryMapper")
def test_hashtable_lookup(self):
filnm = "vocab.tmp"
words = ["apple", "pear", "banana", "cherry", "grape"]
        query = np.array(['cherry'], dtype=object)
with open(filnm, "w") as f:
for word in words:
f.write(word + "\n")
def func(query_holder):
hash_table = lookup_ops.index_table_from_file(filnm)
lookup_results = hash_table.lookup(query_holder)
ret = tf.add(lookup_results, 0, name=_TFOUTPUT)
return ret
self._run_test_case(func, [_OUTPUT], {_INPUT: query}, as_session=True)
os.remove(filnm)
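    # index_table_from_file maps each word to its 0-based line number in the vocab file (unknown
    # words map to -1 by default), which matches the string-to-int mapping of the ONNX-ML
    # CategoryMapper op these tests require.  Plain-Python sketch of the lookup (illustrative only):
    def _vocab_lookup_sketch(words, queries):
        table = {word: i for i, word in enumerate(words)}
        return [table.get(q, -1) for q in queries]
    # e.g. _vocab_lookup_sketch(["apple", "pear", "banana", "cherry", "grape"], ["cherry"]) -> [3]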
@check_opset_min_version(8, "CategoryMapper")
@skip_tfjs("TFJS does not initialize table")
def test_hashtable_lookup_const(self):
filnm = "vocab.tmp"
words = ["apple", "pear", "banana", "cherry ♥", "grape"]
        query_val = np.array(['cherry ♥', 'banana'], dtype=object).reshape((1, 2, 1))
with open(filnm, "w", encoding='UTF-8') as f:
for word in words:
f.write(word + "\n")
def func():
hash_table = lookup_ops.index_table_from_file(filnm)
query = tf.constant(query_val)
lookup_results = hash_table.lookup(query)
ret = tf.add(lookup_results, 0, name=_TFOUTPUT)
return ret
self._run_test_case(func, [_OUTPUT], {}, as_session=True)
os.remove(filnm)
@skip_tfjs("TFJS does not initialize table")
def test_hashtable_size(self):
filnm = "vocab.tmp"
words = ["apple", "pear", "banana", "cherry", "grape"]
        query = np.array(['cherry'], dtype=object)
with open(filnm, "w") as f:
for word in words:
f.write(word + "\n")
def func(query_holder):
hash_table = lookup_ops.index_table_from_file(filnm)
lookup_size = hash_table.size()
ret = tf.add(lookup_size, 0, name=_TFOUTPUT)
return ret
self._run_test_case(func, [_OUTPUT], {_INPUT: query}, as_session=True)
os.remove(filnm)
@check_opset_min_version(11)
@skip_onnx_checker("Fails. Fix later.")
def test_matrix_diag_part(self):
input_vals = [
np.array([[[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15], [16, 17, 18, 19, 20]]], dtype=np.int64),
np.array([[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]], dtype=np.int64),
np.array([[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]],
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]]], dtype=np.int64)]
def func(input_holder):
return matrix_diag_part(input_holder, name=_TFOUTPUT)
for input_val in input_vals:
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val})
@check_opset_min_version(8)
def test_broadcast(self):
input_tensor_val = np.random.randint(low=0, high=256, size=[2, 3]).astype(np.float32)
new_shape_val = np.array([3, 2, 3]).astype(np.int64)
def func(input_tensor, new_shape):
return tf.broadcast_to(input_tensor, new_shape, _TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_tensor_val, _INPUT1: new_shape_val})
def test_bfloat(self):
x_val = np.array([0, 1, 2], dtype=np.float32)
y_val = np.array([3, 4, 5], dtype=np.float32)
def func(x, y):
x_ = tf.cast(x, tf.bfloat16)
y_ = tf.cast(y, tf.bfloat16)
s_ = tf.add(x_, y_)
return tf.cast(s_, tf.float32, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(11)
@check_tf_min_version("2.2")
@skip_onnx_checker("Fails. Fix later.")
def test_matrix_diag_part_v3(self):
def func(X, K):
v2 = tf.raw_ops.MatrixDiagPartV2(input=X, k=K, padding_value=0.123, name=_TFOUTPUT)
v3 = tf.raw_ops.MatrixDiagPartV3(input=X, k=K, padding_value=0.123, align='LEFT_RIGHT', name=_TFOUTPUT1)
return v2, v3
for x_shape in ([4, 5], [2, 3, 4, 5], [5, 4], [7, 5]):
x_val = np.random.random(x_shape).astype(np.float32)
for raw_k in ([0], [1], [3], [-1], [-3], [1, 2], [-2, -1], [-1, 1]):
k_val = np.array(raw_k).astype(np.int32)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: k_val})
@test_ms_domain()
def test_inverse(self, extra_opset):
        # this depends on onnx Inverse, which was removed from opset-12 but does exist in the ms-domain
x_val = np.random.random([5, 5]).astype(np.float32)
def func(x):
return tf.linalg.inv(x, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, process_args={"extra_opset": [extra_opset]})
@check_opset_min_version(12)
def test_squared_distance(self):
x_val = np.random.random([4, 5]).astype(np.float32)
y_val = np.random.random([4, 5]).astype(np.float32)
def func(x, y):
return tf.math.squared_difference(x, y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum(self):
x_val = np.random.random([10]).astype(np.float32)
y_val = np.random.random([10]).astype(np.float32)
def func(x, y):
ret = tf.einsum("i,j->ij", x, y)
return tf.identity(ret, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum_to_matmul(self):
x_val = np.random.random([4, 10, 20]).astype(np.float32)
y_val = np.random.random([20, 30]).astype(np.float32)
def func(x, y):
ret = tf.einsum("bik,kj->bij", x, y)
return tf.identity(ret, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
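    # "bik,kj->bij" is just a batched matrix product, which is what the converter is expected to
    # lower the einsum to; a quick NumPy sketch of the equivalence (illustrative only):
    def _einsum_as_matmul_sketch(x, y):
        assert np.allclose(np.einsum("bik,kj->bij", x, y), np.matmul(x, y))
        return np.matmul(x, y)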
@check_opset_min_version(12)
@check_tf_min_version("2.1")
def test_einsum_to_matmul_transpose(self):
x_val = np.random.random([4, 10, 20]).astype(np.float32)
y_val = np.random.random([30, 20]).astype(np.float32)
def func(x, y):
ret = tf.einsum("bik,jk->bij", x, y)
return tf.identity(ret, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT1: y_val})
@check_opset_min_version(7)
def test_compare(self):
x_val = np.random.random([10, 20]).astype(np.float32)
y_val = np.random.random([10, 20]).astype(np.float32)
def func(x, y):
return tf.math.less_equal(x, y, name=_TFOUTPUT), \
tf.math.greater_equal(x, y, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: x_val, _INPUT1: y_val})
@check_tf_min_version("1.14", "required for tf.math.is_finite")
@check_opset_min_version(10)
def test_is_finite(self):
x_val = np.array([5.0, 4.8, 6.8, np.inf, np.nan], dtype=np.float32)
def func(x):
y = tf.math.is_finite(x)
return tf.identity(y, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_multi_dim(self):
raw_diag = [[[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0],
[7.0, 8.0, 9.0]],
[[10.0, 11.0, 12.0],
[13.0, 14.0, 15.0],
[16.0, 17.0, 18.0]]]
diag_val = np.array(raw_diag).astype(np.float32)
k_val = np.array([-1, 1]).astype(np.int32)
row_val = np.array(-1).astype(np.int32)
col_val = np.array(-1).astype(np.int32)
def func(diag, k, row, col):
return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=0.123, align='RIGHT_RIGHT', name=_TFOUTPUT), \
tf.raw_ops.MatrixDiagV2(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=0.123, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1], {_INPUT: diag_val, _INPUT1: k_val,
_INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_multi_dim_min_row(self):
raw_diag = [[[1.0, 2.0, 3.0],
[4.0, 5.0, 6.0]],
[[7.0, 8.0, 9.0],
[10.0, 11.0, 12.0]]]
diag_val = np.array(raw_diag).astype(np.float32)
k_val = np.array([2, 3]).astype(np.int32)
row_val = np.array(-1).astype(np.int32)
col_val = np.array(6).astype(np.int32)
def func(diag, k, row, col):
return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=0.456, align='LEFT_LEFT', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: diag_val, _INPUT1: k_val,
_INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_single_dim_min_col(self):
raw_diag = [1.0, 2.0, 3.0]
diag_val = np.array(raw_diag).astype(np.float32)
k_val = np.array(-1).astype(np.int32)
row_val = np.array(5).astype(np.int32)
col_val = np.array(-1).astype(np.int32)
def func(diag, k, row, col):
return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=0.789, align='LEFT_RIGHT', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: diag_val, _INPUT1: k_val,
_INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_2single_dim_row_col(self):
raw_diag = [[1, 2, 3], [4, 5, 6]]
diag_val = np.array(raw_diag).astype(np.int64)
k_val = np.array(0).astype(np.int32)
row_val = np.array(3).astype(np.int32)
col_val = np.array(4).astype(np.int32)
def func(diag, k, row, col):
return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=7, align='LEFT_RIGHT', name=_TFOUTPUT), \
tf.raw_ops.MatrixDiag(diagonal=diag, name=_TFOUTPUT1)
self._run_test_case(func, [_OUTPUT, _OUTPUT1],
{_INPUT: diag_val, _INPUT1: k_val,
_INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
def test_matrix_diag_v3_1single_dim_row_col(self):
raw_diag = [1, 2, 3, 4, 5]
diag_val = np.array(raw_diag).astype(np.int64)
k_val = np.array(0).astype(np.int32)
row_val = np.array(5).astype(np.int32)
col_val = np.array(10).astype(np.int32)
def func(diag, k, row, col):
return tf.raw_ops.MatrixDiagV3(diagonal=diag, k=k, num_rows=row, num_cols=col,
padding_value=7, align='LEFT_RIGHT', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: diag_val, _INPUT1: k_val,
_INPUT2: row_val, _INPUT3: col_val})
@check_opset_min_version(12)
@check_tf_min_version("2.2")
@skip_onnx_checker("Checker fails. Fix later.")
def test_matrix_set_diag_v3(self):
input_val = np.array([[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]],
[[7, 7, 7, 7],
[7, 7, 7, 7],
[7, 7, 7, 7]]]).astype(np.int64)
diag_val = np.array([[1, 2, 3],
[4, 5, 6]]).astype(np.int64)
k_val = np.array([0]).astype(np.int32)
def func(base_matrix, diag, k):
return tf.raw_ops.MatrixSetDiagV3(input=base_matrix, diagonal=diag, k=k, align='RIGHT_LEFT', name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: input_val, _INPUT1: diag_val, _INPUT2: k_val})
@check_opset_min_version(10)
@check_tf_min_version("1.14")
@skip_tfjs("TFJS executes model incorrectly")
def test_fakequant_with_min_max(self):
def func(x):
ret = fake_quant_with_min_max_args(
x, min=-1024, max=1023, num_bits=8, narrow_range=False, name=None)
return tf.identity(ret, name=_TFOUTPUT)
x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
x_val0 = np.abs(x_val)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val0}, rtol=1e-6, atol=1e-4)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024
x_val[0, 0] = -1024
x_val[0, 1] = -1023
x_val[0, 2] = 1024
x_val[1, 0] = 1023
x_val[1, 1] = 1025
x_val[1, 2] = -1025
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
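    # fake_quant_with_min_max_args simulates quantisation: clip to [min, max], snap to one of
    # 2**num_bits evenly spaced levels, and map back to float.  A simplified NumPy sketch that
    # ignores TF's zero-point nudging, so values can differ slightly from TF (illustrative only):
    def _fake_quant_sketch(x, qmin=-1024.0, qmax=1023.0, num_bits=8):
        scale = (qmax - qmin) / (2 ** num_bits - 1)
        q = np.round((np.clip(x, qmin, qmax) - qmin) / scale)
        return q * scale + qmin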
@check_opset_min_version(10)
@check_tf_min_version("1.14")
def test_fakequant_with_min_max_same_sign(self):
def func_neg(x):
ret = fake_quant_with_min_max_args(
x, min=-1024*3, max=-1024, num_bits=8, narrow_range=False, name=None)
return tf.identity(ret, name=_TFOUTPUT)
x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024 * 3.
try:
self._run_test_case(func_neg, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
except ValueError:
pass
@check_opset_min_version(10)
@check_tf_min_version("1.14")
@skip_tfjs("Results differ slightly in TFJS")
def test_fakequant_with_min_max_vars(self):
def func(x):
ret = fake_quant_with_min_max_vars(
x, min=-1024, max=1023, num_bits=8, narrow_range=False, name=None)
return tf.identity(ret, name=_TFOUTPUT)
x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024.
x_val0 = np.abs(x_val)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val0}, rtol=1e-6, atol=1e-4)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
x_val = np.random.random(size=[4, 3]).astype(np.float32) * 2048. - 1024
x_val[0, 0] = -1024
x_val[0, 1] = -1023
x_val[0, 2] = 1024
x_val[1, 0] = 1023
x_val[1, 1] = 1025
x_val[1, 2] = -1025
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, rtol=1e-6, atol=1e-4)
@check_opset_min_version(9, "atan2")
def test_atan2(self):
# Test all possible pairs of pos, neg, zero for x and y.
def atan2(y, x):
sx = np.sign(x)
sy = np.sign(y)
pi_part = (sy + sx * (sy ** 2 - 1)) * (sx - 1) * (-np.pi/2)
atan_part = np.arctan(y / (x + (1 - sx ** 2))) * sx ** 2
return atan_part + pi_part
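        # Reading of the decomposition above (inferred from the math rather
        # than any converter documentation): arctan(y / x) alone only covers
        # x > 0, so pi_part adds +/-pi when x < 0 with the sign taken from y,
        # while the sx ** 2 and (1 - sx ** 2) factors neutralise the x == 0
        # column and let pi_part supply the +/-pi/2 (or 0) value there,
        # matching np.arctan2 as the assert below confirms.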
test_pairs = [[y, x] for x in [3., -4., 0.] for y in [5., -6., 0.]]
y_val = np.array([y for y, x in test_pairs], dtype=np.float32)
x_val = np.array([x for y, x in test_pairs], dtype=np.float32)
assert_almost_equal(np.arctan2(y_val, x_val), atan2(y_val, x_val))
def func(y, x):
atan2_ = tf.math.atan2(y, x)
return tf.identity(atan2_, name=_TFOUTPUT)
self._run_test_case(
func, [_OUTPUT], {_INPUT: y_val, _INPUT2: x_val}, rtol=1e-06)
def _conv_kernel_as_input_test(self, x_val, w_val, strides=None,
padding="VALID", dilations=None, rtol=1e-07):
if strides is None:
strides = _STRIDE1x1
if dilations is None:
dilations = _DILATIONS1x1
def func(x, kernel):
conv = tf.nn.conv2d(x, kernel, strides=strides, padding=padding,
dilations=dilations)
return tf.identity(conv, name=_TFOUTPUT)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val, _INPUT2: w_val}, rtol=rtol)
def test_conv2d_1_kernel_as_input(self):
x_val = make_xval((1, 1, 5, 5)).transpose(NCHW_TO_NHWC)
w_val = np.array([[2., 1., 1.],
[1., 3., 1.],
[1., 1., 4.]], dtype=np.float32).reshape(_KERNEL3x3)
self._conv_kernel_as_input_test(x_val, w_val)
def test_equal_with_different_parameters(self):
input_val = np.array([5], dtype=np.int32)
def func(input_val):
tensor = tf.zeros(input_val)
input_size = tf.size(tensor)
constant = tf.constant(3, dtype=tf.int32)
return tf.math.equal(input_size, constant, name="output")
feed_dict = {"input:0": input_val}
input_names_with_port = ["input:0"]
output_names_with_port = ["output:0"]
current_opset = self.config.opset
self.config.opset = 12
try:
self.run_test_case(func, feed_dict, input_names_with_port, output_names_with_port)
finally:
self.config.opset = current_opset
@check_tf_min_version("1.14")
@skip_tfjs("Fails to run tfjs model")
def test_rfft_ops(self):
def dft_slow(x, M, fft_length):
xt = x[:, :fft_length].T
size = fft_length // 2 + 1
res = np.dot(M[:, :, :fft_length], xt)[:, :size, :]
return np.transpose(res, (0, 2, 1))
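        # dft_slow is a NumPy reference: make_dft_constant (a helper defined
        # outside this snippet, so its exact layout is inferred) appears to
        # return a stacked [2, N, N] array of cosine/sine coefficients, so one
        # matrix product yields the real and imaginary parts, and only the
        # first fft_length // 2 + 1 rows are kept because rfft returns the
        # one-sided spectrum.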
x_val = make_xval([2, 4]).astype(np.float32)
M_both = make_dft_constant(x_val.shape[1], x_val.dtype, x_val.shape[1])
fft = dft_slow(x_val, M_both, x_val.shape[1])
fft_npy = np.fft.rfft(x_val)
assert_almost_equal(fft[0, :, :], np.real(fft_npy))
assert_almost_equal(fft[1, :, :], np.imag(fft_npy))
x_val = make_xval([2, 4]).astype(np.float32)
M_both = make_dft_constant(x_val.shape[1], x_val.dtype, x_val.shape[1]-1)
fft = dft_slow(x_val, M_both, x_val.shape[1]-1)
fft_npy = np.fft.rfft(x_val, x_val.shape[1]-1)
assert_almost_equal(fft[0, :, :], np.real(fft_npy))
assert_almost_equal(fft[1, :, :], np.imag(fft_npy))
x_val = make_xval([3, 4]).astype(np.float32)
def func1(x):
op_ = tf.signal.rfft(x)
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
def func2(x):
op_ = tf.signal.rfft(x)
return tf.cos(op_, name=_TFOUTPUT)
with self.assertRaises(ValueError):
self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
def func3(x):
op_ = tf.signal.rfft(x)
return tf.identity(op_, name=_TFOUTPUT)
with self.assertRaises(ValueError):
self._run_test_case(func3, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@skip_tfjs("TFJS executes rfft with poor accuracy")
@check_opset_min_version(10, "Slice")
def test_rfft_ops_fft_length(self):
x_val = make_xval([3, 9]).astype(np.float32)
def func1_length(x):
op_ = tf.signal.rfft(x, np.array([8], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@skip_tfjs("TFJS executes rfft with poor accuracy")
@check_opset_min_version(10, "Slice")
def test_rfft_ops_fft_length_many(self):
for i in range(4, 7):
for j in range(4, 7):
for m in range(0, 3):
with self.subTest(shape=(i, j), fft_length=j-m):
x_val = make_xval([i, j]).astype(np.float32)
def func1_length(x):
op_ = tf.signal.rfft(x, np.array([j-m], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(10, "Slice")
def test_rfft_ops_fft_length_many_bigger(self):
for i in range(4, 7):
for j in range(4, 7):
for m in range(0, 3):
with self.subTest(shape=(i, j), fft_length=j+m):
x_val = make_xval([i, j]).astype(np.float32) / 10
def func1_length(x):
op_ = tf.signal.rfft(x, np.array([j+m], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@skip_tflite("Slight accuracy issues with some shapes")
@skip_tfjs("TFJS executes rfft with poor accuracy")
@check_opset_min_version(10, "Slice")
def test_rfft_ops_fft_length_many_larger(self):
for i in range(4, 7):
for j in range(4, 7):
for m in range(-3, 3):
with self.subTest(shape=(3, i, j), fft_length=j+m):
x_val = make_xval([3, i, j]).astype(np.float32) / 10
def func1_length(x):
op_ = tf.signal.rfft(x, np.array([j+m], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=False)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "CumSum")
def test_rfft2d_ops(self):
x_val = make_xval([3, 4]).astype(np.float32)
def func1(x):
op_ = tf.signal.rfft2d(x)
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=False)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
def func2(x):
op_ = tf.signal.rfft2d(x)
return tf.cos(op_, name=_TFOUTPUT)
with self.assertRaises(ValueError):
self._run_test_case(func2, [_OUTPUT], {_INPUT: x_val})
def func3(x):
op_ = tf.signal.rfft2d(x)
return tf.identity(op_, name=_TFOUTPUT)
with self.assertRaises(ValueError):
self._run_test_case(func3, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "CumSum")
def test_rfft2d_ops_fft_length(self):
x_val = make_xval([3, 4]).astype(np.float32)
def func1_length(x):
op_ = tf.signal.rfft2d(x, np.array([3, 3], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
with self.subTest(optimize=False):
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=False)
with self.subTest(optimize=True):
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "CumSum")
def test_rfft2d_ops_fft_length_many(self):
for i in range(7, 4, -1):
for j in range(7, 4, -1):
for m in range(0, 3):
for n in range(0, 3):
for opt in [False, True]:
with self.subTest(shape=(i, j), fft_length=(m, n), optimize=opt):
x_val = make_xval([i, j]).astype(np.float32) / 100
def func1_length(x):
op_ = tf.signal.rfft2d(x, np.array([i-m, j-n], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1_length, [_OUTPUT], {_INPUT: x_val}, optimize=opt)
@check_tf_min_version("1.14")
@check_opset_min_version(11, "CumSum")
@unittest.skipIf(True, reason="Not fully implemented for dynamic shape.")
def test_fft_ops(self):
x_val = make_xval([3, 4]).astype(np.float32)
def func1(x):
xc = tf.cast(x, tf.complex64)
op_ = tf.signal.fft(xc)
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
@check_opset_min_version(11, "topk")
def test_invert_permutation(self):
def func(x):
op_ = tf.math.invert_permutation(x)
return tf.identity(op_, name=_TFOUTPUT)
x_val = np.array([0, 1, 2, 3], dtype=np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([1, 5, 2, 0, 3, 4], dtype=np.int64)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
@check_tf_min_version("1.14")
@check_opset_min_version(11, "CumSum")
def test_rfft2d_ops_specific_dimension(self):
x_val = make_xval([3, 1, 4]).astype(np.float32)
def func1(x):
op_ = tf.signal.rfft2d(x, np.array([1, 4], dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
with self.subTest(shape=(3, 1, 4), fft_length=(1, 4), optimize=False):
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=False)
with self.subTest(shape=(3, 1, 4), fft_length=(1, 4), optimize=True):
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val})
for shape in [(3, 1, 4), (5, 7), (3, 5, 7), (7, 5)]:
for fft_length in [shape[-2:], (1, shape[-1]),
(min(2, shape[-2]), shape[-1]),
(shape[-2], 2),
(min(3, shape[-2]), min(4, shape[-2]))]:
if fft_length == (1, 1):
# The code fails in this case but that's unlikely to happen.
continue
for optimize in [False, True]:
with self.subTest(shape=shape, fft_length=fft_length, optimize=optimize):
x_val = make_xval(list(shape)).astype(np.float32)
x_val /= x_val.size
def func1(x):
op_ = tf.signal.rfft2d(x, np.array(fft_length, dtype=np.int32))
return tf.abs(op_, name=_TFOUTPUT)
self._run_test_case(func1, [_OUTPUT], {_INPUT: x_val}, optimize=optimize)
@check_tf_min_version("2.1")
@skip_tflite("TFlite errors on some attributes")
@check_opset_min_version(9, "string")
def test_asstring(self):
def func(x):
op_ = tf.strings.as_string(x)
return tf.identity(op_, name=_TFOUTPUT)
x_val = np.array([0, 1, 2, 3], dtype=np.int32)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array([0, 1, 2, 3], dtype=np.float32)
# can't check the values because in onnx they are padded with 0, in tf they are not
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False)
@check_tf_min_version("2.1")
@skip_tflite("TFlite errors on some attributes")
@check_opset_min_version(9, "string")
def test_string_to_number(self):
def func(x):
op_ = tf.strings.to_number(x)
return tf.identity(op_, name=_TFOUTPUT)
# tf gets this wrong and returns fp32 instead of int
x_val = np.array("123", dtype=np.object)
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
x_val = np.array("123.1", dtype=np.object)
# can't check the values because in onnx they are padded with 0, in tf they are not
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val}, check_value=False)
@check_tf_min_version("2.5")
@check_opset_min_version(14, "hardswish")
@skip_tfjs("not supported in tfjs")
def test_hardswish(self):
def func(x):
            # there is no hardswish in tf, but toco will optimize this pattern to it
op_ = x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.)
return tf.identity(op_, name=_TFOUTPUT)
x_val = np.array([0.5, 1.0, -0.5, -1.0], dtype=np.float32).reshape((2, 2))
self._run_test_case(func, [_OUTPUT], {_INPUT: x_val})
if __name__ == '__main__':
unittest_main()
| 48.681408
| 120
| 0.592901
|
5d8cf7ba32a5f93cedcdf5c4c3776a9561af2aa7
| 893
|
py
|
Python
|
tests/ganache.py
|
SOFIE-project/Identity-Authentication-Authorization
|
4651fdce9249511dda4ac3591235d0670deafc9b
|
[
"Apache-1.1"
] | 2
|
2020-03-12T14:55:42.000Z
|
2020-10-16T02:54:41.000Z
|
tests/ganache.py
|
SOFIE-project/Identity-Authentication-Authorization
|
4651fdce9249511dda4ac3591235d0670deafc9b
|
[
"Apache-1.1"
] | 1
|
2020-08-12T19:31:33.000Z
|
2020-08-12T19:31:33.000Z
|
tests/ganache.py
|
SOFIE-project/Identity-Authentication-Authorization
|
4651fdce9249511dda4ac3591235d0670deafc9b
|
[
"Apache-1.1"
] | 3
|
2020-03-16T15:26:31.000Z
|
2020-05-20T20:19:49.000Z
|
from web3 import Web3
import json
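# Quick end-to-end check against a locally running Ganache node on the default
# RPC port 8545: deploy the compiled ERC721Metadata contract from the first
# unlocked account, mint token id 1234 (with a placeholder metadata string) to
# a fixed test address, then read the URI back via getTokenURI.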
w3 = Web3(Web3.HTTPProvider("HTTP://127.0.0.1:8545"))
with open('../conf/contract/build/ERC721Metadata.abi', 'r') as myfile:
abi = myfile.read()
with open('../conf/contract/build/ERC721Metadata.bin', 'r') as myfile:
binfile = myfile.read()
account = w3.eth.accounts[0]
PDSContract = w3.eth.contract(abi=abi, bytecode=binfile)
tx_hash = PDSContract.constructor("Sofie Access Token", "SAT").transact({'from': account})
tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
address = tx_receipt.contractAddress
print(address)
PDSContract_instance = w3.eth.contract(abi=abi, address=address)
tx_hash = PDSContract_instance.functions.mint('0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1', 1234, "metadata").transact({'from': account})
w3.eth.waitForTransactionReceipt(tx_hash)
metadata = PDSContract_instance.functions.getTokenURI(1234).call()
print(metadata)
| 40.590909
| 137
| 0.774916
|
93cc160faf4eb8cd902ff55c9df99d08f9fc3d2f
| 165
|
py
|
Python
|
app/settings/product.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
app/settings/product.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
app/settings/product.py
|
ihor-nahuliak/task-23-jul-2019
|
f32d3ef1df985f77998b5d296b524af99f82c3ef
|
[
"MIT"
] | null | null | null |
from .default import DefaultConfig
__all__ = ['ProductConfig']
class ProductConfig(DefaultConfig):
DEBUG = False
POSTGRESQL_DB = 'task23jul2019_product'
| 16.5
| 43
| 0.757576
|
4853f1576957752d5e1fd0c31ebae671a44e6615
| 337
|
py
|
Python
|
lead_management/config/docs.py
|
biswa4u85/lead_management
|
9e490a4f7fb76a79190a004eea9e376109311517
|
[
"MIT"
] | null | null | null |
lead_management/config/docs.py
|
biswa4u85/lead_management
|
9e490a4f7fb76a79190a004eea9e376109311517
|
[
"MIT"
] | null | null | null |
lead_management/config/docs.py
|
biswa4u85/lead_management
|
9e490a4f7fb76a79190a004eea9e376109311517
|
[
"MIT"
] | null | null | null |
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/lead_management"
# docs_base_url = "https://[org_name].github.io/lead_management"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "Lead Management"
| 28.083333
| 68
| 0.738872
|
801f8c5c30a421002034b6de0796a5fb15a05a61
| 2,479
|
py
|
Python
|
chargebeecli/processors/customer/customer.py
|
bhaskernitt/chargebee-cli
|
7025239f52deb010139c5b721be57a80da63f09e
|
[
"MIT"
] | null | null | null |
chargebeecli/processors/customer/customer.py
|
bhaskernitt/chargebee-cli
|
7025239f52deb010139c5b721be57a80da63f09e
|
[
"MIT"
] | null | null | null |
chargebeecli/processors/customer/customer.py
|
bhaskernitt/chargebee-cli
|
7025239f52deb010139c5b721be57a80da63f09e
|
[
"MIT"
] | null | null | null |
from chargebeecli.client.actionsImpl import ActionsImpl
from chargebeecli.constants.constants import Formats
from chargebeecli.export.Exporter import Exporter
from chargebeecli.formater.response_formatter import ResponseFormatter
from chargebeecli.printer.printer import Printer
from chargebeecli.processors.processor import Processor
from chargebeecli.validator.validator import Validator
API_URI = '/api/v2/customers'
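# Base path of the Chargebee v2 customer endpoint; individual records are
# addressed as API_URI + '/<id>' and deletion goes through the extra '/delete'
# action segment (see get(), list() and delete() below).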
class Customer(Processor, Validator, ResponseFormatter, Exporter, Printer):
__action_processor = ActionsImpl()
def __init__(self, export_format, export_path, file_name, response_format, _operation, _input_columns):
self.headers = self.get_api_header()
self.export_format = export_format
self.export_path = export_path
self.file_name = file_name
self.tables = None
self.response_format = response_format
self.operation = _operation
self.input_columns = _input_columns
def validate_param(self):
self.headers = super().validate_param(self.input_columns, self.headers)
return self
def get_api_header(self):
return ["id", "first_name", "email", "auto_collection", "net_term_days", "allow_direct_debit", "created_at",
"taxability", "updated_at", "pii_cleared", "resource_version", "deleted", "object", "card_status",
"promotional_credits", "refundable_credits", "excess_payments", "unbilled_charges",
"preferred_currency_code", "primary_payment_source_id", "payment_method"]
def process(self, ctx, operation, payload, resource_id):
return super(Customer, self).process(ctx, operation, payload, resource_id)
def to_be_formatted(self):
return self.response_format.lower() == Formats.TABLE.value
def format(self):
if self.to_be_formatted():
self.tables = super(Customer, self).format(self.response, self.response_format, self.operation,
self.headers, 'customer', 'list')
return self
def get(self, ctx, payload, resource_id):
return self.__action_processor.get(API_URI + '/' + resource_id)
def list(self, ctx):
return self.__action_processor.get(API_URI)
def delete(self, ctx, payload, resource_id):
return self.__action_processor.delete(API_URI + '/' + resource_id + '/' + 'delete')
def table_to_be_printed(self):
return self.to_be_formatted()
| 42.741379
| 116
| 0.701493
|
e13e4078c75b78d85b4d7ec3d79a4ee5105fee56
| 5,724
|
py
|
Python
|
rest_framework/tests/test_validation.py
|
Hipo/django-rest-framework
|
ffdf2ffb19fb75013e03fae0c26a7aa9d2b7cdd5
|
[
"Unlicense"
] | 87
|
2016-01-24T16:41:02.000Z
|
2021-12-20T21:13:24.000Z
|
rest_framework/tests/test_validation.py
|
laborautonomo/django-rest-framework
|
299a8347e8ef448eefc611eebfe80d7e142ceaa1
|
[
"Unlicense"
] | 16
|
2020-02-11T23:19:19.000Z
|
2022-03-11T23:33:40.000Z
|
rest_framework/tests/test_validation.py
|
laborautonomo/django-rest-framework
|
299a8347e8ef448eefc611eebfe80d7e142ceaa1
|
[
"Unlicense"
] | 57
|
2016-02-02T05:46:14.000Z
|
2021-03-21T15:46:06.000Z
|
from __future__ import unicode_literals
from django.core.validators import MaxValueValidator
from django.db import models
from django.test import TestCase
from rest_framework import generics, serializers, status
from rest_framework.test import APIRequestFactory
factory = APIRequestFactory()
# Regression for #666
class ValidationModel(models.Model):
blank_validated_field = models.CharField(max_length=255)
class ValidationModelSerializer(serializers.ModelSerializer):
class Meta:
model = ValidationModel
fields = ('blank_validated_field',)
read_only_fields = ('blank_validated_field',)
class UpdateValidationModel(generics.RetrieveUpdateDestroyAPIView):
model = ValidationModel
serializer_class = ValidationModelSerializer
class TestPreSaveValidationExclusions(TestCase):
def test_pre_save_validation_exclusions(self):
"""
Somewhat weird test case to ensure that we don't perform model
validation on read only fields.
"""
obj = ValidationModel.objects.create(blank_validated_field='')
request = factory.put('/', {}, format='json')
view = UpdateValidationModel().as_view()
response = view(request, pk=obj.pk).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Regression for #653
class ShouldValidateModel(models.Model):
should_validate_field = models.CharField(max_length=255)
class ShouldValidateModelSerializer(serializers.ModelSerializer):
renamed = serializers.CharField(source='should_validate_field', required=False)
def validate_renamed(self, attrs, source):
value = attrs[source]
if len(value) < 3:
raise serializers.ValidationError('Minimum 3 characters.')
return attrs
class Meta:
model = ShouldValidateModel
fields = ('renamed',)
class TestPreSaveValidationExclusionsSerializer(TestCase):
def test_renamed_fields_are_model_validated(self):
"""
        Ensure fields with 'source' applied do still get model validation.
"""
# We've set `required=False` on the serializer, but the model
# does not have `blank=True`, so this serializer should not validate.
serializer = ShouldValidateModelSerializer(data={'renamed': ''})
self.assertEqual(serializer.is_valid(), False)
self.assertIn('renamed', serializer.errors)
self.assertNotIn('should_validate_field', serializer.errors)
class TestCustomValidationMethods(TestCase):
def test_custom_validation_method_is_executed(self):
serializer = ShouldValidateModelSerializer(data={'renamed': 'fo'})
self.assertFalse(serializer.is_valid())
self.assertIn('renamed', serializer.errors)
def test_custom_validation_method_passing(self):
serializer = ShouldValidateModelSerializer(data={'renamed': 'foo'})
self.assertTrue(serializer.is_valid())
class ValidationSerializer(serializers.Serializer):
foo = serializers.CharField()
def validate_foo(self, attrs, source):
raise serializers.ValidationError("foo invalid")
def validate(self, attrs):
raise serializers.ValidationError("serializer invalid")
class TestAvoidValidation(TestCase):
"""
    If the serializer is initialized with invalid data (None or non dict-like), it
    should skip the validation layer (the validate_<field> and validate methods).
"""
def test_serializer_errors_has_only_invalid_data_error(self):
serializer = ValidationSerializer(data='invalid data')
self.assertFalse(serializer.is_valid())
self.assertDictEqual(serializer.errors,
{'non_field_errors': ['Invalid data']})
# regression tests for issue: 1493
class ValidationMaxValueValidatorModel(models.Model):
number_value = models.PositiveIntegerField(validators=[MaxValueValidator(100)])
class ValidationMaxValueValidatorModelSerializer(serializers.ModelSerializer):
class Meta:
model = ValidationMaxValueValidatorModel
class UpdateMaxValueValidationModel(generics.RetrieveUpdateDestroyAPIView):
model = ValidationMaxValueValidatorModel
serializer_class = ValidationMaxValueValidatorModelSerializer
class TestMaxValueValidatorValidation(TestCase):
def test_max_value_validation_serializer_success(self):
serializer = ValidationMaxValueValidatorModelSerializer(data={'number_value': 99})
self.assertTrue(serializer.is_valid())
def test_max_value_validation_serializer_fails(self):
serializer = ValidationMaxValueValidatorModelSerializer(data={'number_value': 101})
self.assertFalse(serializer.is_valid())
self.assertDictEqual({'number_value': ['Ensure this value is less than or equal to 100.']}, serializer.errors)
def test_max_value_validation_success(self):
obj = ValidationMaxValueValidatorModel.objects.create(number_value=100)
request = factory.patch('/{0}'.format(obj.pk), {'number_value': 98}, format='json')
view = UpdateMaxValueValidationModel().as_view()
response = view(request, pk=obj.pk).render()
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_max_value_validation_fail(self):
obj = ValidationMaxValueValidatorModel.objects.create(number_value=100)
request = factory.patch('/{0}'.format(obj.pk), {'number_value': 101}, format='json')
view = UpdateMaxValueValidationModel().as_view()
response = view(request, pk=obj.pk).render()
self.assertEqual(response.content, b'{"number_value": ["Ensure this value is less than or equal to 100."]}')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| 38.416107
| 118
| 0.734626
|
84faabf5752c028be7ad85327007f48df39c833c
| 1,411
|
py
|
Python
|
url_cleaner.py
|
HyShai/url-cleaner
|
35a955fa9a0d6a3f7fd2ae2bef6e448e9d57b2ee
|
[
"MIT"
] | 5
|
2015-04-06T14:06:31.000Z
|
2022-01-29T12:56:57.000Z
|
url_cleaner.py
|
HyShai/url-cleaner
|
35a955fa9a0d6a3f7fd2ae2bef6e448e9d57b2ee
|
[
"MIT"
] | null | null | null |
url_cleaner.py
|
HyShai/url-cleaner
|
35a955fa9a0d6a3f7fd2ae2bef6e448e9d57b2ee
|
[
"MIT"
] | 1
|
2020-01-06T23:43:36.000Z
|
2020-01-06T23:43:36.000Z
|
welcome_msg = '''url_cleaner is a poor man's CleanLinks.
See: https://itunes.apple.com/us/app/clean-links/id623671942?mt=8
It takes a shortened url and returns the lengthened, cleaned url.
The inbound shortened url can either be provided as a command line
argument or on the iOS clipboard.
url_cleaner follows all header redirects, instead of downloading
the entire webpage as CleanLinks does, which 1) saves you bandwidth
and 2) does not register as a click (at least for bit.ly).
See: https://twitter.com/gcaprio/status/418040618636435456
It also has optional support for LaunchCenterPro if it is installed.
Source code at: https://github.com/HyShai/url-cleaner'''
import clipboard, console, re, requests, sys, urllib, webbrowser
def url_lengthen(url): # recursively lengthen the url
try:
new_url = requests.head(url).headers.get('location')
except ValueError:
return None
return url_lengthen(new_url) if new_url else url
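# Note: requests.head() does not follow redirects by default, so each call
# returns at most one 'location' hop and the recursion above walks the whole
# redirect chain with one HEAD request per hop; a malformed URL surfaces as a
# ValueError (e.g. requests.exceptions.MissingSchema) and ends the lookup.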
url = url_lengthen(sys.argv[1] if len(sys.argv) > 1 else clipboard.get())
if not url:
print(welcome_msg)
sys.exit()
#strip analytics garbage
url = re.sub(r'(?<=\&|\?)utm\w+=[^\&]+(\&)?','',url)
if webbrowser.can_open('launch://'):
params = urllib.quote('[prompt:Set Clipboard=%s]' % url)
launch = 'launch://clipboard?text=%s' % params
webbrowser.open(launch)
else:
print('Copying '),
console.write_link(url, url)
print('to the clipboard')
clipboard.set(url)
| 33.595238
| 73
| 0.739901
|
40617e59cedfe85d2b5f4a2d04fcd81c62b0b193
| 43,620
|
py
|
Python
|
test/dialect/mysql/test_reflection.py
|
PhillCli/sqlalchemy
|
44365e0d2190eecf7c006c9d41dbcab7b7925187
|
[
"MIT"
] | null | null | null |
test/dialect/mysql/test_reflection.py
|
PhillCli/sqlalchemy
|
44365e0d2190eecf7c006c9d41dbcab7b7925187
|
[
"MIT"
] | 1
|
2021-01-25T09:53:34.000Z
|
2021-01-25T09:53:35.000Z
|
test/dialect/mysql/test_reflection.py
|
PhillCli/sqlalchemy
|
44365e0d2190eecf7c006c9d41dbcab7b7925187
|
[
"MIT"
] | 2
|
2021-01-10T10:49:52.000Z
|
2021-01-13T09:34:27.000Z
|
# coding: utf-8
import re
from sqlalchemy import BigInteger
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import DDL
from sqlalchemy import DefaultClause
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import Index
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import LargeBinary
from sqlalchemy import MetaData
from sqlalchemy import NCHAR
from sqlalchemy import select
from sqlalchemy import SmallInteger
from sqlalchemy import sql
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import testing
from sqlalchemy import Text
from sqlalchemy import TIMESTAMP
from sqlalchemy import Unicode
from sqlalchemy import UnicodeText
from sqlalchemy import UniqueConstraint
from sqlalchemy import util
from sqlalchemy.dialects.mysql import base as mysql
from sqlalchemy.dialects.mysql import reflection as _reflection
from sqlalchemy.schema import CreateIndex
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_warnings
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
class TypeReflectionTest(fixtures.TestBase):
__only_on__ = "mysql", "mariadb"
__backend__ = True
def _run_test(self, metadata, connection, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
# Early 5.0 releases seem to report more "general" for columns
# in a view, e.g. char -> varchar, tinyblob -> mediumblob
use_views = testing.db.dialect.server_version_info > (5, 0, 10)
m = metadata
Table("mysql_types", m, *columns)
if use_views:
event.listen(
m,
"after_create",
DDL(
"CREATE OR REPLACE VIEW mysql_types_v "
"AS SELECT * from mysql_types"
),
)
event.listen(
m, "before_drop", DDL("DROP VIEW IF EXISTS mysql_types_v")
)
m.create_all(connection)
m2 = MetaData()
tables = [Table("mysql_types", m2, autoload_with=connection)]
if use_views:
tables.append(Table("mysql_types_v", m2, autoload_with=connection))
for table in tables:
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
def test_time_types(self, metadata, connection):
specs = []
if testing.requires.mysql_fsp.enabled:
fsps = [None, 0, 5]
else:
fsps = [None]
for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
# MySQL defaults fsp to 0, and if 0 does not report it.
# we don't actually render 0 right now in DDL but even if we do,
# it comes back blank
for fsp in fsps:
if fsp:
specs.append((type_(fsp=fsp), type_(fsp=fsp)))
else:
specs.append((type_(), type_()))
specs.extend(
[(TIMESTAMP(), mysql.TIMESTAMP()), (DateTime(), mysql.DATETIME())]
)
# note 'timezone' should always be None on both
self._run_test(metadata, connection, specs, ["fsp", "timezone"])
def test_year_types(self, metadata, connection):
specs = [
(mysql.YEAR(), mysql.YEAR(display_width=4)),
(mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
]
if testing.against("mysql>=8.0.19"):
self._run_test(metadata, connection, specs, [])
else:
self._run_test(metadata, connection, specs, ["display_width"])
def test_string_types(
self,
metadata,
connection,
):
specs = [
(String(1), mysql.MSString(1)),
(String(3), mysql.MSString(3)),
(Text(), mysql.MSText()),
(Unicode(1), mysql.MSString(1)),
(Unicode(3), mysql.MSString(3)),
(UnicodeText(), mysql.MSText()),
(mysql.MSChar(1), mysql.MSChar(1)),
(mysql.MSChar(3), mysql.MSChar(3)),
(NCHAR(2), mysql.MSChar(2)),
(mysql.MSNChar(2), mysql.MSChar(2)),
(mysql.MSNVarChar(22), mysql.MSString(22)),
]
self._run_test(metadata, connection, specs, ["length"])
def test_integer_types(self, metadata, connection):
specs = []
for type_ in [
mysql.TINYINT,
mysql.SMALLINT,
mysql.MEDIUMINT,
mysql.INTEGER,
mysql.BIGINT,
]:
for display_width in [None, 4, 7]:
for unsigned in [False, True]:
for zerofill in [None, True]:
kw = {}
if display_width:
kw["display_width"] = display_width
if unsigned is not None:
kw["unsigned"] = unsigned
if zerofill is not None:
kw["zerofill"] = zerofill
zerofill = bool(zerofill)
source_type = type_(**kw)
if display_width is None:
display_width = {
mysql.MEDIUMINT: 9,
mysql.SMALLINT: 6,
mysql.TINYINT: 4,
mysql.INTEGER: 11,
mysql.BIGINT: 20,
}[type_]
if zerofill:
unsigned = True
expected_type = type_(
display_width=display_width,
unsigned=unsigned,
zerofill=zerofill,
)
specs.append((source_type, expected_type))
specs.extend(
[
(SmallInteger(), mysql.SMALLINT(display_width=6)),
(Integer(), mysql.INTEGER(display_width=11)),
(BigInteger, mysql.BIGINT(display_width=20)),
]
)
# TODO: mysql 8.0.19-ish doesn't consistently report
# on display_width. need to test this more accurately though
# for the cases where it does
if testing.against("mysql >= 8.0.19"):
self._run_test(
metadata, connection, specs, ["unsigned", "zerofill"]
)
else:
self._run_test(
metadata,
connection,
specs,
["display_width", "unsigned", "zerofill"],
)
def test_binary_types(
self,
metadata,
connection,
):
specs = [
(LargeBinary(3), mysql.TINYBLOB()),
(LargeBinary(), mysql.BLOB()),
(mysql.MSBinary(3), mysql.MSBinary(3)),
(mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
(mysql.MSTinyBlob(), mysql.MSTinyBlob()),
(mysql.MSBlob(), mysql.MSBlob()),
(mysql.MSBlob(1234), mysql.MSBlob()),
(mysql.MSMediumBlob(), mysql.MSMediumBlob()),
(mysql.MSLongBlob(), mysql.MSLongBlob()),
]
self._run_test(metadata, connection, specs, [])
def test_legacy_enum_types(
self,
metadata,
connection,
):
specs = [(mysql.ENUM("", "fleem"), mysql.ENUM("", "fleem"))]
self._run_test(metadata, connection, specs, ["enums"])
class ReflectionTest(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mysql", "mariadb"
__backend__ = True
def test_default_reflection(self):
"""Test reflection of column defaults."""
# TODO: this test is a mess. should be broken into individual
# combinations
from sqlalchemy.dialects.mysql import VARCHAR
def_table = Table(
"mysql_def",
MetaData(),
Column(
"c1",
VARCHAR(10, collation="utf8_unicode_ci"),
DefaultClause(""),
nullable=False,
),
Column("c2", String(10), DefaultClause("0")),
Column("c3", String(10), DefaultClause("abc")),
Column("c4", TIMESTAMP, DefaultClause("2009-04-05 12:00:00")),
Column("c5", TIMESTAMP),
Column(
"c6",
TIMESTAMP,
DefaultClause(
sql.text(
"CURRENT_TIMESTAMP " "ON UPDATE CURRENT_TIMESTAMP"
)
),
),
Column("c7", mysql.DOUBLE(), DefaultClause("0.0000")),
Column("c8", mysql.DOUBLE(22, 6), DefaultClause("0.0000")),
)
def_table.create(testing.db)
try:
reflected = Table(
"mysql_def", MetaData(), autoload_with=testing.db
)
finally:
def_table.drop(testing.db)
assert def_table.c.c1.server_default.arg == ""
assert def_table.c.c2.server_default.arg == "0"
assert def_table.c.c3.server_default.arg == "abc"
assert def_table.c.c4.server_default.arg == "2009-04-05 12:00:00"
assert str(reflected.c.c1.server_default.arg) == "''"
assert str(reflected.c.c2.server_default.arg) == "'0'"
assert str(reflected.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert str(reflected.c.c7.server_default.arg) in ("0", "'0'")
# this is because the numeric is 6 decimal places, MySQL
# formats it to that many places.
assert str(reflected.c.c8.server_default.arg) in (
"0.000000",
"'0.000000'",
)
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
reflected.create(testing.db)
try:
reflected2 = Table(
"mysql_def", MetaData(), autoload_with=testing.db
)
finally:
reflected.drop(testing.db)
assert str(reflected2.c.c1.server_default.arg) == "''"
assert str(reflected2.c.c2.server_default.arg) == "'0'"
assert str(reflected2.c.c3.server_default.arg) == "'abc'"
assert (
str(reflected2.c.c4.server_default.arg) == "'2009-04-05 12:00:00'"
)
assert reflected.c.c5.default is None
assert reflected.c.c5.server_default is None
assert reflected.c.c6.default is None
assert str(reflected.c.c7.server_default.arg) in ("0", "'0'")
assert str(reflected.c.c8.server_default.arg) in (
"0.000000",
"'0.000000'",
)
assert re.match(
r"CURRENT_TIMESTAMP(\(\))? ON UPDATE CURRENT_TIMESTAMP(\(\))?",
str(reflected.c.c6.server_default.arg).upper(),
)
def test_reflection_with_table_options(self, metadata, connection):
comment = r"""Comment types type speedily ' " \ '' Fun!"""
if testing.against("mariadb"):
kwargs = dict(
mariadb_engine="MEMORY",
mariadb_default_charset="utf8",
mariadb_auto_increment="5",
mariadb_avg_row_length="3",
mariadb_password="secret",
mariadb_connection="fish",
)
else:
kwargs = dict(
mysql_engine="MEMORY",
mysql_default_charset="utf8",
mysql_auto_increment="5",
mysql_avg_row_length="3",
mysql_password="secret",
mysql_connection="fish",
)
def_table = Table(
"mysql_def",
metadata,
Column("c1", Integer()),
comment=comment,
**kwargs
)
conn = connection
def_table.create(conn)
reflected = Table("mysql_def", MetaData(), autoload_with=conn)
if testing.against("mariadb"):
assert def_table.kwargs["mariadb_engine"] == "MEMORY"
assert def_table.comment == comment
assert def_table.kwargs["mariadb_default_charset"] == "utf8"
assert def_table.kwargs["mariadb_auto_increment"] == "5"
assert def_table.kwargs["mariadb_avg_row_length"] == "3"
assert def_table.kwargs["mariadb_password"] == "secret"
assert def_table.kwargs["mariadb_connection"] == "fish"
assert reflected.kwargs["mariadb_engine"] == "MEMORY"
assert reflected.comment == comment
assert reflected.kwargs["mariadb_comment"] == comment
assert reflected.kwargs["mariadb_default charset"] == "utf8"
assert reflected.kwargs["mariadb_avg_row_length"] == "3"
assert reflected.kwargs["mariadb_connection"] == "fish"
# This field doesn't seem to be returned by mariadb itself.
# assert reflected.kwargs['mariadb_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
# assert reflected.kwargs['mariadb_auto_increment'] == '5'
else:
assert def_table.kwargs["mysql_engine"] == "MEMORY"
assert def_table.comment == comment
assert def_table.kwargs["mysql_default_charset"] == "utf8"
assert def_table.kwargs["mysql_auto_increment"] == "5"
assert def_table.kwargs["mysql_avg_row_length"] == "3"
assert def_table.kwargs["mysql_password"] == "secret"
assert def_table.kwargs["mysql_connection"] == "fish"
assert reflected.kwargs["mysql_engine"] == "MEMORY"
assert reflected.comment == comment
assert reflected.kwargs["mysql_comment"] == comment
assert reflected.kwargs["mysql_default charset"] == "utf8"
assert reflected.kwargs["mysql_avg_row_length"] == "3"
assert reflected.kwargs["mysql_connection"] == "fish"
# This field doesn't seem to be returned by mysql itself.
# assert reflected.kwargs['mysql_password'] == 'secret'
# This is explicitly ignored when reflecting schema.
# assert reflected.kwargs['mysql_auto_increment'] == '5'
def test_reflection_on_include_columns(self, metadata, connection):
"""Test reflection of include_columns to be sure they respect case."""
meta = metadata
case_table = Table(
"mysql_case",
meta,
Column("c1", String(10)),
Column("C2", String(10)),
Column("C3", String(10)),
)
case_table.create(connection)
reflected = Table(
"mysql_case",
MetaData(),
autoload_with=connection,
include_columns=["c1", "C2"],
)
for t in case_table, reflected:
assert "c1" in t.c.keys()
assert "C2" in t.c.keys()
reflected2 = Table(
"mysql_case",
MetaData(),
autoload_with=connection,
include_columns=["c1", "c2"],
)
assert "c1" in reflected2.c.keys()
for c in ["c2", "C2", "C3"]:
assert c not in reflected2.c.keys()
def test_autoincrement(self, metadata, connection):
meta = metadata
Table(
"ai_1",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_2",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column("int_n", Integer, DefaultClause("0"), primary_key=True),
mysql_engine="MyISAM",
)
Table(
"ai_3",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_4",
meta,
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
Column(
"int_n2",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_5",
meta,
Column("int_y", Integer, primary_key=True, autoincrement=True),
Column(
"int_n",
Integer,
DefaultClause("0"),
primary_key=True,
autoincrement=False,
),
mysql_engine="MyISAM",
)
Table(
"ai_6",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_7",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
Column("int_y", Integer, primary_key=True, autoincrement=True),
mysql_engine="MyISAM",
)
Table(
"ai_8",
meta,
Column("o1", String(1), DefaultClause("x"), primary_key=True),
Column("o2", String(1), DefaultClause("x"), primary_key=True),
mysql_engine="MyISAM",
)
meta.create_all(connection)
table_names = [
"ai_1",
"ai_2",
"ai_3",
"ai_4",
"ai_5",
"ai_6",
"ai_7",
"ai_8",
]
mr = MetaData()
mr.reflect(connection, only=table_names)
for tbl in [mr.tables[name] for name in table_names]:
for c in tbl.c:
if c.name.startswith("int_y"):
assert c.autoincrement
elif c.name.startswith("int_n"):
assert not c.autoincrement
connection.execute(tbl.insert())
if "int_y" in tbl.c:
assert connection.scalar(select(tbl.c.int_y)) == 1
assert (
list(connection.execute(tbl.select()).first()).count(1)
== 1
)
else:
assert 1 not in list(connection.execute(tbl.select()).first())
def test_view_reflection(self, metadata, connection):
Table("x", metadata, Column("a", Integer), Column("b", String(50)))
metadata.create_all(connection)
conn = connection
conn.exec_driver_sql("CREATE VIEW v1 AS SELECT * FROM x")
conn.exec_driver_sql(
"CREATE ALGORITHM=MERGE VIEW v2 AS SELECT * FROM x"
)
conn.exec_driver_sql(
"CREATE ALGORITHM=UNDEFINED VIEW v3 AS SELECT * FROM x"
)
conn.exec_driver_sql(
"CREATE DEFINER=CURRENT_USER VIEW v4 AS SELECT * FROM x"
)
@event.listens_for(metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.begin() as conn:
for v in ["v1", "v2", "v3", "v4"]:
conn.exec_driver_sql("DROP VIEW %s" % v)
insp = inspect(connection)
for v in ["v1", "v2", "v3", "v4"]:
eq_(
[
(col["name"], col["type"].__class__)
for col in insp.get_columns(v)
],
[("a", mysql.INTEGER), ("b", mysql.VARCHAR)],
)
def test_skip_not_describable(self, metadata, connection):
@event.listens_for(metadata, "before_drop")
def cleanup(*arg, **kw):
with testing.db.begin() as conn:
conn.exec_driver_sql("DROP TABLE IF EXISTS test_t1")
conn.exec_driver_sql("DROP TABLE IF EXISTS test_t2")
conn.exec_driver_sql("DROP VIEW IF EXISTS test_v")
conn = connection
conn.exec_driver_sql("CREATE TABLE test_t1 (id INTEGER)")
conn.exec_driver_sql("CREATE TABLE test_t2 (id INTEGER)")
conn.exec_driver_sql("CREATE VIEW test_v AS SELECT id FROM test_t1")
conn.exec_driver_sql("DROP TABLE test_t1")
m = MetaData()
with expect_warnings(
"Skipping .* Table or view named .?test_v.? could not be "
"reflected: .* references invalid table"
):
m.reflect(views=True, bind=conn)
eq_(m.tables["test_t2"].name, "test_t2")
assert_raises_message(
exc.UnreflectableTableError,
"references invalid table",
Table,
"test_v",
MetaData(),
autoload_with=conn,
)
@testing.exclude("mysql", "<", (5, 0, 0), "no information_schema support")
def test_system_views(self):
dialect = testing.db.dialect
connection = testing.db.connect()
view_names = dialect.get_view_names(connection, "information_schema")
self.assert_("TABLES" in view_names)
def test_nullable_reflection(self, metadata, connection):
"""test reflection of NULL/NOT NULL, in particular with TIMESTAMP
defaults where MySQL is inconsistent in how it reports CREATE TABLE.
"""
meta = metadata
# this is ideally one table, but older MySQL versions choke
# on the multiple TIMESTAMP columns
row = connection.exec_driver_sql(
"show variables like '%%explicit_defaults_for_timestamp%%'"
).first()
explicit_defaults_for_timestamp = row[1].lower() in ("on", "1", "true")
reflected = []
for idx, cols in enumerate(
[
[
"x INTEGER NULL",
"y INTEGER NOT NULL",
"z INTEGER",
"q TIMESTAMP NULL",
],
["p TIMESTAMP NULL DEFAULT CURRENT_TIMESTAMP"],
["r TIMESTAMP NOT NULL"],
["s TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP"],
["t TIMESTAMP"],
["u TIMESTAMP DEFAULT CURRENT_TIMESTAMP"],
]
):
Table("nn_t%d" % idx, meta) # to allow DROP
connection.exec_driver_sql(
"""
CREATE TABLE nn_t%d (
%s
)
"""
% (idx, ", \n".join(cols))
)
reflected.extend(
{
"name": d["name"],
"nullable": d["nullable"],
"default": d["default"],
}
for d in inspect(connection).get_columns("nn_t%d" % idx)
)
if connection.dialect._is_mariadb_102:
current_timestamp = "current_timestamp()"
else:
current_timestamp = "CURRENT_TIMESTAMP"
eq_(
reflected,
[
{"name": "x", "nullable": True, "default": None},
{"name": "y", "nullable": False, "default": None},
{"name": "z", "nullable": True, "default": None},
{"name": "q", "nullable": True, "default": None},
{"name": "p", "nullable": True, "default": current_timestamp},
{
"name": "r",
"nullable": False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{"name": "s", "nullable": False, "default": current_timestamp},
{
"name": "t",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": None
if explicit_defaults_for_timestamp
else (
"%(current_timestamp)s "
"ON UPDATE %(current_timestamp)s"
)
% {"current_timestamp": current_timestamp},
},
{
"name": "u",
"nullable": True
if explicit_defaults_for_timestamp
else False,
"default": current_timestamp,
},
],
)
def test_reflection_with_unique_constraint(self, metadata, connection):
insp = inspect(connection)
meta = metadata
uc_table = Table(
"mysql_uc",
meta,
Column("a", String(10)),
UniqueConstraint("a", name="uc_a"),
)
uc_table.create(connection)
# MySQL converts unique constraints into unique indexes.
# separately we get both
indexes = dict((i["name"], i) for i in insp.get_indexes("mysql_uc"))
constraints = set(
i["name"] for i in insp.get_unique_constraints("mysql_uc")
)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"]["unique"])
self.assert_("uc_a" in constraints)
# reflection here favors the unique index, as that's the
# more "official" MySQL construct
reflected = Table("mysql_uc", MetaData(), autoload_with=testing.db)
indexes = dict((i.name, i) for i in reflected.indexes)
constraints = set(uc.name for uc in reflected.constraints)
self.assert_("uc_a" in indexes)
self.assert_(indexes["uc_a"].unique)
self.assert_("uc_a" not in constraints)
def test_reflect_fulltext(self, metadata, connection):
mt = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mariadb_engine="InnoDB",
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
mt.c.textdata,
mysql_prefix="FULLTEXT",
mariadb_prefix="FULLTEXT",
)
metadata.create_all(connection)
mt = Table("mytable", MetaData(), autoload_with=testing.db)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options[testing.db.name]["prefix"], "FULLTEXT")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable (textdata)",
)
@testing.requires.mysql_ngram_fulltext
def test_reflect_fulltext_comment(
self,
metadata,
connection,
):
mt = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("textdata", String(50)),
mysql_engine="InnoDB",
)
Index(
"textdata_ix",
mt.c.textdata,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
)
metadata.create_all(connection)
mt = Table("mytable", MetaData(), autoload_with=connection)
idx = list(mt.indexes)[0]
eq_(idx.name, "textdata_ix")
eq_(idx.dialect_options["mysql"]["prefix"], "FULLTEXT")
eq_(idx.dialect_options["mysql"]["with_parser"], "ngram")
self.assert_compile(
CreateIndex(idx),
"CREATE FULLTEXT INDEX textdata_ix ON mytable "
"(textdata) WITH PARSER ngram",
)
def test_non_column_index(self, metadata, connection):
m1 = metadata
t1 = Table(
"add_ix", m1, Column("x", String(50)), mysql_engine="InnoDB"
)
Index("foo_idx", t1.c.x.desc())
m1.create_all(connection)
insp = inspect(connection)
eq_(
insp.get_indexes("add_ix"),
[{"name": "foo_idx", "column_names": ["x"], "unique": False}],
)
def _bug_88718_96365_casing_0(self):
fkeys_casing_0 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "Test_Schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_0 = [
("Test", "Track", "TrackID"),
("Test_Schema", "Track", "TrackID"),
]
return fkeys_casing_0, ischema_casing_0
def _bug_88718_96365_casing_1(self):
fkeys_casing_1 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "Test_Schema",
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_1 = [
(util.u("Test"), util.u("Track"), "TrackID"),
(util.u("Test_Schema"), util.u("Track"), "TrackID"),
]
return fkeys_casing_1, ischema_casing_1
def _bug_88718_96365_casing_2(self):
fkeys_casing_2 = [
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
# I haven't tested schema name but since col/table both
# do it, assume schema name also comes back wrong
"referred_schema": "test_schema",
"referred_table": "track",
"referred_columns": ["trackid"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
# table name also comes back wrong (probably schema also)
# with casing=2, see https://bugs.mysql.com/bug.php?id=96365
"referred_table": "track",
"referred_columns": ["trackid"],
"options": {},
},
]
ischema_casing_2 = [
("Test", "Track", "TrackID"),
("Test_Schema", "Track", "TrackID"),
]
return fkeys_casing_2, ischema_casing_2
def test_correct_for_mysql_bugs_88718_96365(self):
dialect = mysql.dialect()
for casing, (fkeys, ischema) in [
(0, self._bug_88718_96365_casing_0()),
(1, self._bug_88718_96365_casing_1()),
(2, self._bug_88718_96365_casing_2()),
]:
dialect._casing = casing
dialect.default_schema_name = "Test"
connection = mock.Mock(
dialect=dialect, execute=lambda stmt, params: ischema
)
dialect._correct_for_mysql_bugs_88718_96365(fkeys, connection)
eq_(
fkeys,
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": "Test_Schema",
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
def test_case_sensitive_column_constraint_reflection(
self, metadata, connection
):
# test for issue #4344 which works around
# MySQL 8.0 bug https://bugs.mysql.com/bug.php?id=88718
m1 = metadata
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"Track",
m1,
Column("TrackID", Integer, primary_key=True),
schema=testing.config.test_schema,
mysql_engine="InnoDB",
)
Table(
"PlaylistTrack",
m1,
Column("id", Integer, primary_key=True),
Column(
"TrackID",
ForeignKey("Track.TrackID", name="FK_PlaylistTrackId"),
),
Column(
"TTrackID",
ForeignKey(
"%s.Track.TrackID" % (testing.config.test_schema,),
name="FK_PlaylistTTrackId",
),
),
mysql_engine="InnoDB",
)
m1.create_all(connection)
if connection.dialect._casing in (1, 2):
# the original test for the 88718 fix here in [ticket:4344]
# actually set referred_table='track', with the wrong casing!
# this test was never run. with [ticket:4751], I've gone through
# the trouble to create docker containers with true
# lower_case_table_names=2 and per
# https://bugs.mysql.com/bug.php?id=96365 the table name being
# lower case is also an 8.0 regression.
eq_(
inspect(connection).get_foreign_keys("PlaylistTrack"),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
else:
eq_(
sorted(
inspect(connection).get_foreign_keys("PlaylistTrack"),
key=lambda elem: elem["name"],
),
[
{
"name": "FK_PlaylistTTrackId",
"constrained_columns": ["TTrackID"],
"referred_schema": testing.config.test_schema,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
{
"name": "FK_PlaylistTrackId",
"constrained_columns": ["TrackID"],
"referred_schema": None,
"referred_table": "Track",
"referred_columns": ["TrackID"],
"options": {},
},
],
)
@testing.requires.mysql_fully_case_sensitive
def test_case_sensitive_reflection_dual_case_references(
self, metadata, connection
):
# this tests that within the fix we do for MySQL bug
# 88718, we don't do case-insensitive logic if the backend
# is case sensitive
m = metadata
Table(
"t1",
m,
Column("some_id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"T1",
m,
Column("Some_Id", Integer, primary_key=True),
mysql_engine="InnoDB",
)
Table(
"t2",
m,
Column("id", Integer, primary_key=True),
Column("t1id", ForeignKey("t1.some_id", name="t1id_fk")),
Column("cap_t1id", ForeignKey("T1.Some_Id", name="cap_t1id_fk")),
mysql_engine="InnoDB",
)
m.create_all(connection)
eq_(
dict(
(rec["name"], rec)
for rec in inspect(connection).get_foreign_keys("t2")
),
{
"cap_t1id_fk": {
"name": "cap_t1id_fk",
"constrained_columns": ["cap_t1id"],
"referred_schema": None,
"referred_table": "T1",
"referred_columns": ["Some_Id"],
"options": {},
},
"t1id_fk": {
"name": "t1id_fk",
"constrained_columns": ["t1id"],
"referred_schema": None,
"referred_table": "t1",
"referred_columns": ["some_id"],
"options": {},
},
},
)
class RawReflectionTest(fixtures.TestBase):
__backend__ = True
def setup(self):
dialect = mysql.dialect()
self.parser = _reflection.MySQLTableDefinitionParser(
dialect, dialect.identifier_preparer
)
def test_key_reflection(self):
regex = self.parser._re_key
assert regex.match(" PRIMARY KEY (`id`),")
assert regex.match(" PRIMARY KEY USING BTREE (`id`),")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE,")
assert regex.match(" PRIMARY KEY (`id`)")
assert regex.match(" PRIMARY KEY USING BTREE (`id`)")
assert regex.match(" PRIMARY KEY (`id`) USING BTREE")
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE 16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE=16"
)
assert regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = 16"
)
assert not regex.match(
" PRIMARY KEY (`id`) USING BTREE KEY_BLOCK_SIZE = = 16"
)
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'")
# `SHOW CREATE TABLE` returns COMMENT '''comment'
# after creating table with COMMENT '\'comment'
assert regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'comment'''")
assert regex.match(" KEY (`id`) USING BTREE COMMENT 'prefix''suffix'")
assert regex.match(
" KEY (`id`) USING BTREE COMMENT 'prefix''text''suffix'"
)
# https://forums.mysql.com/read.php?20,567102,567111#msg-567111
# "It means if the MySQL version >= 501, execute what's in the comment"
assert regex.match(
" FULLTEXT KEY `ix_fulltext_oi_g_name` (`oi_g_name`) "
"/*!50100 WITH PARSER `ngram` */ "
)
def test_key_reflection_columns(self):
regex = self.parser._re_key
exprs = self.parser._re_keyexprs
m = regex.match(" KEY (`id`) USING BTREE COMMENT '''comment'")
eq_(m.group("columns"), "`id`")
m = regex.match(" KEY (`x`, `y`) USING BTREE")
eq_(m.group("columns"), "`x`, `y`")
eq_(exprs.findall(m.group("columns")), [("x", "", ""), ("y", "", "")])
m = regex.match(" KEY (`x`(25), `y`(15)) USING BTREE")
eq_(m.group("columns"), "`x`(25), `y`(15)")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", ""), ("y", "15", "")],
)
m = regex.match(" KEY (`x`(25) DESC, `y`(15) ASC) USING BTREE")
eq_(m.group("columns"), "`x`(25) DESC, `y`(15) ASC")
eq_(
exprs.findall(m.group("columns")),
[("x", "25", "DESC"), ("y", "15", "ASC")],
)
m = regex.match(" KEY `foo_idx` (`x` DESC)")
eq_(m.group("columns"), "`x` DESC")
eq_(exprs.findall(m.group("columns")), [("x", "", "DESC")])
m = regex.match(" KEY `foo_idx` (`x` DESC, `y` ASC)")
eq_(m.group("columns"), "`x` DESC, `y` ASC")
def test_fk_reflection(self):
regex = self.parser._re_fk_constraint
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE CASCADE"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"CASCADE",
),
)
m = regex.match(
" CONSTRAINT `addresses_user_id_fkey` "
"FOREIGN KEY (`user_id`) "
"REFERENCES `users` (`id`) "
"ON DELETE CASCADE ON UPDATE SET NULL"
)
eq_(
m.groups(),
(
"addresses_user_id_fkey",
"`user_id`",
"`users`",
"`id`",
None,
"CASCADE",
"SET NULL",
),
)
| 35.319838
| 79
| 0.501582
|
928aa40d05c6bc74e39e5d65b04160d83cfe2b06
| 25,518
|
py
|
Python
|
tests/st/utils/data.py
|
electricjesus/calicoctl
|
48a4b06b7bd40f4ea8723716b604ca0f0aae8677
|
[
"Apache-2.0"
] | null | null | null |
tests/st/utils/data.py
|
electricjesus/calicoctl
|
48a4b06b7bd40f4ea8723716b604ca0f0aae8677
|
[
"Apache-2.0"
] | null | null | null |
tests/st/utils/data.py
|
electricjesus/calicoctl
|
48a4b06b7bd40f4ea8723716b604ca0f0aae8677
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2015-2020 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Various test data that may be shared across multiple tests.
# Naming follows the approximate format:
#
# <kind>_name<idx>_rev<revision>_<key attributes>
#
# Incrementing name indexes indicate the order in which they would be listed.
#
# The rev suffix (rev1, rev2, ...) indicates that it should be possible to switch
# between different revisions of the same data.
#
# The key attributes provide some useful additional data, for example a v4-specific
# resource.
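# As a worked example of this format, ippool_name1_rev1_v4 below is an IPPool, first
# in listing order, at its first revision, and IPv4-specific.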
import netaddr
from utils import API_VERSION
#
# IPPools
#
ippool_name1_rev1_v4 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name1'
},
'spec': {
'cidr': "10.0.1.0/24",
'ipipMode': 'Always',
'vxlanMode': 'Never',
'blockSize': 27,
'nodeSelector': "foo == 'bar'",
}
}
ippool_name1_rev1_table = (
"NAME CIDR SELECTOR \n"
"ippool-name1 10.0.1.0/24 foo == 'bar'"
)
ippool_name1_rev1_wide_table = (
"NAME CIDR NAT IPIPMODE VXLANMODE DISABLED SELECTOR \n"
"ippool-name1 10.0.1.0/24 false Always Never false foo == 'bar'"
)
ippool_name1_rev2_v4 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name1'
},
'spec': {
'cidr': "10.0.1.0/24",
'ipipMode': 'Never',
'vxlanMode': 'Always',
'nodeSelector': "all()",
}
}
ippool_name2_rev1_v6 = {
'apiVersion': API_VERSION,
'kind': 'IPPool',
'metadata': {
'name': 'ippool-name2'
},
'spec': {
'cidr': "fed0:8001::/64",
'ipipMode': 'Never',
'vxlanMode': 'Never',
'blockSize': 123,
'nodeSelector': "all()",
}
}
ippool_name2_rev1_table = (
"NAME CIDR SELECTOR \n"
"ippool-name2 fed0:8001::/64 all()"
)
#
# BGPPeers
#
bgppeer_name1_rev1_v4 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-123abc',
},
'spec': {
'node': 'node1',
'peerIP': '192.168.0.250',
'asNumber': 64514,
},
}
bgppeer_name1_rev2_v4 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-123abc',
},
'spec': {
'node': 'node2',
'peerIP': '192.168.0.251',
'asNumber': 64515,
},
}
bgppeer_name2_rev1_v6 = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-456def',
},
'spec': {
'node': 'node2',
'peerIP': 'fd5f::6:ee',
'asNumber': 64590,
},
}
bgppeer_invalid = {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-name-123abc',
},
'spec': {
'node': 'node2',
'peerIP': 'badpeerIP',
'asNumber': 64515,
},
}
bgppeer_multiple_invalid = [{
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-invalid1',
},
'spec': {
'node': 'node1',
'peerIP': 'badpeerIP',
'asNumber': 64515,
},
}, {
'apiVersion': API_VERSION,
'kind': 'BGPPeer',
'metadata': {
'name': 'bgppeer-invalid2',
},
'spec': {
'node': 'node2',
'peerIP': 'badpeerIP',
'asNumber': 64515,
}
}]
#
# Network Policy
#
networkpolicy_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
'namespace': 'default'
},
'spec': {
'order': 100,
'selector': "type=='database'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"},
},
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'",
},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:1024'],
'selector': "type=='application'",
'namespaceSelector': 'has(role)',
}
}
],
}
}
networkpolicy_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
'namespace': 'default'
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
networkpolicy_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'NetworkPolicy',
'metadata': {
'name': 'policy-mypolicy2',
'namespace': 'default',
'generateName': 'test-policy-',
'deletionTimestamp': '2006-01-02T15:04:07Z',
'deletionGracePeriodSeconds': 30,
'ownerReferences': [{
'apiVersion': 'extensions/v1beta1',
'blockOwnerDeletion': True,
'controller': True,
'kind': 'DaemonSet',
'name': 'endpoint1',
'uid': 'test-uid-change',
}],
'clusterName': 'cluster1',
'labels': {'label1': 'l1', 'label2': 'l2'},
'annotations': {'key': 'value'},
'selfLink': 'test-self-link',
'uid': 'test-uid-change',
'generation': 3,
'finalizers': ['finalizer1', 'finalizer2'],
'creationTimestamp': '2006-01-02T15:04:05Z',
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
#
# Global Network Policy
#
globalnetworkpolicy_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
},
'spec': {
'order': 100,
'selector': "type=='database'",
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"},
},
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'",
},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:1024'],
'selector': "type=='application'",
'namespaceSelector': 'has(role)',
}
}
],
}
}
globalnetworkpolicy_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkPolicy',
'metadata': {
'name': 'policy-mypolicy1',
},
'spec': {
'order': 100000,
'selector': "type=='sql'",
'doNotTrack': True,
'applyOnForward': True,
'types': ['Ingress', 'Egress'],
'egress': [
{
'action': 'Deny',
'protocol': 'TCP',
},
],
'ingress': [
{
'action': 'Allow',
'protocol': 'UDP',
},
],
}
}
#
# Global network sets
#
globalnetworkset_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkSet',
'metadata': {
'name': 'net-set1',
},
'spec': {
'nets': [
"10.0.0.1",
"11.0.0.0/16",
"feed:beef::1",
"dead:beef::96",
]
}
}
# A network set with a large number of entries. In prototyping this test, I found that there are
# "upstream" limits that cap how large we can go:
#
# - Kubernetes' gRPC API has a 4MB message size limit.
# - etcdv3 has a 1MB value size limit.
many_nets = []
for i in xrange(10000):
many_nets.append("10.%s.%s.0/28" % (i >> 8, i % 256))
globalnetworkset_name1_rev1_large = {
'apiVersion': API_VERSION,
'kind': 'GlobalNetworkSet',
'metadata': {
'name': 'net-set1',
},
'spec': {
'nets': many_nets,
}
}
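# Rough size sketch for the large set above (an estimate, not a measured figure):
# 10000 CIDR strings of roughly 13 characters each come to about 130 KB of raw text,
# so even with per-entry encoding overhead the object stays well below the 1MB etcdv3
# value limit and the 4MB Kubernetes gRPC message limit noted in the comment above.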
#
# Network sets
#
networkset_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'NetworkSet',
'metadata': {
'name': 'net-set1'
},
'spec': {
'nets': [
"10.0.0.1",
"11.0.0.0/16",
"feed:beef::1",
"dead:beef::96",
]
}
}
# A network set with a large number of entries. In prototyping this test, I found that there are
# "upstream" limits that cap how large we can go:
#
# - Kubernetes' gRPC API has a 4MB message size limit.
# - etcdv3 has a 1MB value size limit.
many_nets = []
for i in xrange(10000):
many_nets.append("10.%s.%s.0/28" % (i >> 8, i % 256))
networkset_name1_rev1_large = {
'apiVersion': API_VERSION,
'kind': 'NetworkSet',
'metadata': {
'name': 'net-set1',
'namespace': 'namespace-1'
},
'spec': {
'nets': many_nets,
}
}
#
# Host Endpoints
#
hostendpoint_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'database'},
},
'spec': {
'interfaceName': 'eth0',
'profiles': ['prof1', 'prof2'],
'node': 'host1'
}
}
hostendpoint_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'frontend'}
},
'spec': {
'interfaceName': 'cali7',
'profiles': ['prof1', 'prof2'],
'node': 'host2'
}
}
hostendpoint_name1_rev3 = {
'apiVersion': API_VERSION,
'kind': 'HostEndpoint',
'metadata': {
'name': 'endpoint1',
'labels': {'type': 'frontend', 'misc': 'version1'},
'annotations': {'key': 'value'},
'selfLink': 'test-self-link',
'uid': 'test-uid-change',
'generation': 3,
'finalizers': ['finalizer1', 'finalizer2'],
'creationTimestamp': '2006-01-02T15:04:05Z',
},
'spec': {
'interfaceName': 'cali7',
'profiles': ['prof1', 'prof2'],
'node': 'host2'
}
}
#
# Profiles
#
profile_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'labels': {'foo': 'bar'},
'name': 'profile-name1'
},
'spec': {
'egress': [
{
'action': 'Allow',
'source': {
'selector': "type=='application'"
}
}
],
'ingress': [
{
'ipVersion': 4,
'action': 'Deny',
'destination': {
'notNets': ['10.3.0.0/16'],
'notPorts': ['110:1050'],
'notSelector': "type=='apples'",
'nets': ['10.2.0.0/16'],
'ports': ['100:200'],
'selector': "type=='application'"},
'protocol': 'TCP',
'source': {
'notNets': ['10.1.0.0/16'],
'notPorts': [1050],
'notSelector': "type=='database'",
'nets': ['10.0.0.0/16'],
'ports': [1234, '10:20'],
'selector': "type=='application'",
}
}
],
}
}
profile_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'Profile',
'metadata': {
'name': 'profile-name1',
},
'spec': {
'egress': [
{
'action': 'Allow'
}
],
'ingress': [
{
'ipVersion': 6,
'action': 'Deny',
},
],
}
}
#
# Workload Endpoints
#
workloadendpoint_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'WorkloadEndpoint',
'metadata': {
'labels': {
'projectcalico.org/namespace': 'namespace1',
'projectcalico.org/orchestrator': 'k8s',
'type': 'database',
},
'name': 'node1-k8s-abcd-eth0',
'namespace': 'namespace1',
},
'spec': {
'node': 'node1',
'orchestrator': 'k8s',
'pod': 'abcd',
'endpoint': 'eth0',
'containerID': 'container1234',
'ipNetworks': ['1.2.3.4/32'],
'interfaceName': 'cali1234',
'profiles': ['prof1', 'prof2'],
}
}
workloadendpoint_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'WorkloadEndpoint',
'metadata': {
'labels': {
'projectcalico.org/namespace': 'namespace1',
'projectcalico.org/orchestrator': 'cni',
'type': 'database'
},
'name': 'node2-cni-container1234-eth0',
'namespace': 'namespace1',
},
'spec': {
'node': 'node2',
'orchestrator': 'cni',
'endpoint': 'eth0',
'containerID': 'container1234',
'ipNetworks': ['1.2.3.4/32'],
'interfaceName': 'cali1234',
'profiles': ['prof1', 'prof2'],
}
}
#
# Nodes
#
node_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node1',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.4/24',
'ipv6Address': 'aa:bb:cc::ff/120',
}
}
}
node_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node2',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.5/24',
'ipv6Address': 'aa:bb:cc::ee/120',
}
}
}
node_name3_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node3',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.6/24',
'ipv6Address': 'aa:bb:cc::dd/120',
}
}
}
node_name4_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node4',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.4/24',
'ipv6Address': 'aa:bb:cc::ff/120',
},
'orchRefs': [
{
'nodeName': 'node4',
'orchestrator': 'k8s',
},
],
}
}
node_name5_rev1 = {
'apiVersion': API_VERSION,
'kind': 'Node',
'metadata': {
'name': 'node5',
},
'spec': {
'bgp': {
'ipv4Address': '1.2.3.5/24',
'ipv6Address': 'aa:bb:cc::ff/120',
},
'orchRefs': [
{
'nodeName': 'node4',
'orchestrator': 'k8s',
},
],
}
}
#
# BGPConfigs
#
bgpconfig_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Info',
'nodeToNodeMeshEnabled': True,
'asNumber': 6512,
}
}
bgpconfig_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Info',
'nodeToNodeMeshEnabled': False,
'asNumber': 6511,
}
}
bgpconfig_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Info',
}
}
bgpconfig_name2_rev2 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Debug',
}
}
bgpconfig_name2_rev3 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'bgpconfiguration1',
},
'spec': {
'logSeverityScreen': 'Debug',
'nodeToNodeMeshEnabled': True,
}
}
bgpconfig_name3_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'node.node5',
},
'spec': {
'logSeverityScreen': 'Debug',
}
}
bgpconfig_name4_rev1 = {
'apiVersion': API_VERSION,
'kind': 'BGPConfiguration',
'metadata': {
'name': 'node.node4',
},
'spec': {
'logSeverityScreen': 'Debug',
}
}
#
# FelixConfigs
#
felixconfig_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'chainInsertMode': 'append',
'defaultEndpointToHostAction': 'Accept',
'failsafeInboundHostPorts': [
{'protocol': 'TCP', 'port': 666},
{'protocol': 'UDP', 'port': 333}, ],
'failsafeOutboundHostPorts': [
{'protocol': 'TCP', 'port': 999},
{'protocol': 'UDP', 'port': 222},
{'protocol': 'UDP', 'port': 422}, ],
'interfacePrefix': 'humperdink',
'ipipMTU': 1521,
'ipsetsRefreshInterval': '44s',
'iptablesFilterAllowAction': 'Return',
'iptablesLockFilePath': '/run/fun',
'iptablesLockProbeInterval': '500ms',
'iptablesLockTimeout': '22s',
'iptablesMangleAllowAction': 'Accept',
'iptablesMarkMask': 0xff0000,
'iptablesPostWriteCheckInterval': '12s',
'iptablesRefreshInterval': '22s',
'ipv6Support': True,
'logFilePath': '/var/log/fun.log',
'logPrefix': 'say-hello-friend',
'logSeverityScreen': 'Info',
'maxIpsetSize': 8192,
'metadataAddr': '127.1.1.1',
'metadataPort': 8999,
'netlinkTimeout': '10s',
'prometheusGoMetricsEnabled': True,
'prometheusMetricsEnabled': True,
'prometheusMetricsPort': 11,
'prometheusProcessMetricsEnabled': True,
'reportingInterval': '10s',
'reportingTTL': '99s',
'routeRefreshInterval': '33s',
'usageReportingEnabled': False,
}
}
felixconfig_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'ipv6Support': False,
'logSeverityScreen': 'Debug',
'netlinkTimeout': '11s',
}
}
# The large values for `netlinkTimeout` and `reportingTTL` will be transformed
# into a different unit type in the format `XhXmXs`.
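# For example, the '125s' below is 2 minutes 5 seconds and '9910s' is
# 2 hours 45 minutes 10 seconds when expressed in that format.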
felixconfig_name1_rev3 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'felixconfiguration1',
},
'spec': {
'ipv6Support': False,
'logSeverityScreen': 'Debug',
'netlinkTimeout': '125s',
'reportingTTL': '9910s',
}
}
felixconfig_name2_rev1 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'node.node5',
},
'spec': {
'chainInsertMode': 'append',
'defaultEndpointToHostAction': 'Accept',
'failsafeInboundHostPorts': [
{'protocol': 'TCP', 'port': 666},
{'protocol': 'UDP', 'port': 333}, ],
'failsafeOutboundHostPorts': [
{'protocol': 'TCP', 'port': 999},
{'protocol': 'UDP', 'port': 222},
{'protocol': 'UDP', 'port': 422}, ],
'interfacePrefix': 'humperdink',
'ipipMTU': 1521,
'ipsetsRefreshInterval': '44s',
'iptablesFilterAllowAction': 'Return',
'iptablesLockFilePath': '/run/fun',
'iptablesLockProbeInterval': '500ms',
'iptablesLockTimeout': '22s',
'iptablesMangleAllowAction': 'Accept',
'iptablesMarkMask': 0xff0000,
'iptablesPostWriteCheckInterval': '12s',
'iptablesRefreshInterval': '22s',
'ipv6Support': True,
'logFilePath': '/var/log/fun.log',
'logPrefix': 'say-hello-friend',
'logSeverityScreen': 'Info',
'maxIpsetSize': 8192,
'metadataAddr': '127.1.1.1',
'metadataPort': 8999,
'netlinkTimeout': '10s',
'prometheusGoMetricsEnabled': True,
'prometheusMetricsEnabled': True,
'prometheusMetricsPort': 11,
'prometheusProcessMetricsEnabled': True,
'reportingInterval': '10s',
'reportingTTL': '99s',
'routeRefreshInterval': '33s',
'usageReportingEnabled': False,
}
}
felixconfig_name3_rev1 = {
'apiVersion': API_VERSION,
'kind': 'FelixConfiguration',
'metadata': {
'name': 'node.node4',
},
'spec': {
'chainInsertMode': 'append',
'defaultEndpointToHostAction': 'Accept',
'failsafeInboundHostPorts': [
{'protocol': 'TCP', 'port': 666},
{'protocol': 'UDP', 'port': 333}, ],
'failsafeOutboundHostPorts': [
{'protocol': 'TCP', 'port': 999},
{'protocol': 'UDP', 'port': 222},
{'protocol': 'UDP', 'port': 422}, ],
'interfacePrefix': 'humperdink',
'ipipMTU': 1521,
'ipsetsRefreshInterval': '44s',
'iptablesFilterAllowAction': 'Return',
'iptablesLockFilePath': '/run/fun',
'iptablesLockProbeInterval': '500ms',
'iptablesLockTimeout': '22s',
'iptablesMangleAllowAction': 'Accept',
'iptablesMarkMask': 0xff0000,
'iptablesPostWriteCheckInterval': '12s',
'iptablesRefreshInterval': '22s',
'ipv6Support': True,
'logFilePath': '/var/log/fun.log',
'logPrefix': 'say-hello-friend',
'logSeverityScreen': 'Info',
'maxIpsetSize': 8192,
'metadataAddr': '127.1.1.1',
'metadataPort': 8999,
'netlinkTimeout': '10s',
'prometheusGoMetricsEnabled': True,
'prometheusMetricsEnabled': True,
'prometheusMetricsPort': 11,
'prometheusProcessMetricsEnabled': True,
'reportingInterval': '10s',
'reportingTTL': '99s',
'routeRefreshInterval': '33s',
'usageReportingEnabled': False,
}
}
#
# ClusterInfo
#
clusterinfo_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'ClusterInformation',
'metadata': {
'name': 'default',
},
'spec': {
'clusterGUID': 'cluster-guid1',
'datastoreReady': True,
}
}
clusterinfo_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'ClusterInformation',
'metadata': {
'name': 'default',
},
'spec': {
'clusterGUID': 'cluster-guid2',
'clusterType': 'cluster-type2',
'calicoVersion': 'calico-version2',
}
}
#
# KubeControllersConfiguration
#
kubecontrollersconfig_name1_rev1 = {
'apiVersion': API_VERSION,
'kind': 'KubeControllersConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Info',
'controllers': {
'node': {
'syncLabels': 'Enabled',
'hostEndpoint': {
'autoCreate': 'Disabled',
}
}
}
}
}
kubecontrollersconfig_name1_rev2 = {
'apiVersion': API_VERSION,
'kind': 'KubeControllersConfiguration',
'metadata': {
'name': 'default',
},
'spec': {
'logSeverityScreen': 'Debug',
'controllers': {
'node': {
'syncLabels': 'Enabled',
'hostEndpoint': {
'autoCreate': 'Disabled',
}
},
'namespace': {},
}
},
'status': {
'environmentVars': {
'LOG_LEVEL': 'Info',
}
}
}
| 24.871345
| 97
| 0.486833
|
2055baac99c93a9637972e9e5b2e3f60993de505
| 692
|
py
|
Python
|
PyInstaller/hooks/hook-gi.repository.Adw.py
|
BA7JCM/pyinstaller
|
51ff689509391921bdce42848f3a077dcbb40ca0
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/hooks/hook-gi.repository.Adw.py
|
BA7JCM/pyinstaller
|
51ff689509391921bdce42848f3a077dcbb40ca0
|
[
"Apache-2.0"
] | null | null | null |
PyInstaller/hooks/hook-gi.repository.Adw.py
|
BA7JCM/pyinstaller
|
51ff689509391921bdce42848f3a077dcbb40ca0
|
[
"Apache-2.0"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2022, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------
"""
Import hook for PyGObject https://wiki.gnome.org/PyGObject
"""
from PyInstaller.utils.hooks.gi import get_gi_typelibs
binaries, datas, hiddenimports = get_gi_typelibs('Adw', '1')
| 38.444444
| 78
| 0.593931
|
3633f038ff27a950dc8957691db40ad7a8198eef
| 227
|
py
|
Python
|
scripts/item/consume_2436230.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 54
|
2019-04-16T23:24:48.000Z
|
2021-12-18T11:41:50.000Z
|
scripts/item/consume_2436230.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 3
|
2019-05-19T15:19:41.000Z
|
2020-04-27T16:29:16.000Z
|
scripts/item/consume_2436230.py
|
G00dBye/YYMS
|
1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb
|
[
"MIT"
] | 49
|
2020-11-25T23:29:16.000Z
|
2022-03-26T16:20:24.000Z
|
# Created by MechAviv
# Cozy Christmas Damage Skin | (2436230)
if sm.addDamageSkin(2436230):
sm.chat("'Cozy Christmas Damage Skin' Damage Skin has been added to your account's damage skin collection.")
sm.consumeItem()
| 45.4
| 113
| 0.744493
|
0c0182de49e2505d5aa40a22ad9e2d379fdd2673
| 470
|
py
|
Python
|
visitors/management/commands/deleteoldvisitors.py
|
ASquirrelsTail/serve-up
|
9533ba82f5b4989434b3b20352d17a8131bb9619
|
[
"MIT"
] | null | null | null |
visitors/management/commands/deleteoldvisitors.py
|
ASquirrelsTail/serve-up
|
9533ba82f5b4989434b3b20352d17a8131bb9619
|
[
"MIT"
] | 10
|
2021-03-30T14:05:21.000Z
|
2022-03-12T00:41:15.000Z
|
visitors/management/commands/deleteoldvisitors.py
|
ASquirrelsTail/serve-up
|
9533ba82f5b4989434b3b20352d17a8131bb9619
|
[
"MIT"
] | null | null | null |
from django.core.management.base import BaseCommand
from django.utils.timezone import now
from datetime import timedelta
from visitors.models import Visitor
class Command(BaseCommand):
help = 'Removes visitors older than 21 days from the database'
def handle(self, *args, **options):
result = Visitor.objects.filter(group__time__lte=now() - timedelta(days=21)).delete()
print('Deleted {} visitor records older than 21 days'.format(result[0]))
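# Usage sketch: as a standard Django management command this is invoked as
#   python manage.py deleteoldvisitors
# (running it from a daily cron job or scheduler is a suggested deployment, not
# something this module configures itself).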
| 36.153846
| 93
| 0.746809
|
ebf0e20ba1fb81894ac68daf8e87dade0282ab5f
| 953
|
py
|
Python
|
extra-packages/pyperl-1.0.1d/t/thr-svrv.py
|
UfSoft/ISPManCCP
|
a415cae3a6860a86591e932606d31dab844703df
|
[
"BSD-3-Clause"
] | null | null | null |
extra-packages/pyperl-1.0.1d/t/thr-svrv.py
|
UfSoft/ISPManCCP
|
a415cae3a6860a86591e932606d31dab844703df
|
[
"BSD-3-Clause"
] | null | null | null |
extra-packages/pyperl-1.0.1d/t/thr-svrv.py
|
UfSoft/ISPManCCP
|
a415cae3a6860a86591e932606d31dab844703df
|
[
"BSD-3-Clause"
] | null | null | null |
import sys
try:
import thread
except:
print "1..0"
sys.exit(0)
import perl
if not perl.MULTI_PERL:
print "1..0"
sys.exit(0)
# This tests the behaviour of perl objects passed from one
# thread (and perl interpreter) to the next one, and that
# they are still destroyed properly.
print "1..5"
perl_obj = perl.eval("""
sub Foo::hello {
return "Hello";
}
sub Foo::DESTROY
{
my $self = shift;
print "ok 2\n";
}
bless {}, "Foo";
""")
#print perl_obj.hello();
#print perl_obj
def t1():
global perl_obj
try:
perl_obj.hello()
print "not "
except ValueError, v:
print "ok 1"
#print v
perl.eval("""sub Foo::DESTROY { $|=1; print "ok 4\n"; }""");
perl_obj = perl.get_ref("@")
perl_obj.__class__ = "Foo";
#print perl_obj
print "ok 3"
sys.stdout.flush();
thread.start_new_thread(t1, ())
import time
time.sleep(2)
#print perl_obj
perl_obj = None
print "ok 5"
| 14.223881
| 64
| 0.604407
|
fd458dc03133a4b64beeb5e1678e9ed834a80349
| 1,314
|
py
|
Python
|
notebooks/15.0-BDP-try-compartment.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/15.0-BDP-try-compartment.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/15.0-BDP-try-compartment.py
|
zeou1/maggot_models
|
4e1b518c2981ab1ca9607099c3813e8429d94ca4
|
[
"BSD-3-Clause"
] | null | null | null |
#%%
from src.data import load_june
from src.utils import meta_to_array
from graspy.utils import import_graph
import networkx as nx
import numpy as np
import matplotlib.pyplot as plt
graph = load_june("Gadn")
names = meta_to_array(graph, "Name")
names
classes = meta_to_array(graph, "Class")
classes
nodes = list(graph.nodes())
labels = dict(zip(nodes, names))
for i, c in enumerate(classes):
if c == "OANs":
print(names[i])
print(i)
print
adj = import_graph(graph)
#%%
np.unique(classes)
#%%
for i, n in enumerate(graph.nodes(data=True)):
data = n[1]
node = n[0]
name = data["Name"]
cell_class = data["Class"]
if cell_class == "KCs":
print(name)
print(i)
print(node)
print("Edges incident: " + str(len(graph[node])))
neighbor_graph = nx.ego_graph(graph, node)
neighbor_names = meta_to_array(neighbor_graph, "Name")
neighbor_nodes = list(neighbor_graph.nodes())
labels = dict(zip(neighbor_nodes, neighbor_names))
plt.figure(figsize=(10, 10))
nx.draw_networkx(neighbor_graph, labels=labels)
plt.title(name)
plt.show()
oan_ind = "n2243"
#%%
for nbr in graph[oan_ind]:
print(nbr)
#%%
graph.subgraph(["n1"])
#%%
oan_ind = "n2243"
out = graph.nodes("n2243")
out
for i in out:
print(i)
#%%
graph["n2243"]
#%%
| 19.909091
| 62
| 0.626332
|
76782cccafe939f000c6f7da57ef1563d496968d
| 82,254
|
py
|
Python
|
pywasm/execution.py
|
marat1961/pywasm
|
efcbf580808dc5a4ce741e4b84434cc3aa56f02d
|
[
"MIT"
] | 1
|
2021-06-20T08:31:28.000Z
|
2021-06-20T08:31:28.000Z
|
pywasm/execution.py
|
marat1961/pywasm
|
efcbf580808dc5a4ce741e4b84434cc3aa56f02d
|
[
"MIT"
] | null | null | null |
pywasm/execution.py
|
marat1961/pywasm
|
efcbf580808dc5a4ce741e4b84434cc3aa56f02d
|
[
"MIT"
] | null | null | null |
import typing
import numpy
from . import binary
from . import convention
from . import instruction
from . import log
from . import num
from . import option
# ======================================================================================================================
# Execution Runtime Structure
# ======================================================================================================================
class Value:
# Values are represented by themselves.
def __init__(self):
self.type: binary.ValueType
self.data: bytearray = bytearray(8)
def __repr__(self):
return f'{self.type} {self.val()}'
@classmethod
def new(cls, type: binary.ValueType, data: typing.Union[int, float]):
return {
convention.i32: Value.from_i32,
convention.i64: Value.from_i64,
convention.f32: lambda x: Value.from_f32(num.f32(x)),
convention.f64: lambda x: Value.from_f64(num.f64(x)),
}[type](data)
@classmethod
def raw(cls, type: binary.ValueType, data: bytearray):
o = Value()
o.type = type
o.data = data
return o
def val(self) -> typing.Union[num.i32, num.i64, num.f32, num.f64]:
return {
convention.i32: self.i32,
convention.i64: self.i64,
convention.f32: self.f32,
convention.f64: self.f64,
}[self.type]()
def i32(self) -> num.i32:
return num.LittleEndian.i32(self.data[0:4])
def i64(self) -> num.i64:
return num.LittleEndian.i64(self.data[0:8])
def u32(self) -> num.u32:
return num.LittleEndian.u32(self.data[0:4])
def u64(self) -> num.u64:
return num.LittleEndian.u64(self.data[0:8])
def f32(self) -> num.f32:
return num.LittleEndian.f32(self.data[0:4])
def f64(self) -> num.f64:
return num.LittleEndian.f64(self.data[0:8])
@classmethod
def from_i32(cls, n: num.i32):
o = Value()
o.type = binary.ValueType(convention.i32)
o.data[0:4] = num.LittleEndian.pack_i32(num.int2i32(n))
return o
@classmethod
def from_i64(cls, n: num.i64):
o = Value()
o.type = binary.ValueType(convention.i64)
o.data[0:8] = num.LittleEndian.pack_i64(num.int2i64(n))
return o
@classmethod
def from_u32(cls, n: num.u32):
o = Value()
o.type = binary.ValueType(convention.i32)
o.data[0:4] = num.LittleEndian.pack_u32(num.int2u32(n))
return o
@classmethod
def from_u64(cls, n: num.u64):
o = Value()
o.type = binary.ValueType(convention.i64)
o.data[0:8] = num.LittleEndian.pack_u64(num.int2u64(n))
return o
@classmethod
def from_f32(cls, n: num.f32):
assert isinstance(n, num.f32)
o = Value()
o.type = binary.ValueType(convention.f32)
o.data[0:4] = num.LittleEndian.pack_f32(n)
return o
@classmethod
def from_f32_u32(cls, n: num.u32):
o = Value.from_u32(n)
o.type = binary.ValueType(convention.f32)
return o
@classmethod
def from_f64(cls, n: num.f64):
assert isinstance(n, num.f64)
o = Value()
o.type = binary.ValueType(convention.f64)
o.data[0:8] = num.LittleEndian.pack_f64(n)
return o
@classmethod
def from_f64_u64(cls, n: num.u64):
o = Value.from_u64(n)
o.type = binary.ValueType(convention.f64)
return o
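    # Usage sketch (illustrative only): the 8-byte buffer is shared by the signed and
    # unsigned accessors, so Value.from_i32(-1).i32() == -1 while
    # Value.from_i32(-1).u32() == 4294967295 (the same four bytes read as unsigned).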
class Result:
# A result is the outcome of a computation. It is either a sequence of values or a trap.
def __init__(self, data: typing.List[Value]):
self.data = data
def __repr__(self):
return self.data.__repr__()
class FunctionAddress(int):
def __repr__(self):
return f'FunctionAddress({super().__repr__()})'
class TableAddress(int):
def __repr__(self):
return f'TableAddress({super().__repr__()})'
class MemoryAddress(int):
def __repr__(self):
return f'MemoryAddress({super().__repr__()})'
class GlobalAddress(int):
def __repr__(self):
return f'GlobalAddress({super().__repr__()})'
class ModuleInstance:
# A module instance is the runtime representation of a module. It is created by instantiating a module, and
# collects runtime representations of all entities that are imported, defined, or exported by the module.
#
# moduleinst ::= {
# types functype∗
# funcaddrs funcaddr∗
# tableaddrs tableaddr∗
# memaddrs memaddr∗
# globaladdrs globaladdr∗
# exports exportinst∗
# }
def __init__(self):
self.type_list: typing.List[binary.FunctionType] = []
self.function_addr_list: typing.List[FunctionAddress] = []
self.table_addr_list: typing.List[TableAddress] = []
self.memory_addr_list: typing.List[MemoryAddress] = []
self.global_addr_list: typing.List[GlobalAddress] = []
self.export_list: typing.List[ExportInstance] = []
class WasmFunc:
def __init__(self, function_type: binary.FunctionType, module: ModuleInstance, code: binary.Function):
self.type = function_type
self.module = module
self.code = code
def __repr__(self):
return f'wasm_func({self.type})'
class HostFunc:
# A host function is a function expressed outside WebAssembly but passed to a module as an import. The definition
# and behavior of host functions are outside the scope of this specification. For the purpose of this
# specification, it is assumed that when invoked, a host function behaves non-deterministically, but within certain
# constraints that ensure the integrity of the runtime.
def __init__(self, function_type: binary.FunctionType, hostcode: typing.Callable):
self.type = function_type
self.hostcode = hostcode
def __repr__(self):
return self.hostcode.__name__
# A function instance is the runtime representation of a function. It effectively is a closure of the original
# function over the runtime module instance of its originating module. The module instance is used to resolve
# references to other definitions during execution of the function.
#
# funcinst ::= {type functype,module moduleinst,code func}
# | {type functype,hostcode hostfunc}
# hostfunc ::= ...
FunctionInstance = typing.Union[WasmFunc, HostFunc]
class TableInstance:
# A table instance is the runtime representation of a table. It holds a vector of function elements and an optional
# maximum size, if one was specified in the table type at the table’s definition site.
#
# Each function element is either empty, representing an uninitialized table entry, or a function address. Function
# elements can be mutated through the execution of an element segment or by external means provided by the embedder.
#
# tableinst ::= {elem vec(funcelem), max u32?}
# funcelem ::= funcaddr?
#
# It is an invariant of the semantics that the length of the element vector never exceeds the maximum size, if
# present.
def __init__(self, element_type: int, limits: binary.Limits):
self.element_type = element_type
self.element_list: typing.List[typing.Optional[FunctionAddress]] = [None for _ in range(limits.n)]
self.limits = limits
class MemoryInstance:
# A memory instance is the runtime representation of a linear memory. It holds a vector of bytes and an optional
# maximum size, if one was specified at the definition site of the memory.
#
# meminst ::= {data vec(byte), max u32?}
#
# The length of the vector always is a multiple of the WebAssembly page size, which is defined to be the constant
# 65536 – abbreviated 64Ki. Like in a memory type, the maximum size in a memory instance is given in units of this
# page size.
#
# The bytes can be mutated through memory instructions, the execution of a data segment, or by external means
# provided by the embedder.
#
# It is an invariant of the semantics that the length of the byte vector, divided by page size, never exceeds the
# maximum size, if present.
def __init__(self, type: binary.MemoryType):
self.type = type
self.data = bytearray()
self.size = 0
self.grow(type.limits.n)
def grow(self, n: int):
if self.type.limits.m and self.size + n > self.type.limits.m:
raise Exception('pywasm: out of memory limit')
        # If the total page count would exceed 2**16, then fail
if self.size + n > convention.memory_page:
raise Exception('pywasm: out of memory limit')
self.data.extend([0x00 for _ in range(n * convention.memory_page_size)])
self.size += n
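    # Size sketch: grow() works in whole WebAssembly pages, so a memory whose limits
    # request n=1 starts at 65536 bytes (one 64Ki page, per the page size noted in the
    # class comment above), and grow(2) would extend it by a further 131072 bytes.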
class GlobalInstance:
# A global instance is the runtime representation of a global variable. It holds an individual value and a flag
# indicating whether it is mutable.
#
# globalinst ::= {value val, mut mut}
#
# The value of mutable globals can be mutated through variable instructions or by external means provided by the
# embedder.
def __init__(self, value: Value, mut: binary.Mut):
self.value = value
self.mut = mut
# An external value is the runtime representation of an entity that can be imported or exported. It is an address
# denoting either a function instance, table instance, memory instance, or global instances in the shared store.
#
# externval ::= func funcaddr
# | table tableaddr
# | mem memaddr
# | global globaladdr
ExternValue = typing.Union[FunctionAddress, TableAddress, MemoryAddress, GlobalAddress]
class Store:
# The store represents all global state that can be manipulated by WebAssembly programs. It consists of the runtime
# representation of all instances of functions, tables, memories, and globals that have been allocated during the
# life time of the abstract machine
# Syntactically, the store is defined as a record listing the existing instances of each category:
# store ::= {
# funcs funcinst∗
# tables tableinst∗
# mems meminst∗
# globals globalinst∗
# }
#
# Addresses are dynamic, globally unique references to runtime objects, in contrast to indices, which are static,
# module-local references to their original definitions. A memory address memaddr denotes the abstract address of
# a memory instance in the store, not an offset inside a memory instance.
def __init__(self):
self.function_list: typing.List[FunctionInstance] = []
self.table_list: typing.List[TableInstance] = []
self.memory_list: typing.List[MemoryInstance] = []
self.global_list: typing.List[GlobalInstance] = []
# For compatibility with older 0.4.x versions
self.mems = self.memory_list
def allocate_wasm_function(self, module: ModuleInstance, function: binary.Function) -> FunctionAddress:
function_address = FunctionAddress(len(self.function_list))
function_type = module.type_list[function.type_index]
wasmfunc = WasmFunc(function_type, module, function)
self.function_list.append(wasmfunc)
return function_address
def allocate_host_function(self, hostfunc: HostFunc) -> FunctionAddress:
function_address = FunctionAddress(len(self.function_list))
self.function_list.append(hostfunc)
return function_address
def allocate_table(self, table_type: binary.TableType) -> TableAddress:
table_address = TableAddress(len(self.table_list))
table_instance = TableInstance(convention.funcref, table_type.limits)
self.table_list.append(table_instance)
return table_address
def allocate_memory(self, memory_type: binary.MemoryType) -> MemoryAddress:
memory_address = MemoryAddress(len(self.memory_list))
memory_instance = MemoryInstance(memory_type)
self.memory_list.append(memory_instance)
return memory_address
def allocate_global(self, global_type: binary.GlobalType, value: Value) -> GlobalAddress:
global_address = GlobalAddress(len(self.global_list))
global_instance = GlobalInstance(value, global_type.mut)
self.global_list.append(global_instance)
return global_address
class ExportInstance:
# An export instance is the runtime representation of an export. It defines the export's name and the associated
# external value.
#
# exportinst ::= {name name, value externval}
def __init__(self, name: str, value: ExternValue):
self.name = name
self.value = value
def __repr__(self):
return f'export_instance({self.name}, {self.value})'
class Label:
# Labels carry an argument arity n and their associated branch target, which is expressed syntactically as an
# instruction sequence:
#
# label ::= labeln{instr∗}
#
# Intuitively, instr∗ is the continuation to execute when the branch is taken, in place of the original control
# construct.
def __init__(self, arity: int, continuation: int):
self.arity = arity
self.continuation = continuation
def __repr__(self):
return f'label({self.arity})'
class Frame:
# Activation frames carry the return arity n of the respective function, hold the values of its locals
# (including arguments) in the order corresponding to their static local indices, and a reference to the function's
# own module instance.
def __init__(self, module: ModuleInstance,
local_list: typing.List[Value],
expr: binary.Expression,
arity: int):
self.module = module
self.local_list = local_list
self.expr = expr
self.arity = arity
def __repr__(self):
return f'frame({self.arity}, {self.local_list})'
class Stack:
# Besides the store, most instructions interact with an implicit stack. The stack contains three kinds of entries:
#
# Values: the operands of instructions.
# Labels: active structured control instructions that can be targeted by branches.
# Activations: the call frames of active function calls.
#
# These entries can occur on the stack in any order during the execution of a program. Stack entries are described
# by abstract syntax as follows.
def __init__(self):
self.data: typing.List[typing.Union[Value, Label, Frame]] = []
def len(self):
return len(self.data)
def append(self, v: typing.Union[Value, Label, Frame]):
self.data.append(v)
def pop(self):
return self.data.pop()
# ======================================================================================================================
# Execution Runtime Import Matching
# ======================================================================================================================
def match_limits(a: binary.Limits, b: binary.Limits) -> bool:
if a.n >= b.n:
if b.m == 0:
return 1
if a.m != 0 and b.m != 0:
if a.m < b.m:
return 1
return 0
def match_function(a: binary.FunctionType, b: binary.FunctionType) -> bool:
return a.args.data == b.args.data and a.rets.data == b.rets.data
def match_memory(a: binary.MemoryType, b: binary.MemoryType) -> bool:
return match_limits(a.limits, b.limits)
# ======================================================================================================================
# Abstract Machine
# ======================================================================================================================
class Configuration:
# A configuration consists of the current store and an executing thread.
# A thread is a computation over instructions that operates relative to a current frame referring to the module
# instance in which the computation runs, i.e., where the current function originates from.
#
# config ::= store;thread
# thread ::= frame;instr∗
def __init__(self, store: Store):
self.store = store
self.frame: typing.Optional[Frame] = None
self.stack = Stack()
self.depth = 0
self.pc = 0
self.opts: option.Option = option.Option()
def get_label(self, i: int) -> Label:
l = self.stack.len()
x = i
for a in range(l):
j = l - a - 1
v = self.stack.data[j]
if isinstance(v, Label):
if x == 0:
return v
x -= 1
def set_frame(self, frame: Frame):
self.frame = frame
self.stack.append(frame)
self.stack.append(Label(frame.arity, len(frame.expr.data) - 1))
def call(self, function_addr: FunctionAddress, function_args: typing.List[Value]) -> Result:
function = self.store.function_list[function_addr]
log.debugln(f'call {function}({function_args})')
for e, t in zip(function_args, function.type.args.data):
assert e.type == t
assert len(function.type.rets.data) < 2
if isinstance(function, WasmFunc):
local_list = [Value.new(e, 0) for e in function.code.local_list]
frame = Frame(
module=function.module,
local_list=function_args + local_list,
expr=function.code.expr,
arity=len(function.type.rets.data),
)
self.set_frame(frame)
return self.exec()
if isinstance(function, HostFunc):
r = function.hostcode(self.store, *[e.val() for e in function_args])
l = len(function.type.rets.data)
if l == 0:
return Result([])
if l == 1:
return Result([Value.new(function.type.rets.data[0], r)])
            return Result([Value.new(e, r[i]) for i, e in enumerate(function.type.rets.data)])
raise Exception(f'pywasm: unknown function type: {function}')
def exec(self):
instruction_list = self.frame.expr.data
instruction_list_len = len(instruction_list)
while self.pc < instruction_list_len:
i = instruction_list[self.pc]
if self.opts.cycle_limit > 0:
c = self.opts.cycle + self.opts.instruction_cycle_func(i)
if c > self.opts.cycle_limit:
raise Exception(f'pywasm: out of cycles')
self.opts.cycle = c
ArithmeticLogicUnit.exec(self, i)
self.pc += 1
r = [self.stack.pop() for _ in range(self.frame.arity)][::-1]
l = self.stack.pop()
assert l == self.frame
return Result(r)
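    # Usage sketch (store and module_instance here stand for objects built during
    # module instantiation; they are not names defined in this file):
    #   config = Configuration(store)
    #   res = config.call(module_instance.function_addr_list[0], [Value.from_i32(7)])
    #   res.data  # list of result Values, at most one given the arity assert in call()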
# ======================================================================================================================
# Instruction Set
# ======================================================================================================================
class ArithmeticLogicUnit:
@staticmethod
def exec(config: Configuration, i: binary.Instruction):
if log.lvl > 0:
log.println('|', i)
func = {
instruction.unreachable: ArithmeticLogicUnit.unreachable,
instruction.nop: ArithmeticLogicUnit.nop,
instruction.block: ArithmeticLogicUnit.block,
instruction.loop: ArithmeticLogicUnit.loop,
instruction.if_: ArithmeticLogicUnit.if_,
instruction.else_: ArithmeticLogicUnit.else_,
instruction.end: ArithmeticLogicUnit.end,
instruction.br: ArithmeticLogicUnit.br,
instruction.br_if: ArithmeticLogicUnit.br_if,
instruction.br_table: ArithmeticLogicUnit.br_table,
instruction.return_: ArithmeticLogicUnit.return_,
instruction.call: ArithmeticLogicUnit.call,
instruction.call_indirect: ArithmeticLogicUnit.call_indirect,
instruction.drop: ArithmeticLogicUnit.drop,
instruction.select: ArithmeticLogicUnit.select,
instruction.get_local: ArithmeticLogicUnit.get_local,
instruction.set_local: ArithmeticLogicUnit.set_local,
instruction.tee_local: ArithmeticLogicUnit.tee_local,
instruction.get_global: ArithmeticLogicUnit.get_global,
instruction.set_global: ArithmeticLogicUnit.set_global,
instruction.i32_load: ArithmeticLogicUnit.i32_load,
instruction.i64_load: ArithmeticLogicUnit.i64_load,
instruction.f32_load: ArithmeticLogicUnit.f32_load,
instruction.f64_load: ArithmeticLogicUnit.f64_load,
instruction.i32_load8_s: ArithmeticLogicUnit.i32_load8_s,
instruction.i32_load8_u: ArithmeticLogicUnit.i32_load8_u,
instruction.i32_load16_s: ArithmeticLogicUnit.i32_load16_s,
instruction.i32_load16_u: ArithmeticLogicUnit.i32_load16_u,
instruction.i64_load8_s: ArithmeticLogicUnit.i64_load8_s,
instruction.i64_load8_u: ArithmeticLogicUnit.i64_load8_u,
instruction.i64_load16_s: ArithmeticLogicUnit.i64_load16_s,
instruction.i64_load16_u: ArithmeticLogicUnit.i64_load16_u,
instruction.i64_load32_s: ArithmeticLogicUnit.i64_load32_s,
instruction.i64_load32_u: ArithmeticLogicUnit.i64_load32_u,
instruction.i32_store: ArithmeticLogicUnit.i32_store,
instruction.i64_store: ArithmeticLogicUnit.i64_store,
instruction.f32_store: ArithmeticLogicUnit.f32_store,
instruction.f64_store: ArithmeticLogicUnit.f64_store,
instruction.i32_store8: ArithmeticLogicUnit.i32_store8,
instruction.i32_store16: ArithmeticLogicUnit.i32_store16,
instruction.i64_store8: ArithmeticLogicUnit.i64_store8,
instruction.i64_store16: ArithmeticLogicUnit.i64_store16,
instruction.i64_store32: ArithmeticLogicUnit.i64_store32,
instruction.current_memory: ArithmeticLogicUnit.current_memory,
instruction.grow_memory: ArithmeticLogicUnit.grow_memory,
instruction.i32_const: ArithmeticLogicUnit.i32_const,
instruction.i64_const: ArithmeticLogicUnit.i64_const,
instruction.f32_const: ArithmeticLogicUnit.f32_const,
instruction.f64_const: ArithmeticLogicUnit.f64_const,
instruction.i32_eqz: ArithmeticLogicUnit.i32_eqz,
instruction.i32_eq: ArithmeticLogicUnit.i32_eq,
instruction.i32_ne: ArithmeticLogicUnit.i32_ne,
instruction.i32_lts: ArithmeticLogicUnit.i32_lts,
instruction.i32_ltu: ArithmeticLogicUnit.i32_ltu,
instruction.i32_gts: ArithmeticLogicUnit.i32_gts,
instruction.i32_gtu: ArithmeticLogicUnit.i32_gtu,
instruction.i32_les: ArithmeticLogicUnit.i32_les,
instruction.i32_leu: ArithmeticLogicUnit.i32_leu,
instruction.i32_ges: ArithmeticLogicUnit.i32_ges,
instruction.i32_geu: ArithmeticLogicUnit.i32_geu,
instruction.i64_eqz: ArithmeticLogicUnit.i64_eqz,
instruction.i64_eq: ArithmeticLogicUnit.i64_eq,
instruction.i64_ne: ArithmeticLogicUnit.i64_ne,
instruction.i64_lts: ArithmeticLogicUnit.i64_lts,
instruction.i64_ltu: ArithmeticLogicUnit.i64_ltu,
instruction.i64_gts: ArithmeticLogicUnit.i64_gts,
instruction.i64_gtu: ArithmeticLogicUnit.i64_gtu,
instruction.i64_les: ArithmeticLogicUnit.i64_les,
instruction.i64_leu: ArithmeticLogicUnit.i64_leu,
instruction.i64_ges: ArithmeticLogicUnit.i64_ges,
instruction.i64_geu: ArithmeticLogicUnit.i64_geu,
instruction.f32_eq: ArithmeticLogicUnit.f32_eq,
instruction.f32_ne: ArithmeticLogicUnit.f32_ne,
instruction.f32_lt: ArithmeticLogicUnit.f32_lt,
instruction.f32_gt: ArithmeticLogicUnit.f32_gt,
instruction.f32_le: ArithmeticLogicUnit.f32_le,
instruction.f32_ge: ArithmeticLogicUnit.f32_ge,
instruction.f64_eq: ArithmeticLogicUnit.f64_eq,
instruction.f64_ne: ArithmeticLogicUnit.f64_ne,
instruction.f64_lt: ArithmeticLogicUnit.f64_lt,
instruction.f64_gt: ArithmeticLogicUnit.f64_gt,
instruction.f64_le: ArithmeticLogicUnit.f64_le,
instruction.f64_ge: ArithmeticLogicUnit.f64_ge,
instruction.i32_clz: ArithmeticLogicUnit.i32_clz,
instruction.i32_ctz: ArithmeticLogicUnit.i32_ctz,
instruction.i32_popcnt: ArithmeticLogicUnit.i32_popcnt,
instruction.i32_add: ArithmeticLogicUnit.i32_add,
instruction.i32_sub: ArithmeticLogicUnit.i32_sub,
instruction.i32_mul: ArithmeticLogicUnit.i32_mul,
instruction.i32_divs: ArithmeticLogicUnit.i32_divs,
instruction.i32_divu: ArithmeticLogicUnit.i32_divu,
instruction.i32_rems: ArithmeticLogicUnit.i32_rems,
instruction.i32_remu: ArithmeticLogicUnit.i32_remu,
instruction.i32_and: ArithmeticLogicUnit.i32_and,
instruction.i32_or: ArithmeticLogicUnit.i32_or,
instruction.i32_xor: ArithmeticLogicUnit.i32_xor,
instruction.i32_shl: ArithmeticLogicUnit.i32_shl,
instruction.i32_shrs: ArithmeticLogicUnit.i32_shrs,
instruction.i32_shru: ArithmeticLogicUnit.i32_shru,
instruction.i32_rotl: ArithmeticLogicUnit.i32_rotl,
instruction.i32_rotr: ArithmeticLogicUnit.i32_rotr,
instruction.i64_clz: ArithmeticLogicUnit.i64_clz,
instruction.i64_ctz: ArithmeticLogicUnit.i64_ctz,
instruction.i64_popcnt: ArithmeticLogicUnit.i64_popcnt,
instruction.i64_add: ArithmeticLogicUnit.i64_add,
instruction.i64_sub: ArithmeticLogicUnit.i64_sub,
instruction.i64_mul: ArithmeticLogicUnit.i64_mul,
instruction.i64_divs: ArithmeticLogicUnit.i64_divs,
instruction.i64_divu: ArithmeticLogicUnit.i64_divu,
instruction.i64_rems: ArithmeticLogicUnit.i64_rems,
instruction.i64_remu: ArithmeticLogicUnit.i64_remu,
instruction.i64_and: ArithmeticLogicUnit.i64_and,
instruction.i64_or: ArithmeticLogicUnit.i64_or,
instruction.i64_xor: ArithmeticLogicUnit.i64_xor,
instruction.i64_shl: ArithmeticLogicUnit.i64_shl,
instruction.i64_shrs: ArithmeticLogicUnit.i64_shrs,
instruction.i64_shru: ArithmeticLogicUnit.i64_shru,
instruction.i64_rotl: ArithmeticLogicUnit.i64_rotl,
instruction.i64_rotr: ArithmeticLogicUnit.i64_rotr,
instruction.f32_abs: ArithmeticLogicUnit.f32_abs,
instruction.f32_neg: ArithmeticLogicUnit.f32_neg,
instruction.f32_ceil: ArithmeticLogicUnit.f32_ceil,
instruction.f32_floor: ArithmeticLogicUnit.f32_floor,
instruction.f32_trunc: ArithmeticLogicUnit.f32_trunc,
instruction.f32_nearest: ArithmeticLogicUnit.f32_nearest,
instruction.f32_sqrt: ArithmeticLogicUnit.f32_sqrt,
instruction.f32_add: ArithmeticLogicUnit.f32_add,
instruction.f32_sub: ArithmeticLogicUnit.f32_sub,
instruction.f32_mul: ArithmeticLogicUnit.f32_mul,
instruction.f32_div: ArithmeticLogicUnit.f32_div,
instruction.f32_min: ArithmeticLogicUnit.f32_min,
instruction.f32_max: ArithmeticLogicUnit.f32_max,
instruction.f32_copysign: ArithmeticLogicUnit.f32_copysign,
instruction.f64_abs: ArithmeticLogicUnit.f64_abs,
instruction.f64_neg: ArithmeticLogicUnit.f64_neg,
instruction.f64_ceil: ArithmeticLogicUnit.f64_ceil,
instruction.f64_floor: ArithmeticLogicUnit.f64_floor,
instruction.f64_trunc: ArithmeticLogicUnit.f64_trunc,
instruction.f64_nearest: ArithmeticLogicUnit.f64_nearest,
instruction.f64_sqrt: ArithmeticLogicUnit.f64_sqrt,
instruction.f64_add: ArithmeticLogicUnit.f64_add,
instruction.f64_sub: ArithmeticLogicUnit.f64_sub,
instruction.f64_mul: ArithmeticLogicUnit.f64_mul,
instruction.f64_div: ArithmeticLogicUnit.f64_div,
instruction.f64_min: ArithmeticLogicUnit.f64_min,
instruction.f64_max: ArithmeticLogicUnit.f64_max,
instruction.f64_copysign: ArithmeticLogicUnit.f64_copysign,
instruction.i32_wrap_i64: ArithmeticLogicUnit.i32_wrap_i64,
instruction.i32_trunc_sf32: ArithmeticLogicUnit.i32_trunc_sf32,
instruction.i32_trunc_uf32: ArithmeticLogicUnit.i32_trunc_uf32,
instruction.i32_trunc_sf64: ArithmeticLogicUnit.i32_trunc_sf64,
instruction.i32_trunc_uf64: ArithmeticLogicUnit.i32_trunc_uf64,
instruction.i64_extend_si32: ArithmeticLogicUnit.i64_extend_si32,
instruction.i64_extend_ui32: ArithmeticLogicUnit.i64_extend_ui32,
instruction.i64_trunc_sf32: ArithmeticLogicUnit.i64_trunc_sf32,
instruction.i64_trunc_uf32: ArithmeticLogicUnit.i64_trunc_uf32,
instruction.i64_trunc_sf64: ArithmeticLogicUnit.i64_trunc_sf64,
instruction.i64_trunc_uf64: ArithmeticLogicUnit.i64_trunc_uf64,
instruction.f32_convert_si32: ArithmeticLogicUnit.f32_convert_si32,
instruction.f32_convert_ui32: ArithmeticLogicUnit.f32_convert_ui32,
instruction.f32_convert_si64: ArithmeticLogicUnit.f32_convert_si64,
instruction.f32_convert_ui64: ArithmeticLogicUnit.f32_convert_ui64,
instruction.f32_demote_f64: ArithmeticLogicUnit.f32_demote_f64,
instruction.f64_convert_si32: ArithmeticLogicUnit.f64_convert_si32,
instruction.f64_convert_ui32: ArithmeticLogicUnit.f64_convert_ui32,
instruction.f64_convert_si64: ArithmeticLogicUnit.f64_convert_si64,
instruction.f64_convert_ui64: ArithmeticLogicUnit.f64_convert_ui64,
instruction.f64_promote_f32: ArithmeticLogicUnit.f64_promote_f32,
instruction.i32_reinterpret_f32: ArithmeticLogicUnit.i32_reinterpret_f32,
instruction.i64_reinterpret_f64: ArithmeticLogicUnit.i64_reinterpret_f64,
instruction.f32_reinterpret_i32: ArithmeticLogicUnit.f32_reinterpret_i32,
instruction.f64_reinterpret_i64: ArithmeticLogicUnit.f64_reinterpret_i64,
}[i.opcode]
func(config, i)
@staticmethod
def unreachable(config: Configuration, i: binary.Instruction):
raise Exception('pywasm: unreachable')
@staticmethod
def nop(config: Configuration, i: binary.Instruction):
pass
@staticmethod
def block(config: Configuration, i: binary.Instruction):
if i.args[0] == convention.empty:
arity = 0
else:
arity = 1
continuation = config.frame.expr.position[config.pc][1]
config.stack.append(Label(arity, continuation))
@staticmethod
def loop(config: Configuration, i: binary.Instruction):
if i.args[0] == convention.empty:
arity = 0
else:
arity = 1
continuation = config.frame.expr.position[config.pc][0]
config.stack.append(Label(arity, continuation))
@staticmethod
def if_(config: Configuration, i: binary.Instruction):
c = config.stack.pop().i32()
if i.args[0] == convention.empty:
arity = 0
else:
arity = 1
continuation = config.frame.expr.position[config.pc][1]
config.stack.append(Label(arity, continuation))
if c == 0:
if len(config.frame.expr.position[config.pc]) == 3:
config.pc = config.frame.expr.position[config.pc][2]
else:
config.pc = config.frame.expr.position[config.pc][1]
config.stack.pop()
@staticmethod
def else_(config: Configuration, i: binary.Instruction):
L = config.get_label(0)
v = [config.stack.pop() for _ in range(L.arity)][::-1]
while True:
if isinstance(config.stack.pop(), Label):
break
for e in v:
config.stack.append(e)
config.pc = config.frame.expr.position[config.pc][1]
@staticmethod
def end(config: Configuration, i: binary.Instruction):
L = config.get_label(0)
v = [config.stack.pop() for _ in range(L.arity)][::-1]
while True:
if isinstance(config.stack.pop(), Label):
break
for e in v:
config.stack.append(e)
@staticmethod
def br_label(config: Configuration, l: int):
# Let L be the l-th label appearing on the stack, starting from the top and counting from zero.
L = config.get_label(l)
# Pop the values n from the stack.
v = [config.stack.pop() for _ in range(L.arity)][::-1]
# Repeat l+1 times
# While the top of the stack is a value, pop the value from the stack
# Assert: due to validation, the top of the stack now is a label
# Pop the label from the stack
s = 0
# For loops we keep the label which represents the loop on the stack since the continuation of a loop is
# beginning back at the beginning of the loop itself.
if L.continuation < config.pc:
n = l
else:
n = l + 1
while s != n:
e = config.stack.pop()
if isinstance(e, Label):
s += 1
# Push the values n to the stack
for e in v:
config.stack.append(e)
# Jump to the continuation of L
config.pc = L.continuation
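    # Illustration (an example reading of the steps above, not spec text): for `br 1`
    # inside two nested blocks, get_label(1) selects the outer block's label; its
    # continuation lies ahead of the current pc, so l + 1 = 2 labels are popped, the
    # branch operands are pushed back, and pc jumps to the outer block's `end`.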
@staticmethod
def br(config: Configuration, i: binary.Instruction):
l = i.args[0]
return ArithmeticLogicUnit.br_label(config, l)
@staticmethod
def br_if(config: Configuration, i: binary.Instruction):
if config.stack.pop().i32() == 0:
return
l = i.args[0]
return ArithmeticLogicUnit.br_label(config, l)
@staticmethod
def br_table(config: Configuration, i: binary.Instruction):
a = i.args[0]
l = i.args[1]
c = config.stack.pop().i32()
if c >= 0 and c < len(a):
l = a[c]
return ArithmeticLogicUnit.br_label(config, l)
@staticmethod
def return_(config: Configuration, i: binary.Instruction):
v = [config.stack.pop() for _ in range(config.frame.arity)][::-1]
while True:
e = config.stack.pop()
if isinstance(e, Frame):
config.stack.append(e)
break
for e in v:
config.stack.append(e)
# Jump to the instruction after the original call that pushed the frame
config.pc = len(config.frame.expr.data) - 1
@staticmethod
def call_function_addr(config: Configuration, function_addr: FunctionAddress):
if config.depth > convention.call_stack_depth:
raise Exception('pywasm: call stack exhausted')
function: FunctionInstance = config.store.function_list[function_addr]
function_type = function.type
function_args = [config.stack.pop() for _ in function_type.args.data][::-1]
subcnf = Configuration(config.store)
subcnf.depth = config.depth + 1
subcnf.opts = config.opts
r = subcnf.call(function_addr, function_args)
for e in r.data:
config.stack.append(e)
@staticmethod
def call(config: Configuration, i: binary.Instruction):
function_addr: binary.FunctionIndex = i.args[0]
ArithmeticLogicUnit.call_function_addr(config, function_addr)
@staticmethod
def call_indirect(config: Configuration, i: binary.Instruction):
if i.args[1] != 0x00:
raise Exception("pywasm: zero byte malformed in call_indirect")
ta = config.frame.module.table_addr_list[0]
tab = config.store.table_list[ta]
idx = config.stack.pop().i32()
if not 0 <= idx < len(tab.element_list):
raise Exception('pywasm: undefined element')
function_addr = tab.element_list[idx]
if function_addr is None:
raise Exception('pywasm: uninitialized element')
ArithmeticLogicUnit.call_function_addr(config, function_addr)
@staticmethod
def drop(config: Configuration, i: binary.Instruction):
config.stack.pop()
@staticmethod
def select(config: Configuration, i: binary.Instruction):
c = config.stack.pop().i32()
b = config.stack.pop()
a = config.stack.pop()
if c:
config.stack.append(a)
else:
config.stack.append(b)
@staticmethod
def get_local(config: Configuration, i: binary.Instruction):
r = config.frame.local_list[i.args[0]]
o = Value()
o.type = r.type
o.data = r.data.copy()
config.stack.append(o)
@staticmethod
def set_local(config: Configuration, i: binary.Instruction):
r = config.stack.pop()
config.frame.local_list[i.args[0]] = r
@staticmethod
def tee_local(config: Configuration, i: binary.Instruction):
r = config.stack.data[-1]
o = Value()
o.type = r.type
o.data = r.data.copy()
config.frame.local_list[i.args[0]] = o
@staticmethod
def get_global(config: Configuration, i: binary.Instruction):
a = config.frame.module.global_addr_list[i.args[0]]
glob = config.store.global_list[a]
r = glob.value
config.stack.append(r)
@staticmethod
def set_global(config: Configuration, i: binary.Instruction):
a = config.frame.module.global_addr_list[i.args[0]]
glob = config.store.global_list[a]
assert glob.mut == convention.var
glob.value = config.stack.pop()
@staticmethod
def mem_load(config: Configuration, i: binary.Instruction, size: int) -> bytearray:
memory_addr = config.frame.module.memory_addr_list[0]
memory = config.store.memory_list[memory_addr]
offset = i.args[1]
addr = config.stack.pop().i32() + offset
if addr < 0 or addr + size > len(memory.data):
raise Exception('pywasm: out of bounds memory access')
return memory.data[addr:addr + size]
@staticmethod
def i32_load(config: Configuration, i: binary.Instruction):
r = Value.from_i32(num.LittleEndian.i32(ArithmeticLogicUnit.mem_load(config, i, 4)))
config.stack.append(r)
@staticmethod
def i64_load(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.i64(ArithmeticLogicUnit.mem_load(config, i, 8)))
config.stack.append(r)
@staticmethod
def f32_load(config: Configuration, i: binary.Instruction):
r = Value.from_f32(num.LittleEndian.f32(ArithmeticLogicUnit.mem_load(config, i, 4)))
config.stack.append(r)
@staticmethod
def f64_load(config: Configuration, i: binary.Instruction):
r = Value.from_f64(num.LittleEndian.f64(ArithmeticLogicUnit.mem_load(config, i, 8)))
config.stack.append(r)
@staticmethod
def i32_load8_s(config: Configuration, i: binary.Instruction):
r = Value.from_i32(num.LittleEndian.i8(ArithmeticLogicUnit.mem_load(config, i, 1)))
config.stack.append(r)
@staticmethod
def i32_load8_u(config: Configuration, i: binary.Instruction):
r = Value.from_i32(ArithmeticLogicUnit.mem_load(config, i, 1)[0])
config.stack.append(r)
@staticmethod
def i32_load16_s(config: Configuration, i: binary.Instruction):
r = Value.from_i32(num.LittleEndian.i16(ArithmeticLogicUnit.mem_load(config, i, 2)))
config.stack.append(r)
@staticmethod
def i32_load16_u(config: Configuration, i: binary.Instruction):
r = Value.from_i32(num.LittleEndian.u16(ArithmeticLogicUnit.mem_load(config, i, 2)))
config.stack.append(r)
@staticmethod
def i64_load8_s(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.i8(ArithmeticLogicUnit.mem_load(config, i, 1)))
config.stack.append(r)
@staticmethod
def i64_load8_u(config: Configuration, i: binary.Instruction):
r = Value.from_i64(ArithmeticLogicUnit.mem_load(config, i, 1)[0])
config.stack.append(r)
@staticmethod
def i64_load16_s(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.i16(ArithmeticLogicUnit.mem_load(config, i, 2)))
config.stack.append(r)
@staticmethod
def i64_load16_u(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.u16(ArithmeticLogicUnit.mem_load(config, i, 2)))
config.stack.append(r)
@staticmethod
def i64_load32_s(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.i32(ArithmeticLogicUnit.mem_load(config, i, 4)))
config.stack.append(r)
@staticmethod
def i64_load32_u(config: Configuration, i: binary.Instruction):
r = Value.from_i64(num.LittleEndian.u32(ArithmeticLogicUnit.mem_load(config, i, 4)))
config.stack.append(r)
@staticmethod
def mem_store(config: Configuration, i: binary.Instruction, size: int):
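        # Stores the low `size` bytes of the popped value into the default linear
        # memory at (popped i32 base + static memarg offset); out-of-bounds accesses trap.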
memory_addr = config.frame.module.memory_addr_list[0]
memory = config.store.memory_list[memory_addr]
r = config.stack.pop()
offset = i.args[1]
addr = config.stack.pop().i32() + offset
if addr < 0 or addr + size > len(memory.data):
raise Exception('pywasm: out of bounds memory access')
memory.data[addr:addr + size] = r.data[0:size]
@staticmethod
def i32_store(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 4)
@staticmethod
def i64_store(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 8)
@staticmethod
def f32_store(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 4)
@staticmethod
def f64_store(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 8)
@staticmethod
def i32_store8(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 1)
@staticmethod
def i32_store16(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 2)
@staticmethod
def i64_store8(config: Configuration, i: binary.Instruction):
        # i64.store8 writes a single byte (the low 8 bits of the operand).
        ArithmeticLogicUnit.mem_store(config, i, 1)
@staticmethod
def i64_store16(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 2)
@staticmethod
def i64_store32(config: Configuration, i: binary.Instruction):
ArithmeticLogicUnit.mem_store(config, i, 4)
@staticmethod
def current_memory(config: Configuration, i: binary.Instruction):
memory_addr = config.frame.module.memory_addr_list[0]
memory = config.store.memory_list[memory_addr]
r = Value.from_i32(memory.size)
config.stack.append(r)
@staticmethod
def grow_memory(config: Configuration, i: binary.Instruction):
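        # memory.grow pushes the previous size in pages on success and -1 on failure;
        # the optional opts.pages_limit caps how far the memory may grow.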
memory_addr = config.frame.module.memory_addr_list[0]
memory = config.store.memory_list[memory_addr]
size = memory.size
r = config.stack.pop().i32()
if config.opts.pages_limit > 0 and memory.size + r > config.opts.pages_limit:
raise Exception('pywasm: out of memory limit')
try:
memory.grow(r)
config.stack.append(Value.from_i32(size))
except Exception:
config.stack.append(Value.from_i32(-1))
@staticmethod
def i32_const(config: Configuration, i: binary.Instruction):
config.stack.append(Value.from_i32(i.args[0]))
@staticmethod
def i64_const(config: Configuration, i: binary.Instruction):
config.stack.append(Value.from_i64(i.args[0]))
@staticmethod
def f32_const(config: Configuration, i: binary.Instruction):
r = Value.from_i32(i.args[0])
r.type = binary.ValueType(convention.f32)
config.stack.append(r)
@staticmethod
def f64_const(config: Configuration, i: binary.Instruction):
r = Value.from_i64(i.args[0])
r.type = binary.ValueType(convention.f64)
config.stack.append(r)
@staticmethod
def i32_eqz(config: Configuration, i: binary.Instruction):
config.stack.append(Value.from_i32(config.stack.pop().i32() == 0))
@staticmethod
def i32_eq(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(a == b))
@staticmethod
def i32_ne(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(a != b))
@staticmethod
def i32_lts(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def i32_ltu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def i32_gts(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def i32_gtu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def i32_les(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def i32_leu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def i32_ges(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
config.stack.append(Value.from_i32(int(a >= b)))
@staticmethod
def i32_geu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
config.stack.append(Value.from_i32(int(a >= b)))
@staticmethod
def i64_eqz(config: Configuration, i: binary.Instruction):
config.stack.append(Value.from_i32(config.stack.pop().i64() == 0))
@staticmethod
def i64_eq(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a == b))
@staticmethod
def i64_ne(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a != b))
@staticmethod
def i64_lts(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def i64_ltu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def i64_gts(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def i64_gtu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def i64_les(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def i64_leu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def i64_ges(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a >= b))
@staticmethod
def i64_geu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
config.stack.append(Value.from_i32(a >= b))
@staticmethod
def f32_eq(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a == b))
@staticmethod
def f32_ne(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a != b))
@staticmethod
def f32_lt(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def f32_gt(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def f32_le(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def f32_ge(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
config.stack.append(Value.from_i32(a >= b))
@staticmethod
def f64_eq(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a == b))
@staticmethod
def f64_ne(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a != b))
@staticmethod
def f64_lt(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a < b))
@staticmethod
def f64_gt(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a > b))
@staticmethod
def f64_le(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a <= b))
@staticmethod
def f64_ge(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
config.stack.append(Value.from_i32(a >= b))
@staticmethod
def i32_clz(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
c = 0
while c < 32 and (a & 0x80000000) == 0:
c += 1
a = a << 1
config.stack.append(Value.from_i32(c))
@staticmethod
def i32_ctz(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
c = 0
while c < 32 and (a & 0x01) == 0:
c += 1
a = a >> 1
config.stack.append(Value.from_i32(c))
@staticmethod
def i32_popcnt(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
c = 0
for _ in range(32):
if a & 0x01:
c += 1
a = a >> 1
config.stack.append(Value.from_i32(c))
@staticmethod
def i32_add(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a + b)
config.stack.append(c)
@staticmethod
def i32_sub(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a - b)
config.stack.append(c)
@staticmethod
def i32_mul(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a * b)
config.stack.append(c)
@staticmethod
def i32_divs(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
if b == 0:
raise Exception('pywasm: integer divide by zero')
if b == -1 and a == -2**31:
raise Exception('pywasm: integer overflow')
# Integer division that rounds towards 0 (like C)
r = Value.from_i32(a // b if a * b > 0 else (a + (-a % b)) // b)
config.stack.append(r)
@staticmethod
def i32_divu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
if b == 0:
raise Exception('pywasm: integer divide by zero')
r = Value.from_i32(a // b)
config.stack.append(r)
@staticmethod
def i32_rems(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
if b == 0:
raise Exception('pywasm: integer divide by zero')
# Integer remainder that rounds towards 0 (like C)
r = Value.from_i32(a % b if a * b > 0 else -(-a % b))
config.stack.append(r)
@staticmethod
def i32_remu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
if b == 0:
raise Exception('pywasm: integer divide by zero')
r = Value.from_i32(a % b)
config.stack.append(r)
@staticmethod
def i32_and(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a & b)
config.stack.append(c)
@staticmethod
def i32_or(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a | b)
config.stack.append(c)
@staticmethod
def i32_xor(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a ^ b)
config.stack.append(c)
@staticmethod
def i32_shl(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a << (b % 0x20))
config.stack.append(c)
@staticmethod
def i32_shrs(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(a >> (b % 0x20))
config.stack.append(c)
@staticmethod
def i32_shru(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u32()
a = config.stack.pop().u32()
c = Value.from_i32(a >> (b % 0x20))
config.stack.append(c)
@staticmethod
def i32_rotl(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32((((a << (b % 0x20)) & 0xffffffff) | (a >> (0x20 - (b % 0x20)))))
config.stack.append(c)
@staticmethod
def i32_rotr(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i32()
a = config.stack.pop().i32()
c = Value.from_i32(((a >> (b % 0x20)) | ((a << (0x20 - (b % 0x20))) & 0xffffffff)))
config.stack.append(c)
@staticmethod
def i64_clz(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
c = 0
while c < 64 and (a & 0x8000000000000000) == 0:
c += 1
a = a << 1
config.stack.append(Value.from_i64(c))
@staticmethod
def i64_ctz(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
c = 0
while c < 64 and (a & 0x01) == 0:
c += 1
a = a >> 1
config.stack.append(Value.from_i64(c))
@staticmethod
def i64_popcnt(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
c = 0
for _ in range(64):
if a & 0x01:
c += 1
a = a >> 1
config.stack.append(Value.from_i64(c))
@staticmethod
def i64_add(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a + b)
config.stack.append(c)
@staticmethod
def i64_sub(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a - b)
config.stack.append(c)
@staticmethod
def i64_mul(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a * b)
config.stack.append(c)
@staticmethod
def i64_divs(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
if b == 0:
raise Exception('pywasm: integer divide by zero')
if b == -1 and a == -2**63:
raise Exception('pywasm: integer overflow')
r = Value.from_i64(a // b if a * b > 0 else (a + (-a % b)) // b)
config.stack.append(r)
@staticmethod
def i64_divu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
if b == 0:
raise Exception('pywasm: integer divide by zero')
r = Value.from_i64(a // b)
config.stack.append(r)
@staticmethod
def i64_rems(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
if b == 0:
raise Exception('pywasm: integer divide by zero')
# Integer remainder that rounds towards 0 (like C)
r = Value.from_i64(a % b if a * b > 0 else -(-a % b))
config.stack.append(r)
@staticmethod
def i64_remu(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
if b == 0:
raise Exception('pywasm: integer divide by zero')
r = Value.from_i64(a % b)
config.stack.append(r)
@staticmethod
def i64_and(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a & b)
config.stack.append(c)
@staticmethod
def i64_or(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a | b)
config.stack.append(c)
@staticmethod
def i64_xor(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
        c = Value.from_i64(a ^ b)
config.stack.append(c)
@staticmethod
def i64_shl(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a << (b % 0x40))
config.stack.append(c)
@staticmethod
def i64_shrs(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
c = Value.from_i64(a >> (b % 0x40))
config.stack.append(c)
@staticmethod
def i64_shru(config: Configuration, i: binary.Instruction):
b = config.stack.pop().u64()
a = config.stack.pop().u64()
c = Value.from_i64(a >> (b % 0x40))
config.stack.append(c)
@staticmethod
def i64_rotl(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
        c = Value.from_i64((((a << (b % 0x40)) & 0xffffffffffffffff) | (a >> (0x40 - (b % 0x40)))))
config.stack.append(c)
@staticmethod
def i64_rotr(config: Configuration, i: binary.Instruction):
b = config.stack.pop().i64()
a = config.stack.pop().i64()
        c = Value.from_i64(((a >> (b % 0x40)) | ((a << (0x40 - (b % 0x40))) & 0xffffffffffffffff)))
config.stack.append(c)
@staticmethod
def f32_abs(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.data[3] = a.data[3] & 0x7f
config.stack.append(a)
@staticmethod
def f32_neg(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
if a.data[3] & 0x80 != 0x00:
a.data[3] = a.data[3] & 0x7f
else:
a.data[3] = a.data[3] | 0x80
config.stack.append(a)
@staticmethod
def f32_ceil(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f32(numpy.ceil(a))
config.stack.append(r)
@staticmethod
def f32_floor(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f32(numpy.floor(a))
config.stack.append(r)
@staticmethod
def f32_trunc(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f32(numpy.trunc(a))
config.stack.append(r)
@staticmethod
def f32_nearest(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f32(numpy.round(a))
config.stack.append(r)
@staticmethod
def f32_sqrt(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f32(numpy.sqrt(a))
config.stack.append(r)
@staticmethod
def f32_add(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
r = Value.from_f32(a + b)
config.stack.append(r)
@staticmethod
def f32_sub(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
r = Value.from_f32(a - b)
config.stack.append(r)
@staticmethod
def f32_mul(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
r = Value.from_f32(a * b)
config.stack.append(r)
@staticmethod
def f32_div(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
r = Value.from_f32(a / b)
config.stack.append(r)
@staticmethod
def f32_min(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
if a == b == 0 and (numpy.signbit(a) or numpy.signbit(b)):
return config.stack.append(Value.from_f32_u32(convention.f32_negative_zero))
config.stack.append(Value.from_f32(numpy.min([a, b])))
@staticmethod
def f32_max(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
if a == b == 0 and not (numpy.signbit(a) and numpy.signbit(b)):
return config.stack.append(Value.from_f32_u32(convention.f32_positive_zero))
config.stack.append(Value.from_f32(numpy.max([a, b])))
@staticmethod
def f32_copysign(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f32()
a = config.stack.pop().f32()
r = Value.from_f32(numpy.copysign(a, b))
config.stack.append(r)
@staticmethod
def f64_abs(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.data[7] = a.data[7] & 0x7f
config.stack.append(a)
@staticmethod
def f64_neg(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
if a.data[7] & 0x80 != 0x00:
a.data[7] = a.data[7] & 0x7f
else:
a.data[7] = a.data[7] | 0x80
config.stack.append(a)
@staticmethod
def f64_ceil(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f64(numpy.ceil(a))
config.stack.append(r)
@staticmethod
def f64_floor(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f64(numpy.floor(a))
config.stack.append(r)
@staticmethod
def f64_trunc(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f64(numpy.trunc(a))
config.stack.append(r)
@staticmethod
def f64_nearest(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f64(numpy.round(a))
config.stack.append(r)
@staticmethod
def f64_sqrt(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f64(numpy.sqrt(a))
config.stack.append(r)
@staticmethod
def f64_add(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
r = Value.from_f64(a + b)
config.stack.append(r)
@staticmethod
def f64_sub(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
r = Value.from_f64(a - b)
config.stack.append(r)
@staticmethod
def f64_mul(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
r = Value.from_f64(a * b)
config.stack.append(r)
@staticmethod
def f64_div(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
r = Value.from_f64(a / b)
config.stack.append(r)
@staticmethod
def f64_min(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
if a == b == 0 and (numpy.signbit(a) or numpy.signbit(b)):
return config.stack.append(Value.from_f64_u64(convention.f64_negative_zero))
config.stack.append(Value.from_f64(numpy.min([a, b])))
@staticmethod
def f64_max(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
if a == b == 0 and not (numpy.signbit(a) and numpy.signbit(b)):
return config.stack.append(Value.from_f64_u64(convention.f64_positive_zero))
config.stack.append(Value.from_f64(numpy.max([a, b])))
@staticmethod
def f64_copysign(config: Configuration, i: binary.Instruction):
b = config.stack.pop().f64()
a = config.stack.pop().f64()
r = Value.from_f64(numpy.copysign(a, b))
config.stack.append(r)
@staticmethod
def i32_wrap_i64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
config.stack.append(Value.from_i32(a))
@staticmethod
def i32_trunc_sf32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
if a > (1 << 31) - 1 or a < -(1 << 31):
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i32(b)
config.stack.append(r)
@staticmethod
def i32_trunc_uf32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
if a > (1 << 32) - 1 or a <= -1:
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i32(b)
config.stack.append(r)
@staticmethod
def i32_trunc_sf64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
if a > (1 << 31) - 1 or a < -(1 << 31):
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i32(b)
config.stack.append(r)
@staticmethod
def i32_trunc_uf64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
if a > (1 << 32) - 1 or a <= -1:
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i32(b)
config.stack.append(r)
@staticmethod
def i64_extend_si32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
r = Value.from_i64(a)
config.stack.append(r)
@staticmethod
def i64_extend_ui32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().u32()
r = Value.from_i64(a)
config.stack.append(r)
@staticmethod
def i64_trunc_sf32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
if a > (1 << 63) - 1 or a < -(1 << 63):
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i64(b)
config.stack.append(r)
@staticmethod
def i64_trunc_uf32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
if a > (1 << 64) - 1 or a <= -1:
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i64(b)
config.stack.append(r)
@staticmethod
def i64_trunc_sf64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
if a > (1 << 63) - 1 or a < -(1 << 63):
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i64(b)
config.stack.append(r)
@staticmethod
def i64_trunc_uf64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
if a > (1 << 64) - 1 or a <= -1:
raise Exception('pywasm: integer overflow')
try:
b = int(a)
except:
raise Exception('pywasm: invalid conversion to integer')
r = Value.from_i64(b)
config.stack.append(r)
@staticmethod
def f32_convert_si32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
r = Value.from_f32(num.f32(a))
config.stack.append(r)
@staticmethod
def f32_convert_ui32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().u32()
r = Value.from_f32(num.f32(a))
config.stack.append(r)
@staticmethod
def f32_convert_si64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
r = Value.from_f32(num.f32(a))
config.stack.append(r)
@staticmethod
def f32_convert_ui64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().u64()
r = Value.from_f32(num.f32(a))
config.stack.append(r)
@staticmethod
def f32_demote_f64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f64()
r = Value.from_f32(num.f32(a))
config.stack.append(r)
@staticmethod
def f64_convert_si32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i32()
r = Value.from_f64(num.f64(a))
config.stack.append(r)
@staticmethod
def f64_convert_ui32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().u32()
r = Value.from_f64(num.f64(a))
config.stack.append(r)
@staticmethod
def f64_convert_si64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().i64()
r = Value.from_f64(num.f64(a))
config.stack.append(r)
@staticmethod
def f64_convert_ui64(config: Configuration, i: binary.Instruction):
a = config.stack.pop().u64()
r = Value.from_f64(num.f64(a))
config.stack.append(r)
@staticmethod
def f64_promote_f32(config: Configuration, i: binary.Instruction):
a = config.stack.pop().f32()
r = Value.from_f64(num.f64(a))
config.stack.append(r)
@staticmethod
def i32_reinterpret_f32(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.type = binary.ValueType(convention.i32)
config.stack.append(a)
@staticmethod
def i64_reinterpret_f64(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.type = binary.ValueType(convention.i64)
config.stack.append(a)
@staticmethod
def f32_reinterpret_i32(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.type = binary.ValueType(convention.f32)
config.stack.append(a)
@staticmethod
def f64_reinterpret_i64(config: Configuration, i: binary.Instruction):
a = config.stack.pop()
a.type = binary.ValueType(convention.f64)
config.stack.append(a)
class Machine:
# Execution behavior is defined in terms of an abstract machine that models the program state. It includes a stack,
# which records operand values and control constructs, and an abstract store containing global state.
def __init__(self):
self.module: ModuleInstance = ModuleInstance()
self.store: Store = Store()
self.opts: option.Option = option.Option()
def instantiate(self, module: binary.Module, extern_value_list: typing.List[ExternValue]):
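        # Instantiation outline: check the provided imports against the module's
        # expected external types, evaluate the global initializers, allocate the
        # module instance, initialize element and data segments, then run the start
        # function if one is declared.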
self.module.type_list = module.type_list
# [TODO] If module is not valid, then panic
# Assert: module is valid with external types classifying its imports
for e in extern_value_list:
if isinstance(e, FunctionAddress):
assert e < len(self.store.function_list)
if isinstance(e, TableAddress):
assert e < len(self.store.table_list)
if isinstance(e, MemoryAddress):
assert e < len(self.store.memory_list)
if isinstance(e, GlobalAddress):
assert e < len(self.store.global_list)
# If the number m of imports is not equal to the number n of provided external values, then fail
assert len(module.import_list) == len(extern_value_list)
# For each external value and external type, do:
# If externval is not valid with an external type in store S, then fail
# If externtype does not match externtype, then fail
for i, e in enumerate(extern_value_list):
if isinstance(e, FunctionAddress):
a = self.store.function_list[e].type
b = module.type_list[module.import_list[i].desc]
assert match_function(a, b)
if isinstance(e, TableAddress):
a = self.store.table_list[e].limits
b = module.import_list[i].desc.limits
assert match_limits(a, b)
if isinstance(e, MemoryAddress):
a = self.store.memory_list[e].type
b = module.import_list[i].desc
assert match_memory(a, b)
if isinstance(e, GlobalAddress):
assert module.import_list[i].desc.value_type == self.store.global_list[e].value.type
assert module.import_list[i].desc.mut == self.store.global_list[e].mut
# Let vals be the vector of global initialization values determined by module and externvaln
global_values: typing.List[Value] = []
aux = ModuleInstance()
aux.global_addr_list = [e for e in extern_value_list if isinstance(e, GlobalAddress)]
for e in module.global_list:
log.debugln(f'init global value')
frame = Frame(aux, [], e.expr, 1)
config = Configuration(self.store)
config.opts = self.opts
config.set_frame(frame)
r = config.exec().data[0]
global_values.append(r)
# Let moduleinst be a new module instance allocated from module in store S with imports externval and global
# initializer values, and let S be the extended store produced by module allocation.
self.allocate(module, extern_value_list, global_values)
for element_segment in module.element_list:
log.debugln('init elem')
# Let F be the frame, push the frame F to the stack
frame = Frame(self.module, [], element_segment.offset, 1)
config = Configuration(self.store)
config.opts = self.opts
config.set_frame(frame)
r = config.exec().data[0]
offset = r.val()
table_addr = self.module.table_addr_list[element_segment.table_index]
table_instance = self.store.table_list[table_addr]
for i, e in enumerate(element_segment.init):
table_instance.element_list[offset + i] = e
for data_segment in module.data_list:
log.debugln('init data')
frame = Frame(self.module, [], data_segment.offset, 1)
config = Configuration(self.store)
config.opts = self.opts
config.set_frame(frame)
r = config.exec().data[0]
offset = r.val()
memory_addr = self.module.memory_addr_list[data_segment.memory_index]
memory_instance = self.store.memory_list[memory_addr]
memory_instance.data[offset: offset + len(data_segment.init)] = data_segment.init
# [TODO] Assert: due to validation, the frame F is now on the top of the stack.
# If the start function module.start is not empty, invoke the function instance
if module.start is not None:
log.debugln(f'running start function {module.start}')
self.invocate(self.module.function_addr_list[module.start.function_idx], [])
def allocate(
self,
module: binary.Module,
extern_value_list: typing.List[ExternValue],
global_values: typing.List[Value],
):
# Let funcaddr be the list of function addresses extracted from externval, concatenated with funcaddr
# Let tableaddr be the list of table addresses extracted from externval, concatenated with tableaddr
# Let memaddr be the list of memory addresses extracted from externval, concatenated with memaddr
# Let globaladdr be the list of global addresses extracted from externval, concatenated with globaladdr
for e in extern_value_list:
if isinstance(e, FunctionAddress):
self.module.function_addr_list.append(e)
if isinstance(e, TableAddress):
self.module.table_addr_list.append(e)
if isinstance(e, MemoryAddress):
self.module.memory_addr_list.append(e)
if isinstance(e, GlobalAddress):
self.module.global_addr_list.append(e)
# For each function func in module.funcs, do:
for e in module.function_list:
function_addr = self.store.allocate_wasm_function(self.module, e)
self.module.function_addr_list.append(function_addr)
# For each table in module.tables, do:
for e in module.table_list:
table_addr = self.store.allocate_table(e.type)
self.module.table_addr_list.append(table_addr)
# For each memory module.mems, do:
for e in module.memory_list:
memory_addr = self.store.allocate_memory(e.type)
self.module.memory_addr_list.append(memory_addr)
# For each global in module.globals, do:
for i, e in enumerate(module.global_list):
global_addr = self.store.allocate_global(e.type, global_values[i])
self.module.global_addr_list.append(global_addr)
# For each export in module.exports, do:
for e in module.export_list:
if isinstance(e.desc, binary.FunctionIndex):
addr = self.module.function_addr_list[e.desc]
if isinstance(e.desc, binary.TableIndex):
addr = self.module.table_addr_list[e.desc]
if isinstance(e.desc, binary.MemoryIndex):
addr = self.module.memory_addr_list[e.desc]
if isinstance(e.desc, binary.GlobalIndex):
addr = self.module.global_addr_list[e.desc]
export_inst = ExportInstance(e.name, addr)
self.module.export_list.append(export_inst)
def invocate(self, function_addr: FunctionAddress, function_args: typing.List[Value]) -> Result:
config = Configuration(self.store)
config.opts = self.opts
return config.call(function_addr, function_args)
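# ---------------------------------------------------------------------------
# Usage sketch (an addition for illustration, not part of the original module).
# It shows how Machine.instantiate and Machine.invocate fit together, assuming a
# decoder such as `binary.Module.from_reader` and an ExportInstance carrying
# `name` and `value` attributes; both names are assumptions and may differ here.
#
#   import io
#
#   with open('add.wasm', 'rb') as f:
#       module = binary.Module.from_reader(io.BytesIO(f.read()))   # hypothetical decoder call
#   machine = Machine()
#   machine.instantiate(module, extern_value_list=[])               # no imports in this sketch
#   addr = next(e.value for e in machine.module.export_list
#               if e.name == 'add' and isinstance(e.value, FunctionAddress))
#   result = machine.invocate(addr, [Value.from_i32(1), Value.from_i32(2)])
#   print(result.data[0].i32())
# ---------------------------------------------------------------------------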
| 39.112696 | 120 | 0.628273 |
61846abc4fa17e483cee2f1f45400c8a1bc1c5a2 | 10,861 | py | Python |
source/conf.py | mm-s/symbol-docs | 1e4838713cb1b33ab19ad1056ebec6312c1fb5d4 | ["Apache-2.0"] | 1 | 2021-01-08T09:17:56.000Z | 2021-01-08T09:17:56.000Z |
source/conf.py | huuhao1999/symbol-docs | 75c7725fc9943d476ca59d663b3c935c31f2fac1 | ["Apache-2.0"] | null | null | null |
source/conf.py | huuhao1999/symbol-docs | 75c7725fc9943d476ca59d663b3c935c31f2fac1 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
#
# symbol-docs documentation build configuration file, created by
# sphinx-quickstart on Mon Dec 18 16:39:26 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('_ext'))
import sphinx_bootstrap_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.githubpages',
'sphinx.ext.ifconfig',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinxcontrib.examplecode',
'sphinxcontrib.mermaid',
'sphinxcontrib.viewsource',
'sphinx_tabs.tabs',
'ablog',
'edit-on-github',
'fulltoc',
'ghreference',
'redirects',
]
# Add any paths that contain templates here, relative to this directory.
import ablog
templates_path = ['_templates']
templates_path.append(ablog.get_html_templates_path())
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The main toctree document.
master_doc = 'index'
# General information about the project.
project = u'symbol-docs'
copyright = u'2018-present, NEM'
author = u'NEM'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.22.2'
# The full version, including alpha/beta/rc tags.
release = u'Main'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
locale_dirs = ['locale/']
gettext_compact = False
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'inkpot'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Tab name for entire site. (Default: "Site")
'navbar_site_name': "Site",
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
    # Render the current page's TOC in the navbar. (Default: true)
    'navbar_pagenav': False,
    # Tab name for the current page's TOC. (Default: "Page")
'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "exclude",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "cosmo" or "sandstone".
#
# The set of valid themes depend on the version of Bootstrap
# that's used (the next config option).
#
# Currently, the supported themes are:
# - Bootstrap 2: https://bootswatch.com/2
# - Bootstrap 3: https://bootswatch.com/3
'bootswatch_theme': "cosmo",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo-symbol.svg"
# Docs Title
html_title = 'Symbol Documentation'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Additional html pages
html_additional_pages = {'404': '404.html'}
## Custom style overrides
def setup(app):
app.add_stylesheet("https://fonts.googleapis.com/css?family=Noto+Sans:400,400i,700&display=swap")
app.add_stylesheet("https://use.fontawesome.com/releases/v5.2.0/css/all.css")
app.add_stylesheet("css/custom.css") # may also be an URL
app.add_javascript("js/custom.js")
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
html_sidebars = {
'**': ['globaltoc.html'],
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'symbol-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'symbol-docs.tex', u'Symbol Documentation',
u'nem', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'symbol-docs', u'Symbol Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'symbol-docs', u'Symbol Documentation',
author, 'symbol-docs', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for substitutions --
rst_prolog = """
.. |codename| replace:: Symbol
.. |sitename| replace:: Symbol Developer Documentation
.. |networkcurrency| replace:: ``symbol.xym``
.. |privatenetworkcurrency| replace:: ``cat.currency``
.. |sdk| replace:: Symbol SDK
.. |cli| replace:: Symbol CLI
.. |desktop-wallet| replace:: Symbol Desktop Wallet
.. |slack| raw:: html
<a href="https://join.slack.com/t/nem2/shared_invite/zt-km50fzxd-I8dPNrPEV6cqYVrhyLvrfA" target="_blank">Slack</a>
.. |nem| raw:: html
<a href="https://nem.io/" target="_blank">NEM</a>
.. |community| raw:: html
<a href="https://github.com/nemtech/community/" target="_blank">community</a>
.. |twitter| raw:: html
<a href="https://twitter.com/NEMofficial" target="_blank">Twitter</a>
.. |github| raw:: html
<a href="https://github.com/nemtech" target="_blank">GitHub</a>
.. |open-source| raw:: html
<a href="https://github.com/nemtech" target="_blank">open source</a>
"""
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html', 'references.html', 'guides.html']
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
html_favicon = 'favicon.ico'
# -- Options for edit on github -------------------------------------------
edit_on_github_project = 'nemtech/symbol-docs'
edit_on_github_branch = 'main'
# -- Options for edit scaled images ---------------------------------------
html_scaled_image_link = False
# -- Options for ablog ----------------------------------------------------
blog_baseurl = '/'
blog_path = 'guides'
blog_authors = {}
# -- Options for linkcheck ------------------------------------------------
linkcheck_ignore = [r'http://localhost\d+']
# -- Options for viewsource ------------------------------------------------
viewsource_title = 'View Code'
def viewsource_resolve_link(file_path, language=None):
if language == 'javascript':
language = 'typescript'
if language == 'java':
language = 'java/src/test/java/symbol/guides/examples'
base_url = 'https://github.com/nemtech/symbol-docs/blob/main/source/resources/examples/%s/' % language
path_split = file_path.split('/')
path = "/".join(path_split[len(path_split)-2:])
return base_url + path
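# For example (illustrative path, not taken from the docs themselves):
#   viewsource_resolve_link('resources/examples/typescript/account/info.ts', 'javascript')
# maps 'javascript' onto the 'typescript' examples folder and returns
# 'https://github.com/nemtech/symbol-docs/blob/main/source/resources/examples/typescript/account/info.ts'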
# -- Custom extlinks -----------------------------------------------------
extlinks = {'schema': ('https://github.com/nemtech/catbuffer/blob/main/schemas/%s', 'file '),
'properties': ('https://github.com/nemtech/catapult-server/blob/main/resources/%s', 'file ')}
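# Illustrative usage of these roles in the .rst sources (the file names are examples only):
#   :schema:`transfer.cats`               -> https://github.com/nemtech/catbuffer/blob/main/schemas/transfer.cats
#   :properties:`config-node.properties`  -> https://github.com/nemtech/catapult-server/blob/main/resources/config-node.properties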
| 30.253482 | 117 | 0.659884 |
7a11d51d447c0809c57a8864790e1a2b3c0c9be3 | 1,363 | py | Python |
OpenGL/01.HW.py | sarincr/Python-modules-for-GUI-Dev | 0dca4982de99d4f2a305862daf6a2f6fbcc522e6 | ["MIT"] | null | null | null |
OpenGL/01.HW.py | sarincr/Python-modules-for-GUI-Dev | 0dca4982de99d4f2a305862daf6a2f6fbcc522e6 | ["MIT"] | null | null | null |
OpenGL/01.HW.py | sarincr/Python-modules-for-GUI-Dev | 0dca4982de99d4f2a305862daf6a2f6fbcc522e6 | ["MIT"] | null | null | null |
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
w, h = 500,500
# ---Section 1---
def square():
# We have to declare the points in this sequence: bottom left, bottom right, top right, top left
glBegin(GL_QUADS) # Begin the sketch
glVertex2f(100, 100) # Coordinates for the bottom left point
glVertex2f(200, 100) # Coordinates for the bottom right point
glVertex2f(200, 200) # Coordinates for the top right point
glVertex2f(100, 200) # Coordinates for the top left point
glEnd() # Mark the end of drawing
# This alone isn't enough to draw our square
# ---Section 2---
def showScreen():
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Clear the colour and depth buffers before redrawing
    glLoadIdentity() # Reset the current transformation matrix
square() # Draw a square using our function
glutSwapBuffers()
#---Section 3---
glutInit()
glutInitDisplayMode(GLUT_RGBA) # Set the display mode to be colored
glutInitWindowSize(500, 500) # Set the w and h of your window
glutInitWindowPosition(0, 0) # Set the position at which this window should appear
wind = glutCreateWindow("OpenGL Coding Practice") # Set a window title
glutDisplayFunc(showScreen)
glutIdleFunc(showScreen) # Keep redrawing the scene while the window is idle
glutMainLoop() # Keeps the above created window displaying/running in a loop
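# Note (added for clarity, not part of the original snippet): with the default projection
# the visible region is roughly [-1, 1] on each axis, so vertices at 100..200 fall outside
# the view. A common fix is to install a pixel-based orthographic projection before
# drawing, e.g. at the top of showScreen():
#
#   glViewport(0, 0, w, h)
#   glMatrixMode(GL_PROJECTION)
#   glLoadIdentity()
#   gluOrtho2D(0.0, w, 0.0, h)
#   glMatrixMode(GL_MODELVIEW)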
| 34.075 | 112 | 0.734409 |
d2cd681b9d7e1b682c18eb871f89724851d08515 | 15,748 | py | Python |
skmultilearn/ext/meka.py | emrecncelik/scikit-multilearn | 1d7f7b74702cb9a5a8245726bf38e23e1f2f3382 | ["BSD-2-Clause"] | 763 | 2015-03-22T18:54:33.000Z | 2022-03-25T07:54:04.000Z |
skmultilearn/ext/meka.py | emrecncelik/scikit-multilearn | 1d7f7b74702cb9a5a8245726bf38e23e1f2f3382 | ["BSD-2-Clause"] | 187 | 2015-01-27T15:06:35.000Z | 2022-03-22T21:41:47.000Z |
skmultilearn/ext/meka.py | emrecncelik/scikit-multilearn | 1d7f7b74702cb9a5a8245726bf38e23e1f2f3382 | ["BSD-2-Clause"] | 157 | 2015-04-13T16:47:36.000Z | 2022-03-17T19:12:59.000Z |
import os
import shlex
import subprocess
import sys
import tempfile
import zipfile
from builtins import filter
from builtins import map
from builtins import range
from builtins import str
import scipy.sparse as sparse
from ..base import MLClassifierBase
from ..dataset import save_to_arff, get_data_home, _download_single_file, _get_md5
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote
SUPPORTED_VERSION = '1.9.2'
SUPPORTED_VERSION_MD5 = 'e909044b39513bbad451b8d71098b22c'
def download_meka(version=None):
"""Downloads a given version of the MEKA library and returns its classpath
Parameters
----------
version : str
the MEKA version to download, default falls back to currently supported version 1.9.2
Returns
-------
string
meka class path string for installed version
Raises
------
IOError
if unpacking the meka release file does not provide a proper setup
Exception
if MD5 mismatch happens after a download error
"""
version = version or SUPPORTED_VERSION
meka_release_string = "meka-release-{}".format(version)
file_name = meka_release_string + '-bin.zip'
meka_path = get_data_home(subdirectory='meka')
target_path = os.path.join(meka_path, file_name)
path_to_lib = os.path.join(meka_path, meka_release_string, 'lib')
if os.path.exists(target_path):
print("MEKA {} found, not downloading".format(version))
else:
print("MEKA {} not found, downloading".format(version))
release_url = "http://downloads.sourceforge.net/project/meka/meka-{}/".format(version)
_download_single_file(file_name, target_path, release_url)
found_md5 = _get_md5(target_path)
if SUPPORTED_VERSION_MD5 != found_md5:
raise Exception("MD5 mismatch - possible MEKA download error")
if not os.path.exists(path_to_lib):
with zipfile.ZipFile(target_path, 'r') as meka_zip:
print("Unzipping MEKA {} to {}".format(version, meka_path + os.path.sep))
meka_zip.extractall(path=meka_path + os.path.sep)
if not os.path.exists(os.path.join(path_to_lib, 'meka-{}.jar'.format(version))):
raise IOError("Something went wrong, MEKA files missing, please file a bug report")
return path_to_lib + os.path.sep
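# Illustrative use (the identifiers below are example values): fetch the supported MEKA
# release once and pass the returned class path to Meka(...); alternatively, point the
# MEKA_CLASSPATH environment variable at an existing MEKA lib/ directory and skip the download.
#
#   meka_classpath = download_meka()        # defaults to the supported 1.9.2 release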
class Meka(MLClassifierBase):
"""Wrapper for the MEKA classifier
Allows using MEKA, WEKA and some of MULAN classifiers from scikit-compatible API. For more information on
how to use this class see the tutorial: :doc:`../meka`
Parameters
----------
meka_classifier : str
The MEKA classifier string and parameters from the MEKA API,
such as :code:`meka.classifiers.multilabel.MULAN -S RAkEL2`
weka_classifier : str
The WEKA classifier string and parameters from the WEKA API,
such as :code:`weka.classifiers.trees.J48`
java_command : str
Path to test the java command
meka_classpath: str
Path to the MEKA class path folder, usually the folder lib
in the directory MEKA was extracted into
Attributes
----------
output_ : str
the full text output of MEKA command
References
----------
If you use this wrapper please also cite:
.. code-block :: latex
@article{MEKA,
author = {Read, Jesse and Reutemann, Peter and Pfahringer, Bernhard and Holmes, Geoff},
title = {{MEKA}: A Multi-label/Multi-target Extension to {Weka}},
journal = {Journal of Machine Learning Research},
year = {2016},
volume = {17},
number = {21},
pages = {1--5},
url = {http://jmlr.org/papers/v17/12-164.html},
}
@article{Hall:2009:WDM:1656274.1656278,
author = {Hall, Mark and Frank, Eibe and Holmes, Geoffrey and Pfahringer, Bernhard and Reutemann, Peter and Witten, Ian H.},
title = {The WEKA Data Mining Software: An Update},
journal = {SIGKDD Explor. Newsl.},
issue_date = {June 2009},
volume = {11},
number = {1},
month = nov,
year = {2009},
issn = {1931-0145},
pages = {10--18},
numpages = {9},
url = {http://doi.acm.org/10.1145/1656274.1656278},
doi = {10.1145/1656274.1656278},
acmid = {1656278},
publisher = {ACM},
address = {New York, NY, USA},
}
Examples
--------
Here's an example of performing Label Powerset classification using MEKA with a WEKA Naive Bayes classifier.
.. code-block:: python
from skmultilearn.ext import Meka, download_meka
meka = Meka(
meka_classifier = "meka.classifiers.multilabel.LC",
weka_classifier = "weka.classifiers.bayes.NaiveBayes",
meka_classpath = download_meka(),
java_command = '/usr/bin/java')
meka.fit(X_train, y_train)
predictions = meka.predict(X_test)
"""
def __init__(self, meka_classifier=None, weka_classifier=None,
java_command=None, meka_classpath=None):
super(Meka, self).__init__()
self.java_command = java_command
if self.java_command is None:
# TODO: this will not be needed once we're python 3 ready - we will
# use it only in python 2.7 cases
from whichcraft import which
self.java_command = which("java")
if self.java_command is None:
raise ValueError("Java not found")
self.meka_classpath = meka_classpath
if self.meka_classpath is None:
self.meka_classpath = os.environ.get('MEKA_CLASSPATH')
if self.meka_classpath is None:
raise ValueError("No meka classpath defined")
self.meka_classifier = meka_classifier
self.weka_classifier = weka_classifier
self.copyable_attrs = [
'meka_classifier',
'weka_classifier',
'java_command',
'meka_classpath'
]
self.output_ = None
self._verbosity = 5
self._warnings = None
self.require_dense = [False, False]
self._clean()
def _clean(self):
"""Sets various attributes to :code:`None`"""
self._results = None
self._statistics = None
self.output_ = None
self._error = None
self._label_count = None
self._instance_count = None
def _remove_temporary_files(self, temporary_files):
"""Internal function for cleaning temporary files"""
for file_object in temporary_files:
file_name = file_object.name
file_object.close()
if os.path.exists(file_name):
os.remove(file_name)
arff_file_name = file_name + '.arff'
if os.path.exists(arff_file_name):
os.remove(arff_file_name)
def fit(self, X, y):
"""Fits classifier to training data
Internally this method dumps X and y to temporary arff files and
runs MEKA with relevant arguments using :meth:`_run`. It uses a
sparse DOK representation (:class:`scipy.sparse.dok_matrix`)
of the X matrix.
Parameters
----------
X : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix, shape=(n_samples, n_features)
input feature matrix
y : `array_like`, :class:`numpy.matrix` or :mod:`scipy.sparse` matrix of `{0, 1}`, shape=(n_samples, n_labels)
binary indicator matrix with label assignments
Returns
-------
self
fitted instance of self
"""
self._clean()
X = self._ensure_input_format(
X, sparse_format='dok', enforce_sparse=True)
y = self._ensure_output_format(
y, sparse_format='dok', enforce_sparse=True)
self._label_count = y.shape[1]
# we need this in case threshold needs to be recalibrated in meka
self.train_data_ = save_to_arff(X, y)
train_arff = tempfile.NamedTemporaryFile(delete=False)
classifier_dump_file = tempfile.NamedTemporaryFile(delete=False)
try:
with open(train_arff.name + '.arff', 'w') as fp:
fp.write(self.train_data_)
input_args = [
'-verbosity', "0",
'-split-percentage', "100",
'-t', '"{}"'.format(train_arff.name + '.arff'),
'-d', '"{}"'.format(classifier_dump_file.name),
]
self._run_meka_command(input_args)
self.classifier_dump = None
with open(classifier_dump_file.name, 'rb') as fp:
self.classifier_dump = fp.read()
finally:
self._remove_temporary_files([train_arff, classifier_dump_file])
return self
def predict(self, X):
"""Predict label assignments for X
Internally this method dumps X to temporary arff files and
runs MEKA with relevant arguments using :func:`_run`. It uses a
sparse DOK representation (:class:`scipy.sparse.dok_matrix`)
of the X matrix.
Parameters
----------
X : numpy.ndarray or scipy.sparse
input features of shape :code:`(n_samples, n_features)`
Returns
-------
scipy.sparse of int
            sparse matrix of integers with shape :code:`(n_samples, n_labels)`
"""
X = self._ensure_input_format(
X, sparse_format='dok', enforce_sparse=True)
self._instance_count = X.shape[0]
if self.classifier_dump is None:
raise Exception('Not classified')
sparse_y = sparse.coo_matrix((X.shape[0], self._label_count), dtype=int)
try:
train_arff = tempfile.NamedTemporaryFile(delete=False)
test_arff = tempfile.NamedTemporaryFile(delete=False)
classifier_dump_file = tempfile.NamedTemporaryFile(delete=False)
with open(train_arff.name + '.arff', 'w') as fp:
fp.write(self.train_data_)
with open(classifier_dump_file.name, 'wb') as fp:
fp.write(self.classifier_dump)
with open(test_arff.name + '.arff', 'w') as fp:
fp.write(save_to_arff(X, sparse_y))
args = [
'-l', '"{}"'.format(classifier_dump_file.name)
]
self._run(train_arff.name + '.arff', test_arff.name + '.arff', args)
self._parse_output()
finally:
self._remove_temporary_files(
[train_arff, test_arff, classifier_dump_file]
)
return self._results
def _run(self, train_file, test_file, additional_arguments=[]):
"""Runs the meka classifiers
Parameters
----------
train_file : str
path to train :code:`.arff` file in meka format
(big endian, labels first in attributes list).
test_file : str
path to test :code:`.arff` file in meka format
(big endian, labels first in attributes list).
Returns
-------
predictions: sparse binary indicator matrix [n_test_samples, n_labels]
array of binary label vectors including label predictions of
shape :code:`(n_test_samples, n_labels)`
"""
self.output_ = None
self._warnings = None
# meka_command_string = 'java -cp "/home/niedakh/pwr/old/meka-1.5/lib/*" meka.classifiers.multilabel.MULAN -S RAkEL2
# -threshold 0 -t {train} -T {test} -verbosity {verbosity} -W weka.classifiers.bayes.NaiveBayes'
# meka.classifiers.multilabel.LC, weka.classifiers.bayes.NaiveBayes
args = [
'-t', '"{}"'.format(train_file),
'-T', '"{}"'.format(test_file),
'-verbosity', str(5),
] + additional_arguments
self._run_meka_command(args)
return self
def _parse_output(self):
"""Internal function for parsing MEKA output."""
if self.output_ is None:
self._results = None
self._statistics = None
return None
predictions_split_head = '==== PREDICTIONS'
predictions_split_foot = '|==========='
        if self._label_count is None:
            # list() wrappers keep this working on Python 3, where map() and
            # filter() return iterators that cannot be indexed directly.
            self._label_count = list(map(lambda y: int(y.split(')')[1].strip()), [
                x for x in self.output_.split('\n') if 'Number of labels' in x]))[0]
        if self._instance_count is None:
            self._instance_count = int(float(list(filter(
                lambda x: '==== PREDICTIONS (N=' in x,
                self.output_.split('\n')))[0].split('(')[1].split('=')[1].split(')')[0]))
predictions = self.output_.split(predictions_split_head)[1].split(
predictions_split_foot)[0].split('\n')[1:-1]
predictions = [y.split(']')[0]
for y in [x.split('] [')[1] for x in predictions]]
predictions = [[a for a in [f.strip() for f in z.split(',')] if len(a) > 0]
for z in predictions]
predictions = [[int(a) for a in z] for z in predictions]
assert self._verbosity == 5
self._results = sparse.lil_matrix(
(self._instance_count, self._label_count), dtype='int')
for row in range(self._instance_count):
for label in predictions[row]:
self._results[row, label] = 1
statistics = [x for x in self.output_.split(
'== Evaluation Info')[1].split('\n') if len(x) > 0 and '==' not in x]
statistics = [y for y in [z.strip() for z in statistics] if ' ' in y]
array_data = [z for z in statistics if '[' in z]
non_array_data = [z for z in statistics if '[' not in z]
self._statistics = {}
for row in non_array_data:
r = row.strip().split(' ')
r = [z for z in r if len(z) > 0]
r = [z.strip() for z in r]
if len(r) < 2:
continue
try:
test_value = float(r[1])
except ValueError:
test_value = r[1]
r[1] = test_value
self._statistics[r[0]] = r[1]
for row in array_data:
r = row.strip().split('[')
r = [z.strip() for z in r]
r[1] = r[1].replace(', ', ' ').replace(
',', '.').replace(']', '').split(' ')
r[1] = [x for x in r[1] if len(x) > 0]
self._statistics[r[0]] = r[1]
def _run_meka_command(self, args):
command_args = [
self.java_command,
'-cp', '"{}*"'.format(self.meka_classpath),
self.meka_classifier,
]
if self.weka_classifier is not None:
command_args += ['-W', self.weka_classifier]
command_args += args
meka_command = " ".join(command_args)
if sys.platform != 'win32':
meka_command = shlex.split(meka_command)
pipes = subprocess.Popen(meka_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True)
self.output_, self._error = pipes.communicate()
if type(self.output_) == bytes:
self.output_ = self.output_.decode(sys.stdout.encoding)
if type(self._error) == bytes:
self._error = self._error.decode(sys.stdout.encoding)
if pipes.returncode != 0:
raise Exception(self.output_ + self._error)
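# Illustrative sketch (not part of the original module): how the class above is
# typically constructed and roughly what command line _run()/_run_meka_command()
# assemble for a predict run. The java path and classpath below are hypothetical
# placeholders; fit()/predict() are not called here because they require a
# working MEKA installation.
if __name__ == '__main__':
    example = Meka(
        meka_classifier="meka.classifiers.multilabel.LC",
        weka_classifier="weka.classifiers.bayes.NaiveBayes",
        meka_classpath="/path/to/meka/lib/",   # hypothetical
        java_command="/usr/bin/java")          # hypothetical
    # Roughly the command assembled for prediction:
    #   /usr/bin/java -cp "/path/to/meka/lib/*" meka.classifiers.multilabel.LC \
    #       -W weka.classifiers.bayes.NaiveBayes \
    #       -t "train.arff" -T "test.arff" -verbosity 5 -l "classifier.dump"
    print(example.meka_classifier, example.weka_classifier)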
| 35.073497
| 136
| 0.585154
|
3999649c252e18cb1ece7a3b667403899a7ea69b
| 19,172
|
py
|
Python
|
image_class.py
|
openharmony-gitee-mirror/update_packaging_tools
|
ddc9e16353d634a5dfe7b24e75387c9917a3cd06
|
[
"Apache-2.0"
] | null | null | null |
image_class.py
|
openharmony-gitee-mirror/update_packaging_tools
|
ddc9e16353d634a5dfe7b24e75387c9917a3cd06
|
[
"Apache-2.0"
] | null | null | null |
image_class.py
|
openharmony-gitee-mirror/update_packaging_tools
|
ddc9e16353d634a5dfe7b24e75387c9917a3cd06
|
[
"Apache-2.0"
] | 1
|
2021-09-13T11:17:32.000Z
|
2021-09-13T11:17:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import copy
import os
import struct
import tempfile
from hashlib import sha256
from log_exception import UPDATE_LOGGER
from blocks_manager import BlocksManager
from utils import SPARSE_IMAGE_MAGIC
from utils import HEADER_INFO_FORMAT
from utils import HEADER_INFO_LEN
from utils import EXTEND_VALUE
from utils import FILE_MAP_ZERO_KEY
from utils import FILE_MAP_NONZERO_KEY
from utils import FILE_MAP_COPY_KEY
from utils import MAX_BLOCKS_PER_GROUP
class FullUpdateImage:
"""
Full image processing class
"""
def __init__(self, target_package_images_dir, full_img_list, verse_script,
full_image_path_list, no_zip=False):
self.__target_package_images_dir = target_package_images_dir
self.__full_img_list = full_img_list
self.__verse_script = verse_script
self.__full_image_path_list = full_image_path_list
self.__no_zip = no_zip
def update_full_image(self):
"""
Processing of the full image
:return full_image_content_len_list: full image content length list
:return full_image_file_obj_list: full image temporary file list
"""
full_image_file_obj_list = []
full_image_content_len_list = []
for idx, each_name in enumerate(self.__full_img_list):
full_image_content = self.get_full_image_content(
self.__full_image_path_list[idx])
if full_image_content is False:
UPDATE_LOGGER.print_log(
"Get full image content failed!",
log_type=UPDATE_LOGGER.ERROR_LOG)
return False, False
each_img = tempfile.NamedTemporaryFile(
prefix="full_image%s" % each_name, mode='wb')
each_img.write(full_image_content)
each_img.seek(0)
full_image_content_len_list.append(len(full_image_content))
full_image_file_obj_list.append(each_img)
UPDATE_LOGGER.print_log(
"Image %s full processing completed" % each_name)
if not self.__no_zip:
                # Not in no-zip mode: add the corresponding script command.
if is_sparse_image(each_img.name):
sparse_image_write_cmd = \
self.__verse_script.sparse_image_write(each_name)
cmd = '%s_WRITE_FLAG%s' % (
each_name, sparse_image_write_cmd)
else:
raw_image_write_cmd = \
self.__verse_script.raw_image_write(
each_name, each_name)
cmd = '%s_WRITE_FLAG%s' % (
each_name, raw_image_write_cmd)
if each_name not in ("boot", "updater_boot",
"updater", "updater_b"):
self.__verse_script.add_command(
cmd=cmd)
UPDATE_LOGGER.print_log(
"All full image processing completed! image count: %d" %
len(self.__full_img_list))
return full_image_content_len_list, full_image_file_obj_list
@staticmethod
def get_full_image_content(each_name):
"""
Obtain the full image content.
:param each_name: image name
:return content: full image content if available; false otherwise
"""
each_image_path = each_name
if not os.path.exists(each_image_path):
UPDATE_LOGGER.print_log(
"The file is missing "
"from the target package, "
"the component: %s cannot be full update processed. " %
each_image_path)
return False
with open(each_image_path, 'rb') as f_r:
content = f_r.read()
return content
def is_sparse_image(img_path):
"""
Check whether the image is a sparse image.
:param img_path: image path
:return:
"""
with open(img_path, 'rb') as f_r:
image_content = f_r.read(HEADER_INFO_LEN)
try:
header_info = struct.unpack(HEADER_INFO_FORMAT, image_content)
except struct.error:
return False
is_sparse = IncUpdateImage.image_header_info_check(header_info)[-1]
if is_sparse:
UPDATE_LOGGER.print_log("Sparse image is not supported!")
raise RuntimeError
return is_sparse
class IncUpdateImage:
"""
Increment update image class
"""
def __init__(self, image_path, map_path):
"""
Initialize the inc image.
:param image_path: img file path
:param map_path: map file path
"""
self.image_path = image_path
self.offset_value_list = []
self.care_block_range = None
self.extended_range = None
self.reserved_blocks = BlocksManager("0")
self.file_map = []
self.offset_index = []
self.block_size = None
self.total_blocks = None
self.parse_sparse_image_file(image_path, map_path)
def parse_sparse_image_file(self, image_path, map_path):
"""
Parse the .img file.
:param image_path: img file path
:param map_path: map file path
"""
self.block_size = block_size = 4096
self.total_blocks = total_blocks = \
os.path.getsize(self.image_path) // self.block_size
reference = b'\0' * self.block_size
with open(image_path, 'rb') as f_r:
care_value_list, offset_value_list = [], []
nonzero_blocks = []
for i in range(self.total_blocks):
blocks_data = f_r.read(self.block_size)
if blocks_data != reference:
nonzero_blocks.append(i)
nonzero_blocks.append(i + 1)
self.care_block_range = BlocksManager(nonzero_blocks)
care_value_list = list(self.care_block_range.range_data)
for idx, value in enumerate(care_value_list):
if idx != 0 and (idx + 1) % 2 == 0:
be_value = int(care_value_list[idx - 1])
af_value = int(care_value_list[idx])
file_tell = be_value * block_size
offset_value_list.append(
(be_value, af_value - be_value,
file_tell, None))
self.offset_index = [i[0] for i in offset_value_list]
self.offset_value_list = offset_value_list
extended_range = \
self.care_block_range.extend_value_to_blocks(EXTEND_VALUE)
all_blocks = BlocksManager(range_data=(0, total_blocks))
self.extended_range = \
extended_range.get_intersect_with_other(all_blocks). \
get_subtract_with_other(self.care_block_range)
self.parse_block_map_file(map_path, f_r)
def parse_block_map_file(self, map_path, image_file_r):
"""
Parses the map file for blocks where files are contained in the image.
:param map_path: map file path
:param image_file_r: file reading object
:return:
"""
remain_range = self.care_block_range
temp_file_map = {}
with open(map_path, 'r') as f_r:
# Read the .map file and process each line.
for each_line in f_r.readlines():
each_map_path, ranges_value = each_line.split(None, 1)
each_range = BlocksManager(ranges_value)
temp_file_map[each_map_path] = each_range
# each_range is contained in the remain range.
if each_range.size() != each_range. \
get_intersect_with_other(remain_range).size():
raise RuntimeError
# After the processing is complete,
# remove each_range from remain_range.
remain_range = remain_range.get_subtract_with_other(each_range)
reserved_blocks = self.reserved_blocks
# Remove reserved blocks from all blocks.
remain_range = remain_range.get_subtract_with_other(reserved_blocks)
# Divide all blocks into zero_blocks
# (if there are many) and nonzero_blocks.
zero_blocks_list = []
nonzero_blocks_list = []
nonzero_groups_list = []
default_zero_block = ('\0' * self.block_size).encode()
nonzero_blocks_list, nonzero_groups_list, zero_blocks_list = \
self.apply_remain_range(
default_zero_block, image_file_r, nonzero_blocks_list,
nonzero_groups_list, remain_range, zero_blocks_list)
temp_file_map = self.get_file_map(
nonzero_blocks_list, nonzero_groups_list,
reserved_blocks, temp_file_map, zero_blocks_list)
self.file_map = temp_file_map
def apply_remain_range(self, *args):
"""
Implement traversal processing of remain_range.
"""
default_zero_block, image_file_r, \
nonzero_blocks_list, nonzero_groups_list, \
remain_range, zero_blocks_list = args
for start_value, end_value in remain_range:
for each_value in range(start_value, end_value):
                # Binary search (bisect) for the position of each_value in self.offset_index.
idx = bisect.bisect_right(self.offset_index, each_value) - 1
chunk_start, _, file_pos, fill_data = \
self.offset_value_list[idx]
data = self.get_file_data(self.block_size, chunk_start,
default_zero_block, each_value,
file_pos, fill_data, image_file_r)
zero_blocks_list, nonzero_blocks_list, nonzero_groups_list = \
self.get_zero_nonzero_blocks_list(
data, default_zero_block, each_value,
nonzero_blocks_list, nonzero_groups_list,
zero_blocks_list)
return nonzero_blocks_list, nonzero_groups_list, zero_blocks_list
@staticmethod
def get_file_map(*args):
"""
Obtain the file map.
nonzero_blocks_list nonzero blocks list,
nonzero_groups_list nonzero groups list,
reserved_blocks reserved blocks ,
temp_file_map temporary file map,
zero_blocks_list zero block list
:return temp_file_map file map
"""
nonzero_blocks_list, nonzero_groups_list, \
reserved_blocks, temp_file_map, zero_blocks_list = args
if nonzero_blocks_list:
nonzero_groups_list.append(nonzero_blocks_list)
if zero_blocks_list:
temp_file_map[FILE_MAP_ZERO_KEY] = \
BlocksManager(range_data=zero_blocks_list)
if nonzero_groups_list:
for i, blocks in enumerate(nonzero_groups_list):
temp_file_map["%s-%d" % (FILE_MAP_NONZERO_KEY, i)] = \
BlocksManager(range_data=blocks)
if reserved_blocks:
temp_file_map[FILE_MAP_COPY_KEY] = reserved_blocks
return temp_file_map
@staticmethod
def get_zero_nonzero_blocks_list(*args):
"""
Get zero_blocks_list, nonzero_blocks_list, and nonzero_groups_list.
data: block data,
default_zero_block: default to zero block,
each_value: each value,
nonzero_blocks_list: nonzero_blocks_list,
nonzero_groups_list: nonzero_groups_list,
zero_blocks_list: zero_blocks_list,
:return new_zero_blocks_list: new zero blocks list,
:return new_nonzero_blocks_list: new nonzero blocks list,
:return new_nonzero_groups_list: new nonzero groups list.
"""
data, default_zero_block, each_value, \
nonzero_blocks_list, nonzero_groups_list, \
zero_blocks_list = args
# Check whether the data block is equal to the default zero_blocks.
if data == default_zero_block:
zero_blocks_list.append(each_value)
zero_blocks_list.append(each_value + 1)
else:
nonzero_blocks_list.append(each_value)
nonzero_blocks_list.append(each_value + 1)
# The number of nonzero_blocks is greater than
# or equal to the upper limit.
if len(nonzero_blocks_list) >= MAX_BLOCKS_PER_GROUP:
nonzero_groups_list.append(nonzero_blocks_list)
nonzero_blocks_list = []
new_zero_blocks_list, new_nonzero_blocks_list, \
new_nonzero_groups_list = \
copy.copy(zero_blocks_list), \
copy.copy(nonzero_blocks_list),\
copy.copy(nonzero_groups_list)
return new_zero_blocks_list, new_nonzero_blocks_list, \
new_nonzero_groups_list
@staticmethod
def get_file_data(*args):
"""
Get the file data.
block_size: blocksize,
chunk_start: the start position of chunk,
default_zero_block: default to zero blocks,
each_value: each_value,
file_pos: file position,
fill_data: data,
image_file_r: read file object,
:return data: Get the file data.
"""
block_size, chunk_start, default_zero_block, each_value, \
file_pos, fill_data, image_file_r = args
if file_pos is not None:
file_pos += (each_value - chunk_start) * block_size
image_file_r.seek(file_pos, os.SEEK_SET)
data = image_file_r.read(block_size)
else:
if fill_data == default_zero_block[:4]:
data = default_zero_block
else:
data = None
return data
def range_sha256(self, ranges):
"""
range sha256 hash content
:param ranges: ranges value
:return:
"""
hash_obj = sha256()
for data in self.__get_blocks_set_data(ranges):
hash_obj.update(data)
return hash_obj.hexdigest()
def write_range_data_2_fd(self, ranges, file_obj):
"""
write range data to fd
:param ranges: ranges obj
:param file_obj: file obj
:return:
"""
for data in self.__get_blocks_set_data(ranges):
file_obj.write(data)
def get_ranges(self, ranges):
"""
get ranges value
:param ranges: ranges
:return: ranges value
"""
return [each_data for each_data in self.__get_blocks_set_data(ranges)]
def __get_blocks_set_data(self, blocks_set_data):
"""
Get the range data.
"""
with open(self.image_path, 'rb') as f_r:
for start, end in blocks_set_data:
diff_value = end - start
idx = bisect.bisect_right(self.offset_index, start) - 1
chunk_start, chunk_len, file_pos, fill_data = \
self.offset_value_list[idx]
remain = chunk_len - (start - chunk_start)
this_read = min(remain, diff_value)
if file_pos is not None:
pos = file_pos + ((start - chunk_start) * self.block_size)
f_r.seek(pos, os.SEEK_SET)
yield f_r.read(this_read * self.block_size)
else:
yield fill_data * (this_read * (self.block_size >> 2))
diff_value -= this_read
while diff_value > 0:
idx += 1
chunk_start, chunk_len, file_pos, fill_data = \
self.offset_value_list[idx]
this_read = min(chunk_len, diff_value)
if file_pos is not None:
f_r.seek(file_pos, os.SEEK_SET)
yield f_r.read(this_read * self.block_size)
else:
yield fill_data * (this_read * (self.block_size >> 2))
diff_value -= this_read
@staticmethod
def image_header_info_check(header_info):
"""
        Check the header information of a sparse image.
:param header_info: header_info
:return:
"""
image_flag = True
# Sparse mirroring header ID. The magic value is fixed to 0xED26FF3A.
magic_info = header_info[0]
# major version number
major_version = header_info[1]
# minor version number
minor_version = header_info[2]
# Length of the header information.
        # The value is fixed to 28 bytes.
header_info_size = header_info[3]
# Header information size of the chunk.
        # The length is fixed to 12 bytes.
chunk_header_info_size = header_info[4]
# Number of bytes of a block. The default size is 4096.
block_size = header_info[5]
# Total number of blocks contained in the current image
# (number of blocks in a non-sparse image)
total_blocks = header_info[6]
# Total number of chunks contained in the current image
total_chunks = header_info[7]
if magic_info != SPARSE_IMAGE_MAGIC:
UPDATE_LOGGER.print_log(
"SparseImage head Magic should be 0xED26FF3A!")
image_flag = False
if major_version != 1 or minor_version != 0:
UPDATE_LOGGER.print_log(
"SparseImage Only supported major version with "
"minor version 1.0!")
image_flag = False
if header_info_size != 28:
UPDATE_LOGGER.print_log(
"SparseImage header info size must be 28! size: %u." %
header_info_size)
image_flag = False
if chunk_header_info_size != 12:
UPDATE_LOGGER.print_log(
"SparseImage Chunk header size mast to be 12! size: %u." %
chunk_header_info_size)
image_flag = False
ret_args = [block_size, chunk_header_info_size, header_info_size,
magic_info, total_blocks, total_chunks, image_flag]
return ret_args
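# Illustrative sketch (not part of the original module): a minimal check of
# image_header_info_check() against a hand-built header. It assumes the
# standard Android sparse-image header layout '<I4H4I' (magic, major, minor,
# header size, chunk header size, block size, total blocks, total chunks,
# checksum); the real HEADER_INFO_FORMAT constant lives in utils and may
# differ, and all numeric values below are hypothetical examples.
if __name__ == '__main__':
    example_header = struct.pack(
        '<I4H4I',
        0xED26FF3A,  # magic expected by the check above
        1, 0,        # major.minor version 1.0
        28,          # header info size in bytes
        12,          # chunk header size in bytes
        4096,        # block size in bytes
        1024,        # total blocks (hypothetical)
        8,           # total chunks (hypothetical)
        0)           # checksum (not inspected by the check)
    unpacked = struct.unpack('<I4H4I', example_header)
    # The last element of the returned list is the overall validity flag.
    print(IncUpdateImage.image_header_info_check(unpacked)[-1])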
| 41.408207
| 80
| 0.594409
|
2197fe5a2fe135c18814fd2b2145c6fead17c1f6
| 1,865
|
py
|
Python
|
utils/alias_multinomial.py
|
bhomass/lda2vec-pytorch
|
a57d40c9fee1ef88d7c27a52c428894d50b6fb5f
|
[
"MIT"
] | 123
|
2017-09-23T06:53:43.000Z
|
2022-02-24T15:06:42.000Z
|
utils/alias_multinomial.py
|
bhomass/lda2vec-pytorch
|
a57d40c9fee1ef88d7c27a52c428894d50b6fb5f
|
[
"MIT"
] | 17
|
2017-09-23T08:03:22.000Z
|
2021-02-15T00:43:41.000Z
|
utils/alias_multinomial.py
|
bhomass/lda2vec-pytorch
|
a57d40c9fee1ef88d7c27a52c428894d50b6fb5f
|
[
"MIT"
] | 39
|
2017-09-19T10:31:14.000Z
|
2022-01-10T02:50:17.000Z
|
import torch
import numpy as np
class AliasMultinomial(object):
"""
Fast sampling from a multinomial distribution.
https://hips.seas.harvard.edu/blog/2013/03/03/the-alias-method-efficient-sampling-with-many-discrete-outcomes/
"""
def __init__(self, probs):
"""
probs: a float tensor with shape [K].
It represents probabilities of different outcomes.
There are K outcomes. Probabilities sum to one.
"""
K = len(probs)
self.q = torch.zeros(K).cuda()
self.J = torch.LongTensor([0]*K).cuda()
# sort the data into the outcomes with probabilities
# that are larger and smaller than 1/K
smaller = []
larger = []
for kk, prob in enumerate(probs):
self.q[kk] = K*prob
if self.q[kk] < 1.0:
smaller.append(kk)
else:
larger.append(kk)
# loop though and create little binary mixtures that
# appropriately allocate the larger outcomes over the
# overall uniform mixture
while len(smaller) > 0 and len(larger) > 0:
small = smaller.pop()
large = larger.pop()
self.J[small] = large
self.q[large] = (self.q[large] - 1.0) + self.q[small]
if self.q[large] < 1.0:
smaller.append(large)
else:
larger.append(large)
        # clamp() is not in-place; use clamp_() so the tables are actually bounded.
        self.q.clamp_(0.0, 1.0)
        self.J.clamp_(0, K - 1)
def draw(self, N):
"""Draw N samples from the distribution."""
K = self.J.size(0)
r = torch.LongTensor(np.random.randint(0, K, size=N)).cuda()
q = self.q.index_select(0, r)
j = self.J.index_select(0, r)
b = torch.bernoulli(q)
oq = r.mul(b.long())
oj = j.mul((1 - b).long())
return oq + oj
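# Illustrative usage sketch (not part of the original module). The class above
# allocates its tables on the GPU, so this only runs when CUDA is available;
# the probabilities below are an arbitrary example distribution.
if __name__ == '__main__':
    if torch.cuda.is_available():
        probs = [0.1, 0.2, 0.3, 0.4]  # must sum to one
        sampler = AliasMultinomial(probs)
        samples = sampler.draw(100000)
        # Empirical frequencies should be close to probs.
        counts = torch.bincount(samples, minlength=len(probs)).float()
        print(counts / counts.sum())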
| 30.57377
| 114
| 0.542627
|
3ba82a4f96eda89e03922367f52b11d32a5d242c
| 23,039
|
py
|
Python
|
tests.py
|
CaptainKanuk/scikit-rebate
|
16798854e7fbca553416409be8f9ff6f71204dac
|
[
"MIT"
] | 362
|
2017-01-12T12:59:34.000Z
|
2022-03-31T15:18:40.000Z
|
tests.py
|
pschmitt52/scikit-rebate
|
779538969fcc067d82660a233150094dba2b91c1
|
[
"MIT"
] | 46
|
2016-12-01T19:43:18.000Z
|
2021-12-16T18:59:39.000Z
|
tests.py
|
pschmitt52/scikit-rebate
|
779538969fcc067d82660a233150094dba2b91c1
|
[
"MIT"
] | 67
|
2017-01-04T20:36:33.000Z
|
2022-01-03T14:10:35.000Z
|
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Pete Schmitt (pschmitt@upenn.edu)
- Ryan J. Urbanowicz (ryanurb@upenn.edu)
- Weixuan Fu (weixuanf@upenn.edu)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from skrebate import ReliefF, SURF, SURFstar, MultiSURF, MultiSURFstar
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings('ignore')
np.random.seed(3249083)
genetic_data = pd.read_csv(
'data/GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1.tsv.gz', sep='\t', compression='gzip')
genetic_data = genetic_data.sample(frac=0.25)
genetic_data_cont_endpoint = pd.read_csv(
'data/GAMETES_Epistasis_2-Way_continuous_endpoint_a_20s_1600her_0.4__maf_0.2_EDM-2_01.tsv.gz', sep='\t', compression='gzip')
genetic_data_cont_endpoint.rename(columns={'Class': 'class'}, inplace=True)
genetic_data_cont_endpoint = genetic_data_cont_endpoint.sample(frac=0.25)
genetic_data_mixed_attributes = pd.read_csv(
'data/GAMETES_Epistasis_2-Way_mixed_attribute_a_20s_1600her_0.4__maf_0.2_EDM-2_01.tsv.gz', sep='\t', compression='gzip')
genetic_data_mixed_attributes.rename(columns={'Class': 'class'}, inplace=True)
genetic_data_mixed_attributes = genetic_data_mixed_attributes.sample(frac=0.25)
genetic_data_missing_values = pd.read_csv(
'data/GAMETES_Epistasis_2-Way_missing_values_0.1_a_20s_1600her_0.4__maf_0.2_EDM-2_01.tsv.gz', sep='\t', compression='gzip')
genetic_data_missing_values.rename(columns={'Class': 'class'}, inplace=True)
genetic_data_missing_values = genetic_data_missing_values.sample(frac=0.25)
genetic_data_multiclass = pd.read_csv('data/3Class_Datasets_Loc_2_01.txt', sep='\t')
genetic_data_multiclass.rename(columns={'Class': 'class'}, inplace=True)
genetic_data_multiclass = genetic_data_multiclass.sample(frac=0.25)
features, labels = genetic_data.drop('class', axis=1).values, genetic_data['class'].values
headers = list(genetic_data.drop("class", axis=1))
features_cont_endpoint, labels_cont_endpoint = genetic_data_cont_endpoint.drop(
'class', axis=1).values, genetic_data_cont_endpoint['class'].values
headers_cont_endpoint = list(genetic_data_cont_endpoint.drop("class", axis=1))
features_mixed_attributes, labels_mixed_attributes = genetic_data_mixed_attributes.drop(
'class', axis=1).values, genetic_data_mixed_attributes['class'].values
headers_mixed_attributes = list(genetic_data_mixed_attributes.drop("class", axis=1))
features_missing_values, labels_missing_values = genetic_data_missing_values.drop(
'class', axis=1).values, genetic_data_missing_values['class'].values
headers_missing_values = list(genetic_data_missing_values.drop("class", axis=1))
features_multiclass, labels_multiclass = genetic_data_multiclass.drop(
'class', axis=1).values, genetic_data_multiclass['class'].values
headers_multiclass = list(genetic_data_multiclass.drop("class", axis=1))
# Initialization tests--------------------------------------------------------------------------------
def test_relieff_init():
"""Check: ReliefF constructor stores custom values correctly"""
clf = ReliefF(n_features_to_select=7,
n_neighbors=500,
discrete_threshold=20,
verbose=True,
n_jobs=3)
assert clf.n_features_to_select == 7
assert clf.n_neighbors == 500
assert clf.discrete_threshold == 20
assert clf.verbose == True
assert clf.n_jobs == 3
def test_surf_init():
"""Check: SURF, SURF*, and MultiSURF constructors store custom values correctly"""
clf = SURF(n_features_to_select=7,
discrete_threshold=20,
verbose=True,
n_jobs=3)
assert clf.n_features_to_select == 7
assert clf.discrete_threshold == 20
assert clf.verbose == True
assert clf.n_jobs == 3
# Basic Parallelization Tests and Core binary data and discrete feature data testing (Focus on ReliefF only for efficiency)------------------------------------------------------------
def test_relieff_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_relieff_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): ReliefF works in a sklearn pipeline when ReliefF is parallelized"""
# Note that the rebate algorithm cannot be parallelized with both the random forest and the cross validation all at once. If the rebate algorithm is parallelized, the cross-validation scoring cannot be.
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10, n_jobs=-1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.7
def test_relieffpercent_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): ReliefF with % neighbors works in a sklearn pipeline"""
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=0.1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_surf_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): SURF works in a sklearn pipeline"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_surf_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): SURF works in a sklearn pipeline when SURF is parallelized"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2, n_jobs=-1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.7
def test_surfstar_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): SURF* works in a sklearn pipelined"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_surfstar_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): SURF* works in a sklearn pipeline when SURF* is parallelized"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2, n_jobs=-1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.7
def test_multisurfstar_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): MultiSURF* works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_multisurfstar_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): MultiSURF* works in a sklearn pipeline when MultiSURF* is parallelized"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2, n_jobs=-1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.7
def test_multisurf_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): MultiSURF works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.7
def test_multisurf_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): MultiSURF works in a sklearn pipeline when MultiSURF is parallelized"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2, n_jobs=-1),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.7
def test_turf_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): TuRF with ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
# clf = make_pipeline(TuRF(core_algorithm="ReliefF", n_features_to_select=2, pct=0.5, n_neighbors=100),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, fit_params={
# 'turf__headers': headers}, cv=3, n_jobs=-1)) > 0.7
def test_turf_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): TuRF with ReliefF works in a sklearn pipeline when TuRF is parallelized"""
np.random.seed(49082)
# clf = make_pipeline(TuRF(core_algorithm="ReliefF", n_features_to_select=2, pct=0.5, n_neighbors=100, n_jobs=-1),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, fit_params={
# 'turf__headers': headers}, cv=3)) > 0.7
def test_vlsrelief_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): VLSRelief with ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
# clf = make_pipeline(VLSRelief(core_algorithm="ReliefF", n_features_to_select=2, n_neighbors=100),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, fit_params={
# 'vlsrelief__headers': headers}, cv=3, n_jobs=-1)) > 0.7
def test_vlsrelief_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): VLSRelief with ReliefF works in a sklearn pipeline when VLSRelief is parallelized"""
np.random.seed(49082)
# clf = make_pipeline(VLSRelief(core_algorithm="ReliefF", n_features_to_select=2, n_neighbors=100, n_jobs=-1),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, fit_params={
# 'vlsrelief__headers': headers}, cv=3)) > 0.7
def test_iterrelief_pipeline():
"""Check: Data (Binary Endpoint, Discrete Features): IterRelief with ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
# clf = make_pipeline(IterRelief(core_algorithm="ReliefF", n_features_to_select=2, n_neighbors=100),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, cv=3, n_jobs=-1)) > 0.5
def test_iterrelief_pipeline_parallel():
"""Check: Data (Binary Endpoint, Discrete Features): IterRelief with ReliefF works in a sklearn pipeline when VLSRelief is parallelized"""
np.random.seed(49082)
# clf = make_pipeline(IterRelief(core_algorithm="ReliefF", n_features_to_select=2, n_neighbors=100, n_jobs=-1),
# RandomForestClassifier(n_estimators=100, n_jobs=-1))
#
# assert np.mean(cross_val_score(clf, features, labels, cv=3)) > 0.5
# Test Multiclass Data ------------------------------------------------------------------------------------
def test_relieff_pipeline_multiclass():
"""Check: Data (Multiclass Endpoint): ReliefF works in a sklearn pipeline """
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_multiclass,
labels_multiclass, cv=3, n_jobs=-1)) > 0.7
def test_surf_pipeline_multiclass():
"""Check: Data (Multiclass Endpoint): SURF works in a sklearn pipeline"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_multiclass,
labels_multiclass, cv=3, n_jobs=-1)) > 0.7
def test_surfstar_pipeline_multiclass():
"""Check: Data (Multiclass Endpoint): SURF* works in a sklearn pipeline"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_multiclass,
labels_multiclass, cv=3, n_jobs=-1)) > 0.7
def test_multisurfstar_pipeline_multiclass():
"""Check: Data (Multiclass Endpoint): MultiSURF* works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_multiclass,
labels_multiclass, cv=3, n_jobs=-1)) > 0.7
def test_multisurf_pipeline_multiclass():
"""Check: Data (Multiclass Endpoint): MultiSURF works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_multiclass,
labels_multiclass, cv=3, n_jobs=-1)) > 0.7
# Test Continuous Endpoint Data ------------------------------------------------------------------------------------
def test_relieff_pipeline_cont_endpoint():
"""Check: Data (Continuous Endpoint): ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint,
labels_cont_endpoint, cv=3, n_jobs=-1))) < 0.5
def test_surf_pipeline_cont_endpoint():
"""Check: Data (Continuous Endpoint): SURF works in a sklearn pipeline"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint,
labels_cont_endpoint, cv=3, n_jobs=-1))) < 0.5
def test_surfstar_pipeline_cont_endpoint():
"""Check: Data (Continuous Endpoint): SURF* works in a sklearn pipeline"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint,
labels_cont_endpoint, cv=3, n_jobs=-1))) < 0.5
def test_multisurfstar_pipeline_cont_endpoint():
"""Check: Data (Continuous Endpoint): MultiSURF* works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint,
labels_cont_endpoint, cv=3, n_jobs=-1))) < 0.5
def test_multisurf_pipeline_cont_endpoint():
"""Check: Data (Continuous Endpoint): MultiSURF works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2),
RandomForestRegressor(n_estimators=100, n_jobs=-1))
assert abs(np.mean(cross_val_score(clf, features_cont_endpoint,
labels_cont_endpoint, cv=3, n_jobs=-1))) < 0.5
# Test Mixed Attribute Data ------------------------------------------------------------------------------------
def test_relieff_pipeline_mixed_attributes():
"""Check: Data (Mixed Attributes): ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_mixed_attributes,
labels_mixed_attributes, cv=3, n_jobs=-1)) > 0.7
def test_surf_pipeline_mixed_attributes():
"""Check: Data (Mixed Attributes): SURF works in a sklearn pipeline"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_mixed_attributes,
labels_mixed_attributes, cv=3, n_jobs=-1)) > 0.7
def test_surfstar_pipeline_mixed_attributes():
"""Check: Data (Mixed Attributes): SURF* works in a sklearn pipeline"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_mixed_attributes,
labels_mixed_attributes, cv=3, n_jobs=-1)) > 0.7
def test_multisurfstar_pipeline_mixed_attributes():
"""Check: Data (Mixed Attributes): MultiSURF* works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_mixed_attributes,
labels_mixed_attributes, cv=3, n_jobs=-1)) > 0.7
def test_multisurf_pipeline_mixed_attributes():
"""Check: Data (Mixed Attributes): MultiSURF works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_mixed_attributes,
labels_mixed_attributes, cv=3, n_jobs=-1)) > 0.7
# Test Missing Value Data ------------------------------------------------------------------------------------
def test_relieff_pipeline_missing_values():
"""Check: Data (Missing Values): ReliefF works in a sklearn pipeline"""
np.random.seed(49082)
clf = make_pipeline(ReliefF(n_features_to_select=2, n_neighbors=10),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_missing_values,
labels_missing_values, cv=3, n_jobs=-1)) > 0.7
def test_surf_pipeline_missing_values():
"""Check: Data (Missing Values): SURF works in a sklearn pipeline"""
np.random.seed(240932)
clf = make_pipeline(SURF(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_missing_values,
labels_missing_values, cv=3, n_jobs=-1)) > 0.7
def test_surfstar_pipeline_missing_values():
"""Check: Data (Missing Values): SURF* works in a sklearn pipeline"""
np.random.seed(9238745)
clf = make_pipeline(SURFstar(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_missing_values,
labels_missing_values, cv=3, n_jobs=-1)) > 0.7
def test_multisurfstar_pipeline_missing_values():
"""Check: Data (Missing Values): MultiSURF* works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURFstar(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_missing_values,
labels_missing_values, cv=3, n_jobs=-1)) > 0.7
def test_multisurf_pipeline_missing_values():
"""Check: Data (Missing Values): MultiSURF works in a sklearn pipeline"""
np.random.seed(320931)
clf = make_pipeline(MultiSURF(n_features_to_select=2),
Imputer(),
RandomForestClassifier(n_estimators=100, n_jobs=-1))
assert np.mean(cross_val_score(clf, features_missing_values,
labels_missing_values, cv=3, n_jobs=-1)) > 0.7
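# Illustrative sketch (not part of the original test suite): the same feature
# selectors can also be used directly, outside a pipeline. The attribute names
# used below (feature_importances_, top_features_) follow the scikit-rebate
# documentation; treat this as a non-authoritative usage note rather than a test.
def example_direct_relieff_usage():
    fs = ReliefF(n_features_to_select=2, n_neighbors=10)
    fs.fit(features, labels)
    # Per-feature relevance scores, one per column of `features`.
    print(dict(zip(headers, fs.feature_importances_)))
    # Indices of the highest-scoring features.
    print(fs.top_features_[:2])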
| 44.136015
| 208
| 0.660055
|
612cb9a0641507a1ac2aa472cfc45b68d83637c1
| 14,551
|
py
|
Python
|
test/lib/mayaUsd/fileio/testDuplicateAs.py
|
sun-frog/maya-usd
|
561fc867e192e426749c9df59807cc836d16a2c2
|
[
"Apache-2.0"
] | null | null | null |
test/lib/mayaUsd/fileio/testDuplicateAs.py
|
sun-frog/maya-usd
|
561fc867e192e426749c9df59807cc836d16a2c2
|
[
"Apache-2.0"
] | null | null | null |
test/lib/mayaUsd/fileio/testDuplicateAs.py
|
sun-frog/maya-usd
|
561fc867e192e426749c9df59807cc836d16a2c2
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2021 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fixturesUtils
import mayaUsd_createStageWithNewLayer
from usdUtils import createSimpleXformScene
import mayaUsd.lib
import mayaUtils
import mayaUsd.ufe
from pxr import UsdGeom, Gf
from maya import cmds
from maya import standalone
from maya.api import OpenMaya as om
import ufe
import unittest
from testUtils import assertVectorAlmostEqual
import os
class DuplicateAsTestCase(unittest.TestCase):
'''Test duplicate as: duplicate USD data into Maya and vice versa.
'''
pluginsLoaded = False
@classmethod
def setUpClass(cls):
fixturesUtils.readOnlySetUpClass(__file__, loadPlugin=False)
if not cls.pluginsLoaded:
cls.pluginsLoaded = mayaUtils.isMayaUsdPluginLoaded()
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
def setUp(self):
cmds.file(new=True, force=True)
def testDuplicateAsMaya(self):
'''Duplicate a USD transform hierarchy to Maya.'''
(_, _, aXlation, aUsdUfePathStr, aUsdUfePath, _, _,
bXlation, bUsdUfePathStr, bUsdUfePath, _) = \
createSimpleXformScene()
# Duplicate USD data as Maya data, placing it under the root.
with mayaUsd.lib.OpUndoItemList():
self.assertTrue(mayaUsd.lib.PrimUpdaterManager.duplicate(
aUsdUfePathStr, '|world'))
# Should now have two transform nodes in the Maya scene: the path
# components in the second segment of the aUsdItem and bUsdItem will
# now be under the Maya world.
aMayaPathStr = str(aUsdUfePath.segments[1]).replace('/', '|')
bMayaPathStr = str(bUsdUfePath.segments[1]).replace('/', '|')
self.assertEqual(cmds.ls(aMayaPathStr, long=True)[0], aMayaPathStr)
self.assertEqual(cmds.ls(bMayaPathStr, long=True)[0], bMayaPathStr)
# Translation should have been copied over to the Maya data model.
self.assertEqual(cmds.getAttr(aMayaPathStr + '.translate')[0],
aXlation)
self.assertEqual(cmds.getAttr(bMayaPathStr + '.translate')[0],
bXlation)
def testDuplicateAsMayaUndoRedo(self):
'''Duplicate a USD transform hierarchy to Maya and then undo and redo the command.'''
(_, _, aXlation, aUsdUfePathStr, aUsdUfePath, _, _,
bXlation, bUsdUfePathStr, bUsdUfePath, _) = \
createSimpleXformScene()
# Capture selection before duplicate.
previousSn = cmds.ls(sl=True, ufe=True, long=True)
# Duplicate USD data as Maya data, placing it under the root.
cmds.mayaUsdDuplicate(aUsdUfePathStr, '|world')
def verifyDuplicate():
# Should now have two transform nodes in the Maya scene: the path
# components in the second segment of the aUsdItem and bUsdItem will
# now be under the Maya world.
aMayaPathStr = str(aUsdUfePath.segments[1]).replace('/', '|')
bMayaPathStr = str(bUsdUfePath.segments[1]).replace('/', '|')
self.assertEqual(cmds.ls(aMayaPathStr, long=True)[0], aMayaPathStr)
self.assertEqual(cmds.ls(bMayaPathStr, long=True)[0], bMayaPathStr)
# Translation should have been copied over to the Maya data model.
self.assertEqual(cmds.getAttr(aMayaPathStr + '.translate')[0],
aXlation)
self.assertEqual(cmds.getAttr(bMayaPathStr + '.translate')[0],
bXlation)
# Selection is on the duplicate.
sn = cmds.ls(sl=True, ufe=True, long=True)
self.assertEqual(len(sn), 1)
self.assertEqual(sn[0], aMayaPathStr)
verifyDuplicate()
cmds.undo()
def verifyDuplicateIsGone():
bMayaPathStr = str(bUsdUfePath.segments[1]).replace('/', '|')
self.assertListEqual(cmds.ls(bMayaPathStr, long=True), [])
self.assertEqual(cmds.ls(sl=True, ufe=True, long=True), previousSn)
verifyDuplicateIsGone()
cmds.redo()
verifyDuplicate()
def testDuplicateAsUsd(self):
'''Duplicate a Maya transform hierarchy to USD.'''
# Create a hierarchy. Because group1 is selected upon creation, group2
# will be its parent.
group1 = cmds.createNode('transform')
group2 = cmds.group()
self.assertEqual(cmds.listRelatives(group1, parent=True)[0], group2)
cmds.setAttr(group1 + '.translate', 1, 2, 3)
cmds.setAttr(group2 + '.translate', -4, -5, -6)
# Create a stage to receive the USD duplicate.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
# Duplicate Maya data as USD data. As of 17-Nov-2021 no single-segment
# path handler registered to UFE for Maya path strings, so use absolute
# path.
with mayaUsd.lib.OpUndoItemList():
self.assertTrue(mayaUsd.lib.PrimUpdaterManager.duplicate(
cmds.ls(group2, long=True)[0], psPathStr))
# Maya hierarchy should be duplicated in USD.
usdGroup2PathStr = psPathStr + ',/' + group2
usdGroup1PathStr = usdGroup2PathStr + '/' + group1
usdGroup2Path = ufe.PathString.path(usdGroup2PathStr)
usdGroup1Path = ufe.PathString.path(usdGroup1PathStr)
# group1 is the child of group2
usdGroup1 = ufe.Hierarchy.createItem(usdGroup1Path)
usdGroup2 = ufe.Hierarchy.createItem(usdGroup2Path)
usdGroup1Hier = ufe.Hierarchy.hierarchy(usdGroup1)
usdGroup2Hier = ufe.Hierarchy.hierarchy(usdGroup2)
self.assertEqual(usdGroup2, usdGroup1Hier.parent())
self.assertEqual(len(usdGroup2Hier.children()), 1)
self.assertEqual(usdGroup1, usdGroup2Hier.children()[0])
# Translations have been preserved.
usdGroup1T3d = ufe.Transform3d.transform3d(usdGroup1)
usdGroup2T3d = ufe.Transform3d.transform3d(usdGroup2)
self.assertEqual([1, 2, 3], usdGroup1T3d.translation().vector)
self.assertEqual([-4, -5, -6], usdGroup2T3d.translation().vector)
def testDuplicateAsNonRootUsd(self):
'''Duplicate a Maya transform hierarchy to a non-root node in USD.'''
# Create a hierarchy. Because group1 is selected upon creation, group2
# will be its parent.
group1 = cmds.createNode('transform')
group2 = cmds.group()
self.assertEqual(cmds.listRelatives(group1, parent=True)[0], group2)
cmds.setAttr(group1 + '.translate', 1, 2, 3)
cmds.setAttr(group2 + '.translate', -4, -5, -6)
# Create a stage to receive the USD duplicate, with a prim that will be the parent.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
stage = mayaUsd.lib.GetPrim(psPathStr).GetStage()
parentName = 'future_parent'
aPrim = stage.DefinePrim('/' + parentName, 'Xform')
parentPathStr = psPathStr + ',/' + parentName
# Duplicate Maya data as USD data. As of 17-Nov-2021 no single-segment
# path handler registered to UFE for Maya path strings, so use absolute
# path.
with mayaUsd.lib.OpUndoItemList():
self.assertTrue(mayaUsd.lib.PrimUpdaterManager.duplicate(
cmds.ls(group2, long=True)[0], parentPathStr))
# Maya hierarchy should be duplicated in USD.
usdGroup2PathStr = psPathStr + ',/' + parentName + '/' + group2
usdGroup1PathStr = usdGroup2PathStr + '/' + group1
usdGroup2Path = ufe.PathString.path(usdGroup2PathStr)
usdGroup1Path = ufe.PathString.path(usdGroup1PathStr)
# group1 is the child of group2
usdGroup1 = ufe.Hierarchy.createItem(usdGroup1Path)
self.assertIsNotNone(usdGroup1)
usdGroup2 = ufe.Hierarchy.createItem(usdGroup2Path)
self.assertIsNotNone(usdGroup2)
usdGroup1Hier = ufe.Hierarchy.hierarchy(usdGroup1)
usdGroup2Hier = ufe.Hierarchy.hierarchy(usdGroup2)
self.assertEqual(usdGroup2, usdGroup1Hier.parent())
self.assertEqual(len(usdGroup2Hier.children()), 1)
self.assertEqual(usdGroup1, usdGroup2Hier.children()[0])
# Translations have been preserved.
usdGroup1T3d = ufe.Transform3d.transform3d(usdGroup1)
usdGroup2T3d = ufe.Transform3d.transform3d(usdGroup2)
self.assertEqual([1, 2, 3], usdGroup1T3d.translation().vector)
self.assertEqual([-4, -5, -6], usdGroup2T3d.translation().vector)
def testDuplicateAsUsdSameName(self):
'''Duplicate a Maya transform to USD when USD already has a prim with that name.'''
# Create a Maya transform named 'A'.
mayaA = cmds.createNode('transform', name='A')
cmds.setAttr(mayaA + '.translate', 1, 2, 3)
# Create a stage to receive the USD duplicate, with a prim of the same name.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
stage = mayaUsd.lib.GetPrim(psPathStr).GetStage()
aPrim = stage.DefinePrim('/A', 'Xform')
# Duplicate Maya data as USD data. As of 17-Nov-2021 no single-segment
# path handler registered to UFE for Maya path strings, so use absolute
# path.
with mayaUsd.lib.OpUndoItemList():
self.assertTrue(mayaUsd.lib.PrimUpdaterManager.duplicate(
cmds.ls(mayaA, long=True)[0], psPathStr))
# Maya hierarchy should be duplicated in USD, but with a numeric suffix due to the collision.
usdNewAPathStr = psPathStr + ',/' + mayaA + '1'
usdNewAPath = ufe.PathString.path(usdNewAPathStr)
# Translations have been preserved.
usdNewA = ufe.Hierarchy.createItem(usdNewAPath)
usdNewAT3d = ufe.Transform3d.transform3d(usdNewA)
self.assertEqual([1, 2, 3], usdNewAT3d.translation().vector)
def testDuplicateAsUsdUndoRedo(self):
'''Duplicate a Maya transform hierarchy to USD and then undo and redo the command.'''
# Create a hierarchy. Because group1 is selected upon creation, group2
# will be its parent.
group1 = cmds.createNode('transform')
group2 = cmds.group()
self.assertEqual(cmds.listRelatives(group1, parent=True)[0], group2)
cmds.setAttr(group1 + '.translate', 1, 2, 3)
cmds.setAttr(group2 + '.translate', -4, -5, -6)
# Create a stage to receive the USD duplicate.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
# Capture selection before duplicate.
previousSn = cmds.ls(sl=True, ufe=True, long=True)
# Duplicate Maya data as USD data. As of 17-Nov-2021 no single-segment
# path handler registered to UFE for Maya path strings, so use absolute
# path.
cmds.mayaUsdDuplicate(cmds.ls(group2, long=True)[0], psPathStr)
def verifyDuplicate():
# Maya hierarchy should be duplicated in USD.
usdGroup2PathStr = psPathStr + ',/' + group2
usdGroup1PathStr = usdGroup2PathStr + '/' + group1
usdGroup2Path = ufe.PathString.path(usdGroup2PathStr)
usdGroup1Path = ufe.PathString.path(usdGroup1PathStr)
# group1 is the child of group2
usdGroup1 = ufe.Hierarchy.createItem(usdGroup1Path)
usdGroup2 = ufe.Hierarchy.createItem(usdGroup2Path)
usdGroup1Hier = ufe.Hierarchy.hierarchy(usdGroup1)
usdGroup2Hier = ufe.Hierarchy.hierarchy(usdGroup2)
self.assertEqual(usdGroup2, usdGroup1Hier.parent())
self.assertEqual(len(usdGroup2Hier.children()), 1)
self.assertEqual(usdGroup1, usdGroup2Hier.children()[0])
# Translations have been preserved.
usdGroup1T3d = ufe.Transform3d.transform3d(usdGroup1)
usdGroup2T3d = ufe.Transform3d.transform3d(usdGroup2)
self.assertEqual([1, 2, 3], usdGroup1T3d.translation().vector)
self.assertEqual([-4, -5, -6], usdGroup2T3d.translation().vector)
# Selection is on duplicate.
sn = cmds.ls(sl=True, ufe=True, long=True)
self.assertEqual(len(sn), 1)
self.assertEqual(sn[0], usdGroup2PathStr)
verifyDuplicate()
cmds.undo()
def verifyDuplicateIsGone():
# Maya hierarchy should no longer be duplicated in USD.
usdGroup2PathStr = psPathStr + ',/' + group2
usdGroup2Path = ufe.PathString.path(usdGroup2PathStr)
usdGroup2 = ufe.Hierarchy.createItem(usdGroup2Path)
self.assertIsNone(usdGroup2)
self.assertEqual(cmds.ls(sl=True, ufe=True, long=True), previousSn)
verifyDuplicateIsGone()
cmds.redo()
verifyDuplicate()
def testDuplicateWithoutMaterials(self):
'''Duplicate a Maya sphere without merging the materials.'''
# Create a sphere.
sphere = cmds.polySphere(r=1)
# Create a stage to receive the USD duplicate.
psPathStr = mayaUsd_createStageWithNewLayer.createStageWithNewLayer()
stage = mayaUsd.lib.GetPrim(psPathStr).GetStage()
# Duplicate Maya data as USD data with materials
cmds.mayaUsdDuplicate(cmds.ls(sphere, long=True)[0], psPathStr)
# Verify that the copied sphere has a look (material) prim.
looksPrim = stage.GetPrimAtPath("/pSphere1/Looks")
self.assertTrue(looksPrim.IsValid())
# Undo duplicate to USD.
cmds.undo()
# Duplicate Maya data as USD data without materials
cmds.mayaUsdDuplicate(cmds.ls(sphere, long=True)[0], psPathStr, exportOptions='shadingMode=none')
# Verify that the copied sphere does not have a look (material) prim.
looksPrim = stage.GetPrimAtPath("/pSphere1/Looks")
self.assertFalse(looksPrim.IsValid())
if __name__ == '__main__':
unittest.main(verbosity=2)
| 41.10452
| 105
| 0.656175
|
afdff1f7060e387c734f88cb8c6b7e9e877f9116
| 2,167
|
py
|
Python
|
chapter_07/TargetEncode.py
|
MathMachado/Data-Analysis-and-Machine-Learning-with-Kaggle
|
928e670cb13241fb79a527cd42efed9ddf9b2117
|
[
"MIT"
] | null | null | null |
chapter_07/TargetEncode.py
|
MathMachado/Data-Analysis-and-Machine-Learning-with-Kaggle
|
928e670cb13241fb79a527cd42efed9ddf9b2117
|
[
"MIT"
] | 1
|
2021-12-28T21:58:21.000Z
|
2021-12-28T21:58:21.000Z
|
chapter_07/TargetEncode.py
|
MathMachado/Data-Analysis-and-Machine-Learning-with-Kaggle
|
928e670cb13241fb79a527cd42efed9ddf9b2117
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
class TargetEncode(BaseEstimator, TransformerMixin):
def __init__(self, categories='auto', k=1, f=1,
noise_level=0, random_state=None):
self.categories = categories
self.k = k
self.f = f
self.noise_level = noise_level
self.encodings = dict()
self.prior = None
self.random_state = random_state
def add_noise(self, series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def fit(self, X, y=None):
if type(self.categories)==str:
self.categories = np.where(X.dtypes == type(object()))[0]
temp = X.iloc[:, self.categories].copy()
temp['target'] = y
self.prior = np.mean(y)
        for variable in X.columns[self.categories]:
avg = temp.groupby(by=variable)['target'].agg(['mean', 'count'])
# Compute smoothing (see formula 4 on paper)
smoothing = 1 / (1 + np.exp(-(avg['count'] - self.k) / self.f))
# The bigger the count the less full_avg is taken into account
self.encodings[variable] = dict(self.prior * (1 - smoothing) + avg['mean'] * smoothing)
return self
def transform(self, X):
Xt = X.copy()
        for variable in Xt.columns[self.categories]:
Xt[variable].replace(self.encodings[variable], inplace=True)
unknown_value = {value:self.prior for value in X[variable].unique()
if value not in self.encodings[variable].keys()}
if len(unknown_value) > 0:
Xt[variable].replace(unknown_value, inplace=True)
Xt[variable] = Xt[variable].astype(float)
if self.noise_level > 0:
if self.random_state is not None:
np.random.seed(self.random_state)
Xt[variable] = self.add_noise(Xt[variable], self.noise_level)
return Xt
def fit_transform(self, X, y=None):
self.fit(X, y)
return self.transform(X)
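# Illustrative usage sketch (not part of the original module): target-encode a
# single categorical column of a toy DataFrame. The column and category names
# are made up for the example; with the default categories='auto', the
# object-typed 'city' column is detected automatically.
if __name__ == '__main__':
    toy = pd.DataFrame({'city': ['rome', 'rome', 'paris', 'paris', 'oslo']})
    target = pd.Series([1, 0, 1, 1, 0])
    te = TargetEncode()
    # Each city is replaced by a smoothed mean of the target for that city.
    print(te.fit_transform(toy, target))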
| 40.12963 | 99 | 0.576373 |
407a389359b9260c8f2cc894f4468ed7367dba53 | 1,116 | py | Python | base/tests/test_utils.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | ["MIT"] | 2 | 2016-10-03T08:35:07.000Z | 2016-10-04T07:22:20.000Z | base/tests/test_utils.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | ["MIT"] | null | null | null | base/tests/test_utils.py | bpatyi/simpleCRM | bf74f0e0d783ea4538fb96b6790474d991175b51 | ["MIT"] | null | null | null |
import googlemaps
from django.test import TestCase
from base.utils import GeoCoder
from base.models import GeoCoderLog
class GeoCoderTester(TestCase):
def test_invalid_address(self):
geocoder = GeoCoder()
geocoder.geocode('')
address = geocoder.get_address()
self.assertEqual(geocoder.has_error(), True)
self.assertIsInstance(geocoder.get_error(), googlemaps.exceptions.HTTPError)
self.assertEqual(geocoder.get_error().status_code, 400)
self.assertEqual(address.is_valid(), False)
def test_address(self):
geocoder = GeoCoder()
geocoder.geocode('Budapest Király utca 46.')
address = geocoder.get_address()
self.assertEqual(address.is_valid(), True)
self.assertEqual(geocoder.has_error(), False)
self.assertEqual(address.street_number, '46')
self.assertEqual(address.postal_code, '1061')
self.assertEqual(address.locality, 'Budapest')
self.assertEqual(address.route, 'Király utca')
self.assertEqual(address.formatted_address, 'Budapest, Király u. 46, 1061 Hungary')
| 33.818182 | 91 | 0.698029 |
53e377e2d50eb21b8e66122d0ff8c76863e0c1cb | 2,563 | py | Python | resources/Lib/myCollections.py | varunrai/Kodi-RF-Events | c6b86c1ae91dcd5a241ae2ae40276f289400938a | ["Apache-2.0"] | null | null | null | resources/Lib/myCollections.py | varunrai/Kodi-RF-Events | c6b86c1ae91dcd5a241ae2ae40276f289400938a | ["Apache-2.0"] | null | null | null | resources/Lib/myCollections.py | varunrai/Kodi-RF-Events | c6b86c1ae91dcd5a241ae2ae40276f289400938a | ["Apache-2.0"] | null | null | null |
class histlist(object):
'''
histlist
    The history list (histlist) is a list that tracks the history of a
    value. The history list can also notify when the value is changed;
    this is referred to as a step. Two kinds of steps can be tracked: a
    regular step (the value changed) and a delayed step. A delayed step
    requires that the value change a specified number of consecutive
    times.
    On initialization, the histlist class requires either a set of initial
    conditions or a single value that will be used for all the initial
    conditions. If a single value is given, 4 steps of history will be
    tracked. If initial conditions are given, one step will be tracked for
    each entry in the array of initial conditions.
'''
__min_size__ = 4
def __init__(self, IC):
# create initial states
if type(IC) == int or type(IC) == float or type(IC) == bool:
# single value input, spread across all IC's
self.state = list()
self.state = [IC for ind in range(self.__min_size__)]
elif type(IC) == list or type(IC) == tuple:
# IC set was input
IC = list(IC)
if len(IC) < self.__min_size__:
raise ValueError('Input must be of at least length '
+ str(self.__min_size__))
else:
self.state = IC
else:
raise ValueError('Input must be of type bool, int, float, '
+ 'list, or tuple')
self.__len__ = len(self.state)
def set(self, val):
self.state = [val] + [self.state[ind] for ind in
                              range(0, self.__len__ - 1, 1)]
def get(self, step=0):
return self.state[step]
def __str__(self):
return "histlist(" + str(self.state) + ")"
def __iter__(self):
return self.state.__iter__()
def step(self):
return self.get(0) != self.get(1)
def delayed_step(self, inc=2):
step = True
ind = 1
while step and ind <= inc:
step = step and (self.get(ind - 1) != self.get(ind))
ind += 1
return step
def step_on(self):
return self.step() and (self.get(0) > 0)
def delayed_step_on(self, inc=2):
return self.delayed_step(inc) and (self.get(0) > 0)
def step_off(self):
return self.step() and (self.get(0) == 0)
def delayed_step_off(self, inc=2):
return self.delayed_step(inc) and (self.get(0) == 0)
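# Minimal usage sketch (illustrative only, not part of the original file): the
# values below are invented to show how a change in the tracked value registers
# as a step and how a delayed step requires consecutive changes.
if __name__ == '__main__':
    h = histlist(0)            # single IC: four steps of history, all 0
    h.set(1)                   # newest value is 1, older values shift back
    print(h)                   # histlist([1, 0, 0, 0])
    print(h.step())            # True  - the value just changed
    print(h.step_on())         # True  - changed and the new value is > 0
    print(h.delayed_step(2))   # False - needs two consecutive changes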
| 32.858974 | 75 | 0.579789 |
c695c04052f9d0d5798c79b58367862986a7e192 | 393,562 | py | Python | pandas/core/frame.py | wjandrea/pandas | df8acf4201466e77572cdb8125683e75c4715f91 | ["BSD-3-Clause"] | null | null | null | pandas/core/frame.py | wjandrea/pandas | df8acf4201466e77572cdb8125683e75c4715f91 | ["BSD-3-Clause"] | null | null | null | pandas/core/frame.py | wjandrea/pandas | df8acf4201466e77572cdb8125683e75c4715f91 | ["BSD-3-Clause"] | null | null | null |
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import annotations
import collections
from collections import abc
import datetime
import functools
from io import StringIO
import itertools
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
Iterable,
Iterator,
Literal,
Sequence,
cast,
overload,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import (
algos as libalgos,
lib,
properties,
)
from pandas._libs.hashtable import duplicated
from pandas._libs.lib import (
NoDefault,
no_default,
)
from pandas._typing import (
AggFuncType,
AnyArrayLike,
ArrayLike,
Axes,
Axis,
ColspaceArgType,
CompressionOptions,
Dtype,
DtypeObj,
FilePath,
FillnaOptions,
FloatFormatType,
FormattersType,
Frequency,
IgnoreRaise,
IndexKeyFunc,
IndexLabel,
Level,
NaPosition,
PythonFuncType,
ReadBuffer,
Renamer,
Scalar,
SortKind,
StorageOptions,
Suffixes,
TimedeltaConvertibleTypes,
TimestampConvertibleTypes,
ValueKeyFunc,
WriteBuffer,
npt,
)
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
deprecate_nonkeyword_arguments,
doc,
rewrite_axis_style_signature,
)
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
validate_ascending,
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
LossySetitemError,
can_hold_element,
construct_1d_arraylike_from_scalar,
construct_2d_arraylike_from_scalar,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_box_native,
maybe_downcast_to_dtype,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
infer_dtype_from_object,
is_1d_only_ea_dtype,
is_bool_dtype,
is_dataclass,
is_datetime64_any_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
common as com,
nanops,
ops,
)
from pandas.core.accessor import CachedAccessor
from pandas.core.apply import (
reconstruct_func,
relabel_result,
)
from pandas.core.array_algos.take import take_2d_multi
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.construction import (
extract_array,
sanitize_array,
sanitize_masked_array,
)
from pandas.core.generic import NDFrame
from pandas.core.indexers import check_key_length
from pandas.core.indexes.api import (
DatetimeIndex,
Index,
PeriodIndex,
default_index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.core.indexes.multi import (
MultiIndex,
maybe_droplevels,
)
from pandas.core.indexing import (
check_bool_indexer,
check_deprecated_indexers,
convert_to_index_sliceable,
)
from pandas.core.internals import (
ArrayManager,
BlockManager,
)
from pandas.core.internals.construction import (
arrays_to_mgr,
dataclasses_to_dicts,
dict_to_mgr,
mgr_to_mgr,
ndarray_to_mgr,
nested_data_to_arrays,
rec_array_to_mgr,
reorder_arrays,
to_arrays,
treat_as_nested,
)
from pandas.core.reshape.melt import melt
from pandas.core.series import Series
from pandas.core.shared_docs import _shared_docs
from pandas.core.sorting import (
get_group_index,
lexsort_indexer,
nargsort,
)
from pandas.io.common import get_handle
from pandas.io.formats import (
console,
format as fmt,
)
from pandas.io.formats.info import (
INFO_DOCSTRING,
DataFrameInfo,
frame_sub_kwargs,
)
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.exchange.dataframe_protocol import DataFrame as DataFrameXchg
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.core.internals import SingleDataManager
from pandas.core.resample import Resampler
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = {
"axes": "index, columns",
"klass": "DataFrame",
"axes_single_arg": "{0 or 'index', 1 or 'columns'}",
"axis": """axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
"inplace": """
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.""",
"optional_by": """
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.""",
"optional_labels": """labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
"optional_axis": """axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
"replace_iloc": """
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.""",
}
_numeric_only_doc = """numeric_only : bool or None, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
A named Series object is treated as a DataFrame with a single named column.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
When performing a cross merge, no column specifications to merge on are
allowed.
.. warning::
If both key columns contain rows where the key is a null value, those
rows will be matched against each other. This is different from usual SQL
join behaviour and can lead to unexpected results.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner', 'cross'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : list-like, default is ("_x", "_y")
A length-2 sequence where each element is optionally a string
indicating the suffix to add to overlapping column names in
`left` and `right` respectively. Pass a value of `None` instead
of a string to indicate that the column name from `left` or
`right` should be left as-is, with no suffix. At least one of the
values must not be None.
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to the output DataFrame called "_merge" with
information on the source of each row. The column can be given a different
name by providing a string argument. The column will have a Categorical
type with the value of "left_only" for observations whose merge key only
appears in the left DataFrame, "right_only" for observations
whose merge key only appears in the right DataFrame, and "both"
if the observation's merge key is found in both DataFrames.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
>>> df1 = pd.DataFrame({'a': ['foo', 'bar'], 'b': [1, 2]})
>>> df2 = pd.DataFrame({'a': ['foo', 'baz'], 'c': [3, 4]})
>>> df1
a b
0 foo 1
1 bar 2
>>> df2
a c
0 foo 3
1 baz 4
>>> df1.merge(df2, how='inner', on='a')
a b c
0 foo 1 3
>>> df1.merge(df2, how='left', on='a')
a b c
0 foo 1 3.0
1 bar 2 NaN
>>> df1 = pd.DataFrame({'left': ['foo', 'bar']})
>>> df2 = pd.DataFrame({'right': [7, 8]})
>>> df1
left
0 foo
1 bar
>>> df2
right
0 7
1 8
>>> df1.merge(df2, how='cross')
left right
0 foo 7
1 foo 8
2 bar 7
3 bar 8
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame, OpsMixin):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, dataclass or list-like objects. If
data is a dict, column order follows insertion-order. If a dict contains Series
which have an index defined, it is aligned by its index.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame when data does not have them,
defaulting to RangeIndex(0, 1, 2, ..., n). If data contains column labels,
will perform column selection instead.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool or None, default None
Copy data from inputs.
For dict data, the default of None behaves like ``copy=True``. For DataFrame
or 2d ndarray input, the default of None behaves like ``copy=False``.
If data is a dict containing one or more Series (possibly of different dtypes),
``copy=False`` will ensure that these inputs are not copied.
.. versionchanged:: 1.3.0
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv : Read a comma-separated values (csv) file into DataFrame.
read_table : Read general delimited file into DataFrame.
read_clipboard : Read text from clipboard into DataFrame.
Notes
-----
Please reference the :ref:`User Guide <basics.dataframe>` for more information.
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from a dictionary including Series:
>>> d = {'col1': [0, 1, 2, 3], 'col2': pd.Series([2, 3], index=[2, 3])}
>>> pd.DataFrame(data=d, index=[0, 1, 2, 3])
col1 col2
0 0 NaN
1 1 NaN
2 2 2.0
3 3 3.0
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
Constructing DataFrame from a numpy ndarray that has labeled columns:
>>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
... dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
>>> df3 = pd.DataFrame(data, columns=['c', 'a'])
...
>>> df3
c a
0 3 1
1 6 4
2 9 7
Constructing DataFrame from dataclass:
>>> from dataclasses import make_dataclass
>>> Point = make_dataclass("Point", [("x", int), ("y", int)])
>>> pd.DataFrame([Point(0, 0), Point(0, 3), Point(2, 3)])
x y
0 0 0
1 0 3
2 2 3
"""
_internal_names_set = {"columns", "index"} | NDFrame._internal_names_set
_typ = "dataframe"
_HANDLED_TYPES = (Series, Index, ExtensionArray, np.ndarray)
_accessors: set[str] = {"sparse"}
_hidden_attrs: frozenset[str] = NDFrame._hidden_attrs | frozenset([])
_mgr: BlockManager | ArrayManager
@property
def _constructor(self) -> Callable[..., DataFrame]:
return DataFrame
_constructor_sliced: Callable[..., Series] = Series
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Axes | None = None,
columns: Axes | None = None,
dtype: Dtype | None = None,
copy: bool | None = None,
) -> None:
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, (BlockManager, ArrayManager)):
# first check if a Manager is passed without any other arguments
# -> use fastpath (without checking Manager type)
if index is None and columns is None and dtype is None and not copy:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
manager = get_option("mode.data_manager")
if copy is None:
if isinstance(data, dict):
# retain pre-GH#38939 default behavior
copy = True
elif (
manager == "array"
and isinstance(data, (np.ndarray, ExtensionArray))
and data.ndim == 2
):
# INFO(ArrayManager) by default copy the 2D input array to get
# contiguous 1D arrays
copy = True
else:
copy = False
if isinstance(data, (BlockManager, ArrayManager)):
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
# GH#38939 de facto copy defaults to False only in non-dict cases
mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = rec_array_to_mgr(
data,
index,
columns,
dtype,
copy,
typ=manager,
)
warnings.warn(
"Support for MaskedRecords is deprecated and will be "
"removed in a future version. Pass "
"{name: data[name] for name in data.dtype.names} instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
elif isinstance(data, (np.ndarray, Series, Index, ExtensionArray)):
if data.dtype.names:
# i.e. numpy structured array
data = cast(np.ndarray, data)
mgr = rec_array_to_mgr(
data,
index,
columns,
dtype,
copy,
typ=manager,
)
elif getattr(data, "name", None) is not None:
# i.e. Series/Index with non-None name
mgr = dict_to_mgr(
# error: Item "ndarray" of "Union[ndarray, Series, Index]" has no
# attribute "name"
{data.name: data}, # type: ignore[union-attr]
index,
columns,
dtype=dtype,
typ=manager,
)
else:
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
# For data is list-like, or Iterable (will consume into list)
elif is_list_like(data):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
if hasattr(data, "__array__"):
# GH#44616 big perf improvement for e.g. pytorch tensor
data = np.asarray(data)
else:
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if not isinstance(data, np.ndarray) and treat_as_nested(data):
# exclude ndarray as we may have cast it a few lines above
if columns is not None:
# error: Argument 1 to "ensure_index" has incompatible type
# "Collection[Any]"; expected "Union[Union[Union[ExtensionArray,
# ndarray], Index, Series], Sequence[Any]]"
columns = ensure_index(columns) # type: ignore[arg-type]
arrays, columns, index = nested_data_to_arrays(
# error: Argument 3 to "nested_data_to_arrays" has incompatible
# type "Optional[Collection[Any]]"; expected "Optional[Index]"
data,
columns,
index, # type: ignore[arg-type]
dtype,
)
mgr = arrays_to_mgr(
arrays,
columns,
index,
dtype=dtype,
typ=manager,
)
else:
mgr = ndarray_to_mgr(
data,
index,
columns,
dtype=dtype,
copy=copy,
typ=manager,
)
else:
mgr = dict_to_mgr(
{},
index,
columns,
dtype=dtype,
typ=manager,
)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
# Argument 1 to "ensure_index" has incompatible type "Collection[Any]";
# expected "Union[Union[Union[ExtensionArray, ndarray],
# Index, Series], Sequence[Any]]"
index = ensure_index(index) # type: ignore[arg-type]
# Argument 1 to "ensure_index" has incompatible type "Collection[Any]";
# expected "Union[Union[Union[ExtensionArray, ndarray],
# Index, Series], Sequence[Any]]"
columns = ensure_index(columns) # type: ignore[arg-type]
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if isinstance(dtype, ExtensionDtype):
# TODO(EA2D): special case not needed with 2D EAs
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, dtype=None, typ=manager)
else:
arr2d = construct_2d_arraylike_from_scalar(
data,
len(index),
len(columns),
dtype,
copy,
)
mgr = ndarray_to_mgr(
arr2d,
index,
columns,
dtype=arr2d.dtype,
copy=False,
typ=manager,
)
# ensure correct Manager type according to settings
mgr = mgr_to_mgr(mgr, typ=manager)
NDFrame.__init__(self, mgr)
# ----------------------------------------------------------------------
def __dataframe__(
self, nan_as_null: bool = False, allow_copy: bool = True
) -> DataFrameXchg:
"""
Return the dataframe exchange object implementing the exchange protocol.
Parameters
----------
nan_as_null : bool, default False
Whether to tell the DataFrame to overwrite null values in the data
with ``NaN`` (or ``NaT``).
allow_copy : bool, default True
Whether to allow memory copying when exporting. If set to False
it would cause non-zero-copy exports to fail.
Returns
-------
DataFrame exchange object
The object which consuming library can use to ingress the dataframe.
Notes
-----
Details on the exchange protocol:
https://data-apis.org/dataframe-protocol/latest/index.html
`nan_as_null` currently has no effect; once support for nullable extension
dtypes is added, this value should be propagated to columns.
"""
from pandas.core.exchange.dataframe import PandasDataFrameXchg
return PandasDataFrameXchg(self, nan_as_null, allow_copy)
# ----------------------------------------------------------------------
@property
def axes(self) -> list[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape : Tuple of array dimensions.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if isinstance(self._mgr, ArrayManager):
return len({arr.dtype for arr in self._mgr.arrays}) == 1
if self._mgr.any_extension_types:
return len({block.dtype for block in self._mgr.blocks}) == 1
else:
return not self._is_mixed_type
@property
def _can_fast_transpose(self) -> bool:
"""
Can we transpose this DataFrame without creating any new array objects.
"""
if isinstance(self._mgr, ArrayManager):
return False
blocks = self._mgr.blocks
if len(blocks) != 1:
return False
dtype = blocks[0].dtype
# TODO(EA2D) special case would be unnecessary with 2D EAs
return not is_1d_only_ea_dtype(dtype)
# error: Return type "Union[ndarray, DatetimeArray, TimedeltaArray]" of
# "_values" incompatible with return type "ndarray" in supertype "NDFrame"
@property
def _values( # type: ignore[override]
self,
) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray:
"""
Analogue to ._values that may return a 2D ExtensionArray.
"""
self._consolidate_inplace()
mgr = self._mgr
if isinstance(mgr, ArrayManager):
if len(mgr.arrays) == 1 and not is_1d_only_ea_dtype(mgr.arrays[0].dtype):
# error: Item "ExtensionArray" of "Union[ndarray, ExtensionArray]"
# has no attribute "reshape"
return mgr.arrays[0].reshape(-1, 1) # type: ignore[union-attr]
return self.values
blocks = mgr.blocks
if len(blocks) != 1:
return self.values
arr = blocks[0].values
if arr.ndim == 1:
# non-2D ExtensionArray
return self.values
# more generally, whatever we allow in NDArrayBackedExtensionBlock
arr = cast("np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray", arr)
return arr.T
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
In case of non-interactive session, no boundaries apply.
`ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if max_rows is not None: # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(line) for line in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
if self._info_repr():
buf = StringIO()
self.info(buf=buf)
return buf.getvalue()
repr_params = fmt.get_dataframe_repr_params()
return self.to_string(**repr_params)
def _repr_html_(self) -> str | None:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO()
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"<", 1)
val = val.replace(">", r">", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
)
return fmt.DataFrameRenderer(formatter).to_html(notebook=True)
else:
return None
@overload
def to_string(
self,
buf: None = ...,
columns: Sequence[str] | None = ...,
col_space: int | list[int] | dict[Hashable, int] | None = ...,
header: bool | Sequence[str] = ...,
index: bool = ...,
na_rep: str = ...,
formatters: fmt.FormattersType | None = ...,
float_format: fmt.FloatFormatType | None = ...,
sparsify: bool | None = ...,
index_names: bool = ...,
justify: str | None = ...,
max_rows: int | None = ...,
max_cols: int | None = ...,
show_dimensions: bool = ...,
decimal: str = ...,
line_width: int | None = ...,
min_rows: int | None = ...,
max_colwidth: int | None = ...,
encoding: str | None = ...,
) -> str:
...
@overload
def to_string(
self,
buf: FilePath | WriteBuffer[str],
columns: Sequence[str] | None = ...,
col_space: int | list[int] | dict[Hashable, int] | None = ...,
header: bool | Sequence[str] = ...,
index: bool = ...,
na_rep: str = ...,
formatters: fmt.FormattersType | None = ...,
float_format: fmt.FloatFormatType | None = ...,
sparsify: bool | None = ...,
index_names: bool = ...,
justify: str | None = ...,
max_rows: int | None = ...,
max_cols: int | None = ...,
show_dimensions: bool = ...,
decimal: str = ...,
line_width: int | None = ...,
min_rows: int | None = ...,
max_colwidth: int | None = ...,
encoding: str | None = ...,
) -> None:
...
@Substitution(
header_type="bool or sequence of str",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int, list or dict of int",
col_space="The minimum width of each column. If a list of ints is given "
"every integers corresponds with one column. If a dict is given, the key "
"references the column, while the value defines the space to use.",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: int | list[int] | dict[Hashable, int] | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: fmt.FormattersType | None = None,
float_format: fmt.FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: int | None = None,
min_rows: int | None = None,
max_colwidth: int | None = None,
encoding: str | None = None,
) -> str | None:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
min_rows : int, optional
The number of rows to display in the console in a truncated repr
(when number of rows is above `max_rows`).
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
)
return fmt.DataFrameRenderer(formatter).to_string(
buf=buf,
encoding=encoding,
line_width=line_width,
)
# ----------------------------------------------------------------------
@property
def style(self) -> Styler:
"""
Returns a Styler object.
Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler : Helps style a DataFrame or Series according to the
data with HTML and CSS.
"""
from pandas.io.formats.style import Styler
return Styler(self)
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print(f'label: {label}')
... print(f'content: {content}', sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[tuple[Hashable, Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[tuple[Hashable, Series]]:
warnings.warn(
"iteritems is deprecated and will be removed in a future version. "
"Use .items instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
yield from self.items()
def iterrows(self) -> Iterable[tuple[Hashable, Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k).__finalize__(self)
yield k, s
def itertuples(
self, index: bool = True, name: str | None = "Pandas"
) -> Iterable[tuple[Any, ...]]:
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
if name is not None:
# https://github.com/python/mypy/issues/9046
# error: namedtuple() expects a string literal as the first argument
itertuple = collections.namedtuple( # type: ignore[misc]
name, fields, rename=True
)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
@overload
def dot(self, other: Series) -> Series:
...
@overload
def dot(self, other: DataFrame | Index | ArrayLike) -> DataFrame:
...
def dot(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
If other is a Series, return the matrix product between self and
other as a Series. If other is a DataFrame or a numpy.array, return
the matrix product of self and other in a DataFrame of a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
Note that the dot method give the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method works also if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right._values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return self._constructor_sliced(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return self._constructor_sliced(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
@overload
def __matmul__(self, other: Series) -> Series:
...
@overload
def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
...
def __matmul__(self, other: AnyArrayLike | DataFrame) -> DataFrame | Series:
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
try:
return self.T.dot(np.transpose(other)).T
except ValueError as err:
if "shape mismatch" not in str(err):
raise
# GH#21581 give exception message for original shapes
msg = f"shapes {np.shape(other)} and {self.shape} not aligned"
raise ValueError(msg) from err
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(
cls,
data,
orient: str = "columns",
dtype: Dtype | None = None,
columns=None,
) -> DataFrame:
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index', 'tight'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
If 'tight', assume a dict with keys ['index', 'columns', 'data',
'index_names', 'column_names'].
.. versionadded:: 1.4.0
'tight' as an allowed value for the ``orient`` argument
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'`` or ``orient='tight'``.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from structured ndarray, sequence
of tuples or dicts, or DataFrame.
DataFrame : DataFrame object creation using constructor.
DataFrame.to_dict : Convert the DataFrame to a dictionary.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
Specify ``orient='tight'`` to create the DataFrame using a 'tight'
format:
>>> data = {'index': [('a', 'b'), ('a', 'c')],
... 'columns': [('x', 1), ('y', 2)],
... 'data': [[1, 3], [2, 4]],
... 'index_names': ['n1', 'n2'],
... 'column_names': ['z1', 'z2']}
>>> pd.DataFrame.from_dict(data, orient='tight')
z1 x y
z2 1 2
n1 n2
a b 1 3
c 2 4
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns" or orient == "tight":
if columns is not None:
raise ValueError(f"cannot use columns parameter with orient='{orient}'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
if orient != "tight":
return cls(data, index=index, columns=columns, dtype=dtype)
else:
realdata = data["data"]
def create_index(indexlist, namelist):
index: Index
if len(namelist) > 1:
index = MultiIndex.from_tuples(indexlist, names=namelist)
else:
index = Index(indexlist, name=namelist[0])
return index
index = create_index(data["index"], data["index_names"])
columns = create_index(data["columns"], data["column_names"])
return cls(realdata, index=index, columns=columns, dtype=dtype)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the dtypes of the DataFrame columns.
.. versionadded:: 1.1.0
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
self._consolidate_inplace()
if dtype is not None:
dtype = np.dtype(dtype)
result = self._mgr.as_array(dtype=dtype, copy=copy, na_value=na_value)
if result.dtype is not dtype:
result = np.array(result, dtype=dtype, copy=False)
return result
def to_dict(self, orient: str = "dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'tight' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values],
'index_names' -> [index.names], 'column_names' -> [column.names]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
.. versionadded:: 1.4.0
'tight' as an allowed value for the ``orient`` argument
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
>>> df.to_dict('tight')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]], 'index_names': [None], 'column_names': [None]}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=find_stack_level(),
)
# GH16122
into_c = com.standardize_mapping(into)
orient = orient.lower()
# GH32515
if orient.startswith(("d", "l", "s", "r", "i")) and orient not in {
"dict",
"list",
"series",
"split",
"records",
"index",
}:
warnings.warn(
"Using short name for 'orient' is deprecated. Only the "
"options: ('dict', list, 'series', 'split', 'records', 'index') "
"will be used in a future version. Use one of the above "
"to silence this warning.",
FutureWarning,
stacklevel=find_stack_level(),
)
if orient.startswith("d"):
orient = "dict"
elif orient.startswith("l"):
orient = "list"
elif orient.startswith("sp"):
orient = "split"
elif orient.startswith("s"):
orient = "series"
elif orient.startswith("r"):
orient = "records"
elif orient.startswith("i"):
orient = "index"
if orient == "dict":
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient == "list":
return into_c(
(k, list(map(maybe_box_native, v.tolist()))) for k, v in self.items()
)
elif orient == "split":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_native, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient == "tight":
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(maybe_box_native, t))
for t in self.itertuples(index=False, name=None)
],
),
("index_names", list(self.index.names)),
("column_names", list(self.columns.names)),
)
)
elif orient == "series":
return into_c((k, v) for k, v in self.items())
elif orient == "records":
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, maybe_box_native(v)) for k, v in row.items()) for row in rows
]
elif orient == "index":
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, map(maybe_box_native, t[1:]))))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table: str,
project_id: str | None = None,
chunksize: int | None = None,
reauth: bool = False,
if_exists: str = "fail",
auth_local_webserver: bool = True,
table_schema: list[dict[str, str]] | None = None,
location: str | None = None,
progress_bar: bool = True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default True
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
https://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
.. versionchanged:: 1.5.0
Default value is changed to ``True``. Google has deprecated the
``auth_local_webserver = False`` `"out of band" (copy-paste)
flow
<https://developers.googleblog.com/2022/02/making-oauth-flows-safer.html?m=1#disallowed-oob>`_.
table_schema : list of dicts, optional
List of BigQuery table fields to which the DataFrame columns conform,
e.g. ``[{'name': 'col1', 'type': 'STRING'},...]``. If a schema is not
provided, it will be generated according to the dtypes of the DataFrame
columns. See the BigQuery API documentation for the available field
names.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
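Examples
--------
A minimal usage sketch; the dataset, table, and project names below are
placeholders, and the call requires the optional ``pandas-gbq`` package,
so the doctest is skipped.
>>> df = pd.DataFrame({'my_string': ['a', 'b', 'c'],
...                    'my_int': [1, 2, 3]})
>>> df.to_gbq('my_dataset.my_table',
...           project_id='my-project',
...           if_exists='replace')  # doctest: +SKIP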
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float: bool = False,
nrows: int | None = None,
) -> DataFrame:
"""
Convert structured or record ndarray to DataFrame.
Creates a DataFrame object from a structured ndarray, sequence of
tuples or dicts, or DataFrame.
Parameters
----------
data : structured ndarray, sequence of tuples or dicts, or DataFrame
Structured input data.
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
See Also
--------
DataFrame.from_dict : DataFrame from dict of array-like or dicts.
DataFrame : DataFrame object creation using constructor.
Examples
--------
Data can be provided as a structured ndarray:
>>> data = np.array([(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')],
... dtype=[('col_1', 'i4'), ('col_2', 'U1')])
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of dicts:
>>> data = [{'col_1': 3, 'col_2': 'a'},
... {'col_1': 2, 'col_2': 'b'},
... {'col_1': 1, 'col_2': 'c'},
... {'col_1': 0, 'col_2': 'd'}]
>>> pd.DataFrame.from_records(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Data can be provided as a list of tuples with corresponding columns:
>>> data = [(3, 'a'), (2, 'b'), (1, 'c'), (0, 'd')]
>>> pd.DataFrame.from_records(data, columns=['col_1', 'col_2'])
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
"""
result_index = None
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
def maybe_reorder(
arrays: list[ArrayLike], arr_columns: Index, columns: Index, index
) -> tuple[list[ArrayLike], Index, Index | None]:
"""
If our desired 'columns' do not match the data's pre-existing 'arr_columns',
we re-order our arrays. This is like a pre-emptive (cheap) reindex.
"""
if len(arrays):
length = len(arrays[0])
else:
length = 0
result_index = None
if len(arrays) == 0 and index is None and length == 0:
# for backward compat use an object Index instead of RangeIndex
result_index = Index([])
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, length)
return arrays, arr_columns, result_index
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns_list = []
for k, v in data.items():
if k in columns:
arr_columns_list.append(k)
arrays.append(v)
arr_columns = Index(arr_columns_list)
arrays, arr_columns, result_index = maybe_reorder(
arrays, arr_columns, columns, index
)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns)
if coerce_float:
for i, arr in enumerate(arrays):
if arr.dtype == object:
# error: Argument 1 to "maybe_convert_objects" has
# incompatible type "Union[ExtensionArray, ndarray]";
# expected "ndarray"
arrays[i] = lib.maybe_convert_objects(
arr, # type: ignore[arg-type]
try_float=True,
)
arr_columns = ensure_index(arr_columns)
if columns is None:
columns = arr_columns
else:
arrays, arr_columns, result_index = maybe_reorder(
arrays, arr_columns, columns, index
)
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
columns = columns.drop(exclude)
manager = get_option("mode.data_manager")
mgr = arrays_to_mgr(arrays, columns, result_index, typ=manager)
return cls(mgr)
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = list(map(np.array, zip(*self.index._values)))
else:
# error: List item 0 has incompatible type "ArrayLike"; expected
# "ndarray"
ix_vals = [self.index.values] # type: ignore[list-item]
arrays = ix_vals + [
np.asarray(self.iloc[:, i]) for i in range(len(self.columns))
]
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
index_names = com.fill_missing_names(index_names)
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [np.asarray(self.iloc[:, i]) for i in range(len(self.columns))]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns), whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
# Argument 1 to "append" of "list" has incompatible type
# "Union[type, dtype[Any], str]"; expected "dtype[_SCT]" [arg-type]
formats.append(dtype_mapping) # type: ignore[arg-type]
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(
cls,
arrays,
columns,
index,
dtype: Dtype | None = None,
verify_integrity: bool = True,
) -> DataFrame:
"""
Create DataFrame from a list of arrays corresponding to the columns.
Parameters
----------
arrays : list-like of arrays
Each array in the list corresponds to one column, in order.
columns : list-like, Index
The column names for the resulting DataFrame.
index : list-like, Index
The row labels for the resulting DataFrame.
dtype : dtype, optional
Optional dtype to enforce for all arrays.
verify_integrity : bool, default True
Validate and homogenize all input. If set to False, it is assumed
that all elements of `arrays` are actual arrays as they will be
stored in a block (numpy ndarray or ExtensionArray), that they have
the same length as, and are aligned with, the index, and that
`columns` and `index` are already Index objects.
Returns
-------
DataFrame
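Examples
--------
Illustrative only; this is an internal constructor, not public API, and
the arrays shown are arbitrary.
>>> import numpy as np
>>> pd.DataFrame._from_arrays(
...     [np.array([1, 2]), np.array([3.0, 4.0])],
...     columns=['a', 'b'],
...     index=[0, 1],
... )
   a    b
0  1  3.0
1  2  4.0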
"""
if dtype is not None:
dtype = pandas_dtype(dtype)
manager = get_option("mode.data_manager")
columns = ensure_index(columns)
if len(columns) != len(arrays):
raise ValueError("len(columns) must match len(arrays)")
mgr = arrays_to_mgr(
arrays,
columns,
index,
dtype=dtype,
verify_integrity=verify_integrity,
typ=manager,
)
return cls(mgr)
@doc(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path",
)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path: FilePath | WriteBuffer[bytes],
convert_dates: dict[Hashable, str] | None = None,
write_index: bool = True,
byteorder: str | None = None,
time_stamp: datetime.datetime | None = None,
data_label: str | None = None,
variable_labels: dict[Hashable, str] | None = None,
version: int | None = 114,
convert_strl: Sequence[Hashable] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
*,
value_labels: dict[Hashable, dict[float | int, str]] | None = None,
) -> None:
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, path object, or buffer
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
", "<",">
Can be ">", "<", "little", or "big". The default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {{114, 117, 118, 119, None}}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
Version 119 should usually only be used when the number of
variables exceeds the capacity of dta format 118. Exporting
smaller datasets in format 119 may have unintended consequences,
and, as of November 2020, Stata SE cannot read version 119 files.
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
{compression_options}
.. versionadded:: 1.1.0
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
.. versionadded:: 1.2.0
value_labels : dict of dicts
Dictionary containing columns as keys and dictionaries of column value
to labels as values. Labels for a single variable must be 32,000
characters or smaller.
.. versionadded:: 1.4.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
* Columns listed in convert_dates are neither datetime64[ns]
nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({{'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriter117 as statawriter,
)
else: # versions 118 and 119
# mypy: Name 'statawriter' already defined (possibly by an import)
from pandas.io.stata import ( # type: ignore[no-redef]
StataWriterUTF8 as statawriter,
)
kwargs: dict[str, Any] = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
writer = statawriter(
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
compression=compression,
storage_options=storage_options,
value_labels=value_labels,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path: FilePath | WriteBuffer[bytes], **kwargs) -> None:
"""
Write a DataFrame to the binary Feather format.
Parameters
----------
path : str, path object, file-like object
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If a string or a path,
it will be used as Root Directory path when writing a partitioned dataset.
**kwargs :
Additional keywords passed to :func:`pyarrow.feather.write_feather`.
Starting with pyarrow 0.17, this includes the `compression`,
`compression_level`, `chunksize` and `version` keywords.
.. versionadded:: 1.1.0
Notes
-----
This function writes the dataframe as a `feather file
<https://arrow.apache.org/docs/python/feather.html>`_. Requires a default
index. For saving the DataFrame with your custom index, use a method
that supports custom indices, e.g. `to_parquet`.
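Examples
--------
An illustrative round trip; it requires the optional ``pyarrow`` package
and writes to disk, so the doctest is skipped.
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3.0, 4.0]})
>>> df.to_feather('out.feather')  # doctest: +SKIP
>>> pd.read_feather('out.feather')  # doctest: +SKIP
   col1  col2
0     1   3.0
1     2   4.0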
"""
from pandas.io.feather_format import to_feather
to_feather(self, path, **kwargs)
@doc(
Series.to_markdown,
klass=_shared_doc_kwargs["klass"],
storage_options=_shared_docs["storage_options"],
examples="""Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
Output markdown with a tabulate option.
>>> print(df.to_markdown(tablefmt="grid"))
+----+------------+------------+
| | animal_1 | animal_2 |
+====+============+============+
| 0 | elk | dog |
+----+------------+------------+
| 1 | pig | quetzal |
+----+------------+------------+""",
)
def to_markdown(
self,
buf: FilePath | WriteBuffer[str] | None = None,
mode: str = "wt",
index: bool = True,
storage_options: StorageOptions = None,
**kwargs,
) -> str | None:
if "showindex" in kwargs:
warnings.warn(
"'showindex' is deprecated. Only 'index' will be used "
"in a future version. Use 'index' to silence this warning.",
FutureWarning,
stacklevel=find_stack_level(),
)
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
kwargs.setdefault("showindex", index)
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
with get_handle(buf, mode, storage_options=storage_options) as handles:
handles.handle.write(result)
return None
@doc(storage_options=_shared_docs["storage_options"])
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path: FilePath | WriteBuffer[bytes] | None = None,
engine: str = "auto",
compression: str | None = "snappy",
index: bool | None = None,
partition_cols: list[str] | None = None,
storage_options: StorageOptions = None,
**kwargs,
) -> bytes | None:
"""
Write a DataFrame to the binary parquet format.
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a binary ``write()`` function. If None, the result is
returned as bytes. If a string or path, it will be used as Root Directory
path when writing a partitioned dataset.
.. versionchanged:: 1.2.0
Previously this was "fname"
engine : {{'auto', 'pyarrow', 'fastparquet'}}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {{'snappy', 'gzip', 'brotli', None}}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
Must be None if path is not a string.
{storage_options}
.. versionadded:: 1.2.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
Returns
-------
bytes if no path argument is provided else None
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={{'col1': [1, 2], 'col2': [3, 4]}})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
If you want to get a buffer to the parquet content you can use an io.BytesIO
object, as long as you don't use partition_cols, which creates multiple files.
>>> import io
>>> f = io.BytesIO()
>>> df.to_parquet(f)
>>> f.seek(0)
0
>>> content = f.read()
"""
from pandas.io.parquet import to_parquet
return to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
storage_options=storage_options,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int, list or dict of int or str",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf: FilePath | WriteBuffer[str] | None = None,
columns: Sequence[str] | None = None,
col_space: ColspaceArgType | None = None,
header: bool | Sequence[str] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: FormattersType | None = None,
float_format: FloatFormatType | None = None,
sparsify: bool | None = None,
index_names: bool = True,
justify: str | None = None,
max_rows: int | None = None,
max_cols: int | None = None,
show_dimensions: bool | str = False,
decimal: str = ".",
bold_rows: bool = True,
classes: str | list | tuple | None = None,
escape: bool = True,
notebook: bool = False,
border: int | bool | None = None,
table_id: str | None = None,
render_links: bool = False,
encoding: str | None = None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
render_links : bool, default False
Convert URLs to HTML links.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
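Examples
--------
A small sketch of the produced markup; the exact output depends on
display options, so the doctest is skipped and abbreviated.
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': ['a', 'b']})
>>> print(df.to_html())  # doctest: +SKIP
<table border="1" class="dataframe">
...
</table>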
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
justify=justify,
index_names=index_names,
escape=escape,
decimal=decimal,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
)
# TODO: a generic formatter would be in DataFrameFormatter
return fmt.DataFrameRenderer(formatter).to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
table_id=table_id,
render_links=render_links,
)
@doc(
storage_options=_shared_docs["storage_options"],
compression_options=_shared_docs["compression_options"] % "path_or_buffer",
)
def to_xml(
self,
path_or_buffer: FilePath | WriteBuffer[bytes] | WriteBuffer[str] | None = None,
index: bool = True,
root_name: str | None = "data",
row_name: str | None = "row",
na_rep: str | None = None,
attr_cols: list[str] | None = None,
elem_cols: list[str] | None = None,
namespaces: dict[str | None, str] | None = None,
prefix: str | None = None,
encoding: str = "utf-8",
xml_declaration: bool | None = True,
pretty_print: bool | None = True,
parser: str | None = "lxml",
stylesheet: FilePath | ReadBuffer[str] | ReadBuffer[bytes] | None = None,
compression: CompressionOptions = "infer",
storage_options: StorageOptions = None,
) -> str | None:
"""
Render a DataFrame to an XML document.
.. versionadded:: 1.3.0
Parameters
----------
path_or_buffer : str, path object, file-like object, or None, default None
String, path object (implementing ``os.PathLike[str]``), or file-like
object implementing a ``write()`` function. If None, the result is returned
as a string.
index : bool, default True
Whether to include index in XML document.
root_name : str, default 'data'
The name of root element in XML document.
row_name : str, default 'row'
The name of row element in XML document.
na_rep : str, optional
Missing data representation.
attr_cols : list-like, optional
List of columns to write as attributes in row element.
Hierarchical columns will be flattened with underscore
delimiting the different levels.
elem_cols : list-like, optional
List of columns to write as children in row element. By default,
all columns output as children of row element. Hierarchical
columns will be flattened with underscore delimiting the
different levels.
namespaces : dict, optional
All namespaces to be defined in root element. Keys of dict
should be prefix names and values of dict corresponding URIs.
Default namespaces should be given empty string key. For
example, ::
namespaces = {{"": "https://example.com"}}
prefix : str, optional
Namespace prefix to be used for every element and/or attribute
in document. This should be one of the keys in ``namespaces``
dict.
encoding : str, default 'utf-8'
Encoding of the resulting document.
xml_declaration : bool, default True
Whether to include the XML declaration at start of document.
pretty_print : bool, default True
Whether output should be pretty printed with indentation and
line breaks.
parser : {{'lxml','etree'}}, default 'lxml'
Parser module to use for building of tree. Only 'lxml' and
'etree' are supported. With 'lxml', the ability to use XSLT
stylesheet is supported.
stylesheet : str, path object or file-like object, optional
A URL, file-like object, or a raw string containing an XSLT
script used to transform the raw XML output. Script should use
layout of elements and attributes from original output. This
argument requires ``lxml`` to be installed. Only XSLT 1.0
scripts, not later versions, are currently supported.
{compression_options}
.. versionchanged:: 1.4.0 Zstandard support.
{storage_options}
Returns
-------
None or str
If ``io`` is None, returns the resulting XML format as a
string. Otherwise returns None.
See Also
--------
to_json : Convert the pandas object to a JSON string.
to_html : Convert DataFrame to HTML.
Examples
--------
>>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
... 'degrees': [360, 360, 180],
... 'sides': [4, np.nan, 3]}})
>>> df.to_xml() # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row>
<index>0</index>
<shape>square</shape>
<degrees>360</degrees>
<sides>4.0</sides>
</row>
<row>
<index>1</index>
<shape>circle</shape>
<degrees>360</degrees>
<sides/>
</row>
<row>
<index>2</index>
<shape>triangle</shape>
<degrees>180</degrees>
<sides>3.0</sides>
</row>
</data>
>>> df.to_xml(attr_cols=[
... 'index', 'shape', 'degrees', 'sides'
... ]) # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<data>
<row index="0" shape="square" degrees="360" sides="4.0"/>
<row index="1" shape="circle" degrees="360"/>
<row index="2" shape="triangle" degrees="180" sides="3.0"/>
</data>
>>> df.to_xml(namespaces={{"doc": "https://example.com"}},
... prefix="doc") # doctest: +SKIP
<?xml version='1.0' encoding='utf-8'?>
<doc:data xmlns:doc="https://example.com">
<doc:row>
<doc:index>0</doc:index>
<doc:shape>square</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides>4.0</doc:sides>
</doc:row>
<doc:row>
<doc:index>1</doc:index>
<doc:shape>circle</doc:shape>
<doc:degrees>360</doc:degrees>
<doc:sides/>
</doc:row>
<doc:row>
<doc:index>2</doc:index>
<doc:shape>triangle</doc:shape>
<doc:degrees>180</doc:degrees>
<doc:sides>3.0</doc:sides>
</doc:row>
</doc:data>
"""
from pandas.io.formats.xml import (
EtreeXMLFormatter,
LxmlXMLFormatter,
)
lxml = import_optional_dependency("lxml.etree", errors="ignore")
TreeBuilder: type[EtreeXMLFormatter] | type[LxmlXMLFormatter]
if parser == "lxml":
if lxml is not None:
TreeBuilder = LxmlXMLFormatter
else:
raise ImportError(
"lxml not found, please install or use the etree parser."
)
elif parser == "etree":
TreeBuilder = EtreeXMLFormatter
else:
raise ValueError("Values for parser can only be lxml or etree.")
xml_formatter = TreeBuilder(
self,
path_or_buffer=path_or_buffer,
index=index,
root_name=root_name,
row_name=row_name,
na_rep=na_rep,
attr_cols=attr_cols,
elem_cols=elem_cols,
namespaces=namespaces,
prefix=prefix,
encoding=encoding,
xml_declaration=xml_declaration,
pretty_print=pretty_print,
stylesheet=stylesheet,
compression=compression,
storage_options=storage_options,
)
return xml_formatter.write_output()
# ----------------------------------------------------------------------
@doc(INFO_DOCSTRING, **frame_sub_kwargs)
def info(
self,
verbose: bool | None = None,
buf: WriteBuffer[str] | None = None,
max_cols: int | None = None,
memory_usage: bool | str | None = None,
show_counts: bool | None = None,
null_counts: bool | None = None,
) -> None:
if null_counts is not None:
if show_counts is not None:
raise ValueError("null_counts used with show_counts. Use show_counts.")
warnings.warn(
"null_counts is deprecated. Use show_counts instead",
FutureWarning,
stacklevel=find_stack_level(),
)
show_counts = null_counts
info = DataFrameInfo(
data=self,
memory_usage=memory_usage,
)
info.render(
buf=buf,
max_cols=max_cols,
verbose=verbose,
show_counts=show_counts,
)
def memory_usage(self, index: bool = True, deep: bool = False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
are the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Notes
-----
See the :ref:`Frequently Asked Questions <df-memory-usage>` for more
details.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000, dtype=int).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.0+0.0j 1 True
1 1 1.0 1.0+0.0j 1 True
2 1 1.0 1.0+0.0j 1 True
3 1 1.0 1.0+0.0j 1 True
4 1 1.0 1.0+0.0j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 180000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5244
"""
result = self._constructor_sliced(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
index_memory_usage = self._constructor_sliced(
self.index.memory_usage(deep=deep), index=["Index"]
)
result = index_memory_usage._append(result)
return result
def transpose(self, *args, copy: bool = False) -> DataFrame:
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8.0
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, {})
# construct the args
dtypes = list(self.dtypes)
if self._can_fast_transpose:
# Note: tests pass without this, but this improves perf quite a bit.
new_vals = self._values.T
if copy:
new_vals = new_vals.copy()
result = self._constructor(new_vals, index=self.columns, columns=self.index)
elif (
self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0])
):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = type(self)._from_arrays(
new_values, index=self.columns, columns=self.index
)
else:
new_arr = self.values.T
if copy:
new_arr = new_arr.copy()
result = self._constructor(new_arr, index=self.columns, columns=self.index)
return result.__finalize__(self, method="transpose")
@property
def T(self) -> DataFrame:
return self.transpose()
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If a slice is passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_mgr = self._mgr.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_mgr.array, np.ndarray) and new_mgr.array.base is None
result = self._constructor_sliced(new_mgr, name=self.index[i]).__finalize__(
self
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
col_mgr = self._mgr.iget(i)
result = self._box_col_values(col_mgr, i)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def _get_column_array(self, i: int) -> ArrayLike:
"""
Get the values of the i'th column (ndarray or ExtensionArray, as stored
in the Block)
"""
return self._mgr.iget_values(i)
def _iter_column_arrays(self) -> Iterator[ArrayLike]:
"""
Iterate over the arrays of all columns in order.
This returns the values as stored in the Block (ndarray or ExtensionArray).
"""
for i in range(len(self.columns)):
yield self._get_column_array(i)
def __getitem__(self, key):
check_deprecated_indexers(key)
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key) and not is_iterator(key):
# is_iterator to exclude generator e.g. test_getitem_listlike
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if isinstance(self.columns, MultiIndex):
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
if isinstance(indexer, np.ndarray):
indexer = lib.maybe_indices_to_slice(
indexer.astype(np.intp, copy=False), len(self)
)
if isinstance(indexer, np.ndarray):
# GH#43223 If we can not convert, use take
return self.take(indexer, axis=0)
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.columns._get_indexer_strict(key, "columns")[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self._take_with_is_copy(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, MultiIndex):
# GH#26490 using data[key] can cause RecursionError
return data._get_item_cache(key)
return data
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=find_stack_level(),
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self._take_with_is_copy(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
# loc is neither a slice nor ndarray, so must be an int
return self._ixs(loc, axis=1)
def _get_value(self, index, col, takeable: bool = False) -> Scalar:
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
Notes
-----
Assumes that both `self.index._index_as_unique` and
`self.columns._index_as_unique` hold; the caller is responsible for checking.
"""
if takeable:
series = self._ixs(col, axis=1)
return series._values[index]
series = self._get_item_cache(col)
engine = self.index._engine
if not isinstance(self.index, MultiIndex):
# CategoricalIndex: Trying to use the engine fastpath may give incorrect
# results if our categories are integers that dont match our codes
# IntervalIndex: IntervalTree has no get_loc
row = self.index.get_loc(index)
return series._values[row]
# For MultiIndex going through engine effectively restricts us to
# same-length tuples; see test_get_set_value_no_partial_indexing
loc = engine.get_loc(index)
return series._values[loc]
def isetitem(self, loc, value) -> None:
"""
Set the given value in the column with position 'loc'.
This is a positional analogue to __setitem__.
Parameters
----------
loc : int or sequence of ints
value : scalar or arraylike
Notes
-----
Unlike `frame.iloc[:, i] = value`, `frame.isetitem(loc, value)` will
_never_ try to set the values in place, but will always insert a new
array.
In cases where `frame.columns` is unique, this is equivalent to
`frame[frame.columns[i]] = value`.
"""
arraylike = self._sanitize_column(value)
self._iset_item_mgr(loc, arraylike, inplace=False)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
elif (
is_list_like(value)
and not self.columns.is_unique
and 1 < len(self.columns.get_indexer_for([key])) == len(value)
):
# Column to set is duplicated
self._setitem_array([key], value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key: slice, value):
# NB: we can't just use self.loc[key] = value because that
# operates on labels and we need to operate positional for
# backwards-compat, xref GH#31469
self._check_setitem_copy()
self.iloc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# bool indexer is indexing along rows
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
if isinstance(value, DataFrame):
# GH#39931 reindex since iloc does not align
value = value.reindex(self.index.take(indexer))
self.iloc[indexer] = value
else:
# Note: unlike self.iloc[:, indexer] = value, this will
# never try to overwrite values inplace
if isinstance(value, DataFrame):
check_key_length(self.columns, key, value)
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
elif not is_list_like(value):
for col in key:
self[col] = value
elif isinstance(value, np.ndarray) and value.ndim == 2:
self._iset_not_inplace(key, value)
elif np.ndim(value) > 1:
# list of lists
value = DataFrame(value).values
return self._setitem_array(key, value)
else:
self._iset_not_inplace(key, value)
def _iset_not_inplace(self, key, value):
# GH#39510 when setting with df[key] = obj with a list-like key and
# list-like value, we iterate over those listlikes and set columns
# one at a time. This is different from dispatching to
# `self.loc[:, key]= value` because loc.__setitem__ may overwrite
# data inplace, whereas this will insert new arrays.
def igetitem(obj, i: int):
# Note: we catch DataFrame obj before getting here, but
# hypothetically would return obj.iloc[:, i]
if isinstance(obj, np.ndarray):
return obj[..., i]
else:
return obj[i]
if self.columns.is_unique:
if np.shape(value)[-1] != len(key):
raise ValueError("Columns must be same length as key")
for i, col in enumerate(key):
self[col] = igetitem(value, i)
else:
ilocs = self.columns.get_indexer_non_unique(key)[0]
if (ilocs < 0).any():
# key entries not in self.columns
raise NotImplementedError
if np.shape(value)[-1] != len(ilocs):
raise ValueError("Columns must be same length as key")
assert np.ndim(value) <= 2
orig_columns = self.columns
# Using self.iloc[:, i] = ... may set values inplace, which
# by convention we do not do in __setitem__
try:
self.columns = Index(range(len(self.columns)))
for i, iloc in enumerate(ilocs):
self[iloc] = igetitem(value, i)
finally:
self.columns = orig_columns
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item_frame_value(self, key, value: DataFrame) -> None:
self._ensure_valid_index(value)
# align columns
if key in self.columns:
loc = self.columns.get_loc(key)
cols = self.columns[loc]
len_cols = 1 if is_scalar(cols) else len(cols)
if len_cols != len(value.columns):
raise ValueError("Columns must be same length as key")
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and isinstance(
loc, (slice, Series, np.ndarray, Index)
):
cols = maybe_droplevels(cols, key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
arraylike = _reindex_for_setitem(value, self.index)
self._set_item_mgr(key, arraylike)
def _iset_item_mgr(
self, loc: int | slice | np.ndarray, value, inplace: bool = False
) -> None:
# when called from _set_item_mgr loc can be anything returned from get_loc
self._mgr.iset(loc, value, inplace=inplace)
self._clear_item_cache()
def _set_item_mgr(self, key, value: ArrayLike) -> None:
try:
loc = self._info_axis.get_loc(key)
except KeyError:
# This item wasn't present, just insert at end
self._mgr.insert(len(self._info_axis), key, value)
else:
self._iset_item_mgr(loc, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _iset_item(self, loc: int, value) -> None:
arraylike = self._sanitize_column(value)
self._iset_item_mgr(loc, arraylike, inplace=True)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_item(self, key, value) -> None:
"""
Add series to DataFrame in specified column.
If series is a numpy array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be raised.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
value = self._sanitize_column(value)
if (
key in self.columns
and value.ndim == 1
and not is_extension_array_dtype(value)
):
# broadcast across multiple columns if necessary
if not self.columns.is_unique or isinstance(self.columns, MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1)).T
self._set_item_mgr(key, value)
def _set_value(
self, index: IndexLabel, col, value: Scalar, takeable: bool = False
) -> None:
"""
Put single value at passed column and index.
Parameters
----------
index : Label
row label
col : Label
column label
value : scalar
takeable : bool, default False
Sets whether or not index/col interpreted as indexers
"""
try:
if takeable:
series = self._ixs(col, axis=1)
loc = index
else:
series = self._get_item_cache(col)
loc = self.index.get_loc(index)
# setitem_inplace will do validation that may raise TypeError,
# ValueError, or LossySetitemError
series._mgr.setitem_inplace(loc, value)
except (KeyError, TypeError, ValueError, LossySetitemError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
def _ensure_valid_index(self, value) -> None:
"""
Ensure that if we don't have an index, we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
if not isinstance(value, DataFrame):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError) as err:
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a Series"
) from err
# GH31368 preserve name of index
index_copy = value.index.copy()
if self.index.name is not None:
index_copy.name = self.index.name
self._mgr = self._mgr.reindex_axis(index_copy, axis=1, fill_value=np.nan)
def _box_col_values(self, values: SingleDataManager, loc: int) -> Series:
"""
Provide boxed values for a column.
"""
# Lookup in columns so that if e.g. a str datetime was passed
# we attach the Timestamp object as the name.
name = self.columns[loc]
klass = self._constructor_sliced
# We get index=self.index bc values is a SingleDataManager
return klass(values, name=name, fastpath=True).__finalize__(self)
# ----------------------------------------------------------------------
# Lookup Caching
def _clear_item_cache(self) -> None:
self._item_cache.clear()
def _get_item_cache(self, item: Hashable) -> Series:
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
# All places that call _get_item_cache have unique columns,
# pending resolution of GH#33047
loc = self.columns.get_loc(item)
res = self._ixs(loc, axis=1)
cache[item] = res
# for a chain
res._is_copy = self._is_copy
return res
def _reset_cacher(self) -> None:
# no-op for DataFrame
pass
def _maybe_cache_changed(self, item, value: Series, inplace: bool) -> None:
"""
The object has called back to us saying maybe it has changed.
"""
loc = self._info_axis.get_loc(item)
arraylike = value._values
old = self._ixs(loc, axis=1)
if old._values is value._values and inplace:
# GH#46149 avoid making unnecessary copies/block-splitting
return
self._mgr.iset(loc, arraylike, inplace=inplace)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr: str, inplace: bool = False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that are not valid Python variable names
by surrounding them in backticks. Thus, column names containing spaces
or punctuation (besides underscores) or starting with digits must be
surrounded by backticks. (For example, a column named "Area (cm^2)" would
be referenced as ```Area (cm^2)```). Column names which are Python keywords
(like "list", "for", "import", etc) cannot be used.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether to modify the DataFrame rather than creating a new one.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame or None
DataFrame resulting from the provided query expression or
None if ``inplace=True``.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a Python valid identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
This excludes whitespace other than the space character, as well as the
hashtag (which is used for comments) and the backtick itself
(the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
result = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
result = self[res]
if inplace:
self._update_inplace(result)
return None
else:
return result
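# Hedged sketch (not part of the original examples): the '@' environment-variable
# syntax and backtick quoting described above can be combined; the frame and the
# `threshold` variable below are assumptions chosen purely for illustration.
#
#   >>> df = pd.DataFrame({"Area (cm^2)": [1.0, 2.5, 4.0], "B": [3, 2, 1]})
#   >>> threshold = 3
#   >>> df.query("`Area (cm^2)` > @threshold")   # keeps only the row where the area is 4.0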
def eval(self, expr: str, inplace: bool = False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, pandas object, or None
The result of the evaluation or None if ``inplace=True``.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
Multiple columns can be assigned to using multi-line expressions:
>>> df.eval(
... '''
... C = A + B
... D = A - B
... '''
... )
A B C D
0 1 10 11 -9
1 2 8 10 -6
2 3 6 9 -3
3 4 4 8 0
4 5 2 7 3
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
kwargs["level"] = kwargs.pop("level", 0) + 1
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = tuple(kwargs.get("resolvers", ())) + resolvers
return _eval(expr, inplace=inplace, **kwargs)
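# Minimal sketch, assuming a small frame and a local variable: `eval` resolves
# '@'-prefixed names from the calling scope just like `query`, so locals can be
# mixed into assignments.
#
#   >>> bonus = 10
#   >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
#   >>> df.eval("C = A + B + @bonus")   # new frame with column C equal to [14, 16]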
def select_dtypes(self, include=None, exclude=None) -> DataFrame:
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
See Also
--------
DataFrame.dtypes: Return Series with the data type of each column.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<https://numpy.org/doc/stable/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int64'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
def check_int_infer_dtype(dtypes):
converted_dtypes: list[type] = []
for dtype in dtypes:
# Numpy maps int to different types (int32, int64) on Windows and Linux
# see https://github.com/numpy/numpy/issues/9464
if (isinstance(dtype, str) and dtype == "int") or (dtype is int):
converted_dtypes.append(np.int32)
converted_dtypes.append(np.int64)
elif dtype == "float" or dtype is float:
# GH#42452 : np.dtype("float") coerces to np.float64 from Numpy 1.20
converted_dtypes.extend([np.float64, np.float32])
else:
converted_dtypes.append(infer_dtype_from_object(dtype))
return frozenset(converted_dtypes)
include = check_int_infer_dtype(include)
exclude = check_int_infer_dtype(exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
def dtype_predicate(dtype: DtypeObj, dtypes_set) -> bool:
return issubclass(dtype.type, tuple(dtypes_set)) or (
np.number in dtypes_set and getattr(dtype, "_is_numeric", False)
)
def predicate(arr: ArrayLike) -> bool:
dtype = arr.dtype
if include:
if not dtype_predicate(dtype, include):
return False
if exclude:
if dtype_predicate(dtype, exclude):
return False
return True
mgr = self._mgr._get_data_subset(predicate)
return type(self)(mgr).__finalize__(self)
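# Illustrative sketch of mixing string aliases and concrete types, per the
# notes above (the frame is an assumption, not from the original docstring):
#
#   >>> df = pd.DataFrame({"n": [1, 2], "x": [0.5, 1.5], "s": ["a", "b"]})
#   >>> df.select_dtypes(include="number").columns.tolist()                   # ['n', 'x']
#   >>> df.select_dtypes(include=object).columns.tolist()                     # ['s']
#   >>> df.select_dtypes(include="number", exclude="int64").columns.tolist()  # ['x']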
def insert(
self,
loc: int,
column: Hashable,
value: Scalar | AnyArrayLike,
allow_duplicates: bool | lib.NoDefault = lib.no_default,
) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : Scalar, Series, or array-like
allow_duplicates : bool, optional, default lib.no_default
See Also
--------
Index.insert : Insert new item by index.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df
col1 col2
0 1 3
1 2 4
>>> df.insert(1, "newcol", [99, 99])
>>> df
col1 newcol col2
0 1 99 3
1 2 99 4
>>> df.insert(0, "col1", [100, 100], allow_duplicates=True)
>>> df
col1 col1 newcol col2
0 100 1 99 3
1 100 2 99 4
Notice that pandas uses index alignment in case of `value` from type `Series`:
>>> df.insert(0, "col0", pd.Series([5, 6], index=[1, 2]))
>>> df
col0 col1 col1 newcol col2
0 NaN 100 1 99 3
1 5.0 100 2 99 4
"""
if allow_duplicates is lib.no_default:
allow_duplicates = False
if allow_duplicates and not self.flags.allows_duplicate_labels:
raise ValueError(
"Cannot specify 'allow_duplicates=True' when "
"'self.flags.allows_duplicate_labels' is False."
)
if not allow_duplicates and column in self.columns:
# Should this be a different kind of error??
raise ValueError(f"cannot insert {column}, already exists")
if not isinstance(loc, int):
raise TypeError("loc must be int")
value = self._sanitize_column(value)
self._mgr.insert(loc, column, value)
def assign(self, **kwargs) -> DataFrame:
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, value) -> ArrayLike:
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
value : scalar, Series, or array-like
Returns
-------
numpy.ndarray or ExtensionArray
"""
self._ensure_valid_index(value)
# We should never get here with DataFrame value
if isinstance(value, Series):
return _reindex_for_setitem(value, self.index)
if is_list_like(value):
com.require_length_match(value, self.index)
return sanitize_array(value, self.index, copy=True, allow_2d=True)
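# Sketch of the behaviour this helper supports: a Series assigned as a new
# column is realigned on the frame's index before being stored, so labels
# missing from the Series become NaN (data below is assumed for illustration).
#
#   >>> df = pd.DataFrame({"a": [1, 2]}, index=[0, 1])
#   >>> df["b"] = pd.Series([10, 20], index=[1, 2])
#   >>> # df["b"] is now [NaN, 10.0]; label 0 had no match and label 2 was dropped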
@property
def _series(self):
return {
item: Series(
self._mgr.iget(idx), index=self.index, name=item, fastpath=True
)
for idx, item in enumerate(self.columns)
}
def lookup(
self, row_labels: Sequence[IndexLabel], col_labels: Sequence[IndexLabel]
) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
.. deprecated:: 1.2.0
DataFrame.lookup is deprecated,
use pandas.factorize and NumPy indexing instead.
For further details see
:ref:`Looking up values by index/column labels <indexing.lookup>`.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
"""
msg = (
"The 'lookup' method is deprecated and will be "
"removed in a future version. "
"You can use DataFrame.melt and DataFrame.loc "
"as a substitute."
)
warnings.warn(msg, FutureWarning, stacklevel=find_stack_level())
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
if not (self.index.is_unique and self.columns.is_unique):
# GH#33041
raise ValueError("DataFrame.lookup requires unique index and columns")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
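# Hedged sketch of the replacement suggested by the deprecation above, using
# factorize plus NumPy indexing; the frame and column names are assumptions.
#
#   >>> df = pd.DataFrame({"col": ["A", "B"], "A": [1, 2], "B": [3, 4]})
#   >>> idx, cols = pd.factorize(df["col"])
#   >>> df.reindex(cols, axis=1).to_numpy()[np.arange(len(df)), idx]   # array([1, 4])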
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy: bool,
level: Level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy: bool,
level: Level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(
self, axes: dict[str, Index], copy: bool, fill_value
) -> DataFrame:
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
# Fastpath. By doing two 'take's at once we avoid making an
# unnecessary copy.
# We only get here with `not self._is_mixed_type`, which (almost)
# ensures that self.values is cheap. It may be worth making this
# condition more specific.
indexer = row_indexer, col_indexer
new_values = take_2d_multi(self.values, indexer, fill_value=fill_value)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@doc(NDFrame.align, **_shared_doc_kwargs)
def align(
self,
other,
join: str = "outer",
axis: Axis | None = None,
level: Level | None = None,
copy: bool = True,
fill_value=None,
method: str | None = None,
limit=None,
fill_axis: Axis = 0,
broadcast_axis: Axis | None = None,
) -> DataFrame:
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@overload
def set_axis(
self, labels, axis: Axis = ..., inplace: Literal[False] = ...
) -> DataFrame:
...
@overload
def set_axis(self, labels, axis: Axis, inplace: Literal[True]) -> None:
...
@overload
def set_axis(self, labels, *, inplace: Literal[True]) -> None:
...
@overload
def set_axis(
self, labels, axis: Axis = ..., inplace: bool = ...
) -> DataFrame | None:
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
)
@Substitution(
**_shared_doc_kwargs,
extended_summary_sub=" column or",
axis_description_sub=", and 1 identifies the columns",
see_also_sub=" or columns",
)
@Appender(NDFrame.set_axis.__doc__)
def set_axis(self, labels, axis: Axis = 0, inplace: bool = False):
return super().set_axis(labels, axis=axis, inplace=inplace)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> DataFrame:
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return super().reindex(**kwargs)
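# Illustrative sketch of the axis-style signature normalised above: labels may
# be passed positionally with `axis`, or through the `index`/`columns`
# keywords; both calls below are equivalent (assumed data).
#
#   >>> df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
#   >>> df.reindex(["x", "z"], axis=0, fill_value=0)
#   >>> df.reindex(index=["x", "z"], fill_value=0)   # row 'z' is filled with 0, not NaN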
@overload
def drop(
self,
labels: Hashable | list[Hashable] = ...,
*,
axis: Axis = ...,
index: Hashable | list[Hashable] = ...,
columns: Hashable | list[Hashable] = ...,
level: Level | None = ...,
inplace: Literal[True],
errors: IgnoreRaise = ...,
) -> None:
...
@overload
def drop(
self,
labels: Hashable | list[Hashable] = ...,
*,
axis: Axis = ...,
index: Hashable | list[Hashable] = ...,
columns: Hashable | list[Hashable] = ...,
level: Level | None = ...,
inplace: Literal[False] = ...,
errors: IgnoreRaise = ...,
) -> DataFrame:
...
@overload
def drop(
self,
labels: Hashable | list[Hashable] = ...,
*,
axis: Axis = ...,
index: Hashable | list[Hashable] = ...,
columns: Hashable | list[Hashable] = ...,
level: Level | None = ...,
inplace: bool = ...,
errors: IgnoreRaise = ...,
) -> DataFrame | None:
...
# error: Signature of "drop" incompatible with supertype "NDFrame"
# github.com/python/mypy/issues/12387
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
def drop( # type: ignore[override]
self,
labels: Hashable | list[Hashable] = None,
axis: Axis = 0,
index: Hashable | list[Hashable] = None,
columns: Hashable | list[Hashable] = None,
level: Level | None = None,
inplace: bool = False,
errors: IgnoreRaise = "raise",
) -> DataFrame | None:
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level. See the `user guide <advanced.shown_levels>`
for more information about the now unused levels.
Parameters
----------
labels : single label or list-like
Index or column labels to drop. A tuple will be used as a single
label and not treated as a list-like.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If False, return a copy. Otherwise, do operation
inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame or None
DataFrame without the removed index or column labels or
None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
Drop a specific index combination from the MultiIndex
DataFrame, i.e., drop the combination ``'falcon'`` and
``'weight'``, which deletes only the corresponding row
>>> df.drop(index=('falcon', 'weight'))
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@overload
def rename(
self,
mapper: Renamer | None = ...,
*,
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
copy: bool = ...,
inplace: Literal[True],
level: Level | None = ...,
errors: IgnoreRaise = ...,
) -> None:
...
@overload
def rename(
self,
mapper: Renamer | None = ...,
*,
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
copy: bool = ...,
inplace: Literal[False] = ...,
level: Level | None = ...,
errors: IgnoreRaise = ...,
) -> DataFrame:
...
@overload
def rename(
self,
mapper: Renamer | None = ...,
*,
index: Renamer | None = ...,
columns: Renamer | None = ...,
axis: Axis | None = ...,
copy: bool = ...,
inplace: bool = ...,
level: Level | None = ...,
errors: IgnoreRaise = ...,
) -> DataFrame | None:
...
def rename(
self,
mapper: Renamer | None = None,
*,
index: Renamer | None = None,
columns: Renamer | None = None,
axis: Axis | None = None,
copy: bool = True,
inplace: bool = False,
level: Level | None = None,
errors: IgnoreRaise = "ignore",
) -> DataFrame | None:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or function transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
If True then value of copy is ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame or None
DataFrame with the renamed axis labels or None if ``inplace=True``.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters:
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super()._rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@overload
def fillna(
self,
value=...,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: Literal[False] = ...,
limit=...,
downcast=...,
) -> DataFrame:
...
@overload
def fillna(
self,
value,
method: FillnaOptions | None,
axis: Axis | None,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
*,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
value,
*,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
*,
method: FillnaOptions | None,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
*,
axis: Axis | None,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
*,
method: FillnaOptions | None,
axis: Axis | None,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
value,
*,
axis: Axis | None,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
value,
method: FillnaOptions | None,
*,
inplace: Literal[True],
limit=...,
downcast=...,
) -> None:
...
@overload
def fillna(
self,
value=...,
method: FillnaOptions | None = ...,
axis: Axis | None = ...,
inplace: bool = ...,
limit=...,
downcast=...,
) -> DataFrame | None:
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
@doc(NDFrame.fillna, **_shared_doc_kwargs)
def fillna(
self,
value: object | ArrayLike | None = None,
method: FillnaOptions | None = None,
axis: Axis | None = None,
inplace: bool = False,
limit=None,
downcast=None,
) -> DataFrame | None:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
def pop(self, item: Hashable) -> Series:
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : label
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
return super().pop(item=item)
@doc(NDFrame.replace, **_shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=lib.no_default,
inplace: bool = False,
limit=None,
regex: bool = False,
method: str | lib.NoDefault = lib.no_default,
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
def _replace_columnwise(
self, mapping: dict[Hashable, tuple[Any, Any]], inplace: bool, regex
):
"""
Dispatch to Series.replace column-wise.
Parameters
----------
mapping : dict
of the form {col: (target, value)}
inplace : bool
regex : bool or same types as `to_replace` in DataFrame.replace
Returns
-------
DataFrame or None
"""
# Operate column-wise
res = self if inplace else self.copy()
ax = self.columns
for i in range(len(ax)):
if ax[i] in mapping:
ser = self.iloc[:, i]
target, value = mapping[ax[i]]
newobj = ser.replace(target, value, regex=regex)
res._iset_item(i, newobj)
if inplace:
return
return res.__finalize__(self)
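# Hedged sketch of the {col: (target, value)} mapping this helper consumes,
# which DataFrame.replace builds when given per-column dicts (assumed data):
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [1, 2]})
#   >>> df.replace({"a": 1}, {"a": 10})   # only column 'a' has its 1 replaced with 10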
@doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])
def shift(
self,
periods=1,
freq: Frequency | None = None,
axis: Axis = 0,
fill_value=lib.no_default,
) -> DataFrame:
axis = self._get_axis_number(axis)
ncols = len(self.columns)
if (
axis == 1
and periods != 0
and freq is None
and fill_value is lib.no_default
and ncols > 0
):
# We will infer fill_value to match the closest column
# Use a column that we know is valid for our column's dtype GH#38434
label = self.columns[0]
if periods > 0:
result = self.iloc[:, :-periods]
for col in range(min(ncols, abs(periods))):
# TODO(EA2D): doing this in a loop unnecessary with 2D EAs
# Define filler inside loop so we get a copy
filler = self.iloc[:, 0].shift(len(self))
result.insert(0, label, filler, allow_duplicates=True)
else:
result = self.iloc[:, -periods:]
for col in range(min(ncols, abs(periods))):
# Define filler inside loop so we get a copy
filler = self.iloc[:, -1].shift(len(self))
result.insert(
len(result.columns), label, filler, allow_duplicates=True
)
result.columns = self.columns.copy()
return result
elif (
axis == 1
and periods != 0
and fill_value is not lib.no_default
and ncols > 0
):
arrays = self._mgr.arrays
if len(arrays) > 1 or (
# If we only have one block and we know that we can't
# keep the same dtype (i.e. the _can_hold_element check)
# then we can go through the reindex_indexer path
# (and avoid casting logic in the Block method).
# The exception to this (until 2.0) is datetimelike
# dtypes with integers, which cast.
not can_hold_element(arrays[0], fill_value)
# TODO(2.0): remove special case for integer-with-datetimelike
# once deprecation is enforced
and not (
lib.is_integer(fill_value) and needs_i8_conversion(arrays[0].dtype)
)
):
# GH#35488 we need to watch out for multi-block cases
# We only get here with fill_value not-lib.no_default
nper = abs(periods)
nper = min(nper, ncols)
if periods > 0:
indexer = np.array(
[-1] * nper + list(range(ncols - periods)), dtype=np.intp
)
else:
indexer = np.array(
list(range(nper, ncols)) + [-1] * nper, dtype=np.intp
)
mgr = self._mgr.reindex_indexer(
self.columns,
indexer,
axis=0,
fill_value=fill_value,
allow_dups=True,
)
res_df = self._constructor(mgr)
return res_df.__finalize__(self, method="shift")
return super().shift(
periods=periods, freq=freq, axis=axis, fill_value=fill_value
)
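# Sketch of the axis=1 fast path above: shifting columns with an explicit
# fill_value goes through reindex_indexer instead of per-column shifts
# (the frame is an assumption, chosen only to show the effect).
#
#   >>> df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
#   >>> df.shift(periods=1, axis=1, fill_value=0)
#   >>> # column 'a' becomes all zeros and column 'b' receives a's old values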
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "keys"])
def set_index(
self,
keys,
drop: bool = True,
append: bool = False,
inplace: bool = False,
verify_integrity: bool = False,
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame or None
Changed row labels or None if ``inplace=True``.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: list[Hashable] = []
for col in keys:
if isinstance(col, (Index, Series, np.ndarray, list, abc.Iterator)):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError as err:
raise TypeError(
f"{err_msg}. Received column of type {type(col)}"
) from err
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names: list[Hashable] = []
if append:
names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: list[Hashable] = []
for col in keys:
if isinstance(col, MultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (Index, Series)):
# if Index then not MultiIndex (treated above)
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[Index, Series]"; expected "Index"
arrays.append(col) # type:ignore[arg-type]
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
# error: Argument 1 to "append" of "list" has incompatible type
# "Union[List[Any], ndarray]"; expected "Index"
arrays.append(col) # type: ignore[arg-type]
names.append(None)
elif isinstance(col, abc.Iterator):
# error: Argument 1 to "append" of "list" has incompatible type
# "List[Any]"; expected "Index"
arrays.append(list(col)) # type: ignore[arg-type]
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
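# Minimal sketch of `verify_integrity` and `append`, which the examples above
# do not cover (the frame is assumed for illustration only):
#
#   >>> df = pd.DataFrame({"k": [1, 1], "v": [10, 20]})
#   >>> df.set_index("k", verify_integrity=True)   # ValueError: Index has duplicate keys
#   >>> df.set_index("k", append=True)             # keeps the existing index as an extra level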
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = ...,
drop: bool = ...,
inplace: Literal[False] = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> DataFrame:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None,
drop: bool,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> None:
...
@overload
def reset_index(
self,
*,
drop: bool,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> None:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None,
*,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> None:
...
@overload
def reset_index(
self,
*,
inplace: Literal[True],
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> None:
...
@overload
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = ...,
drop: bool = ...,
inplace: bool = ...,
col_level: Hashable = ...,
col_fill: Hashable = ...,
allow_duplicates: bool | lib.NoDefault = ...,
names: Hashable | Sequence[Hashable] = None,
) -> DataFrame | None:
...
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
def reset_index(
self,
level: Hashable | Sequence[Hashable] | None = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Hashable = "",
allow_duplicates: bool | lib.NoDefault = lib.no_default,
names: Hashable | Sequence[Hashable] = None,
) -> DataFrame | None:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
allow_duplicates : bool, optional, default lib.no_default
Allow duplicate column labels to be created.
.. versionadded:: 1.5.0
names : int, str or 1-dimensional list, default None
Using the given string, rename the DataFrame column which contains the
index data. If the DataFrame has a MultiIndex, this has to be a list or
tuple with length equal to the number of levels.
.. versionadded:: 1.5.0
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
Using the `names` parameter, choose a name for the index column:
>>> df.reset_index(names=['classes', 'names'])
classes names speed species
max type
0 bird falcon 389.0 fly
1 bird parrot 24.0 fly
2 mammal lion 80.5 run
3 mammal monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
self._check_inplace_and_allows_duplicate_labels(inplace)
if inplace:
new_obj = self
else:
new_obj = self.copy()
if allow_duplicates is not lib.no_default:
allow_duplicates = validate_bool_kwarg(allow_duplicates, "allow_duplicates")
new_index = default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[tuple[Any, Any | None]]
default = "index" if "index" not in self else "level_0"
names = self.index._get_default_index_names(names, default)
if isinstance(self.index, MultiIndex):
to_insert = zip(self.index.levels, self.index.codes)
else:
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if level is not None and i not in level:
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = lev._values
if level_values.dtype == np.object_:
level_values = lib.maybe_convert_objects(level_values)
if lab is not None:
# if we have the codes, extract the values with a mask
level_values = algorithms.take(
level_values, lab, allow_fill=True, fill_value=lev._na_value
)
new_obj.insert(
0,
name,
level_values,
allow_duplicates=allow_duplicates,
)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isna(self) -> DataFrame:
result = self._constructor(self._mgr.isna(func=isna))
return result.__finalize__(self, method="isna")
@doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])
def isnull(self) -> DataFrame:
"""
DataFrame.isnull is an alias for DataFrame.isna.
"""
return self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notna(self) -> DataFrame:
return ~self.isna()
@doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])
def notnull(self) -> DataFrame:
"""
DataFrame.notnull is an alias for DataFrame.notna.
"""
return ~self.isna()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def dropna(
self,
axis: Axis = 0,
how: str | NoDefault = no_default,
thresh: int | NoDefault = no_default,
subset: IndexLabel = None,
inplace: bool = False,
):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing value.
.. versionchanged:: 1.0.0
Pass tuple or list to drop on multiple axes.
Only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values. Cannot be combined with how.
subset : column label or sequence of labels, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
Returns
-------
DataFrame or None
DataFrame with NA entries dropped from it or None if ``inplace=True``.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'toy'])
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
if (how is not no_default) and (thresh is not no_default):
raise TypeError(
"You cannot set both the how and thresh arguments at the same time."
)
if how is no_default:
how = "any"
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
# subset needs to be list
if not is_list_like(subset):
subset = [subset]
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(np.array(subset)[check].tolist())
agg_obj = self.take(indices, axis=agg_axis)
if thresh is not no_default:
count = agg_obj.count(axis=agg_axis)
mask = count >= thresh
elif how == "any":
# faster equivalent to 'agg_obj.count(agg_axis) == self.shape[agg_axis]'
mask = notna(agg_obj).all(axis=agg_axis, bool_only=False)
elif how == "all":
# faster equivalent to 'agg_obj.count(agg_axis) > 0'
mask = notna(agg_obj).any(axis=agg_axis, bool_only=False)
else:
raise ValueError(f"invalid how option: {how}")
if np.all(mask):
result = self.copy()
else:
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
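# Sketch of the `how`/`thresh` exclusivity enforced above (values assumed):
#
#   >>> df = pd.DataFrame({"a": [1, np.nan], "b": [np.nan, np.nan]})
#   >>> df.dropna(thresh=1)                # keeps only row 0, which has one non-NA value
#   >>> df.dropna(how="all", thresh=2)     # TypeError: cannot set both how and thresh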
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "subset"])
def drop_duplicates(
self,
subset: Hashable | Sequence[Hashable] | None = None,
keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> DataFrame | None:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes,
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame or None
DataFrame with duplicates removed or None if ``inplace=True``.
See Also
--------
DataFrame.value_counts: Count unique combinations of columns.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, it removes duplicate rows based on all columns.
>>> df.drop_duplicates()
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
To remove duplicates on specific column(s), use ``subset``.
>>> df.drop_duplicates(subset=['brand'])
brand style rating
0 Yum Yum cup 4.0
2 Indomie cup 3.5
To remove duplicates and keep last occurrences, use ``keep``.
>>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
brand style rating
1 Yum Yum cup 4.0
2 Indomie cup 3.5
4 Indomie pack 5.0
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
ignore_index = validate_bool_kwarg(ignore_index, "ignore_index")
duplicated = self.duplicated(subset, keep=keep)
result = self[-duplicated]
if ignore_index:
result.index = default_index(len(result))
if inplace:
self._update_inplace(result)
return None
else:
return result
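# Illustrative sketch of `ignore_index`, which relabels the surviving rows with
# a fresh RangeIndex (continuing the ramen frame from the docstring above):
#
#   >>> df.drop_duplicates(subset=['brand'], ignore_index=True)
#   >>> # same two rows as the `subset` example, but labelled 0 and 1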
def duplicated(
self,
subset: Hashable | Sequence[Hashable] | None = None,
keep: Literal["first"] | Literal["last"] | Literal[False] = "first",
) -> Series:
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
Boolean Series indicating whether each row is duplicated.
See Also
--------
Index.duplicated : Equivalent method on index.
Series.duplicated : Equivalent method on Series.
Series.drop_duplicates : Remove duplicate values from Series.
DataFrame.drop_duplicates : Remove duplicate values from DataFrame.
Examples
--------
Consider dataset containing ramen rating.
>>> df = pd.DataFrame({
... 'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
... 'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
... 'rating': [4, 4, 3.5, 15, 5]
... })
>>> df
brand style rating
0 Yum Yum cup 4.0
1 Yum Yum cup 4.0
2 Indomie cup 3.5
3 Indomie pack 15.0
4 Indomie pack 5.0
By default, for each set of duplicated values, the first occurrence
is set to False and all others to True.
>>> df.duplicated()
0 False
1 True
2 False
3 False
4 False
dtype: bool
By using 'last', the last occurrence of each set of duplicated values
is set to False and all others to True.
>>> df.duplicated(keep='last')
0 True
1 False
2 False
3 False
4 False
dtype: bool
By setting ``keep`` to False, all duplicates are marked True.
>>> df.duplicated(keep=False)
0 True
1 True
2 False
3 False
4 False
dtype: bool
To find duplicates on specific column(s), use ``subset``.
>>> df.duplicated(subset=['brand'])
0 False
1 True
2 False
3 True
4 True
dtype: bool
"""
if self.empty:
return self._constructor_sliced(dtype=bool)
def f(vals) -> tuple[np.ndarray, int]:
labels, shape = algorithms.factorize(vals, size_hint=len(self))
return labels.astype("i8", copy=False), len(shape)
if subset is None:
# https://github.com/pandas-dev/pandas/issues/28770
# Incompatible types in assignment (expression has type "Index", variable
# has type "Sequence[Any]")
subset = self.columns # type: ignore[assignment]
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Sequence, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = set(subset) - set(self.columns)
if diff:
raise KeyError(Index(diff))
if len(subset) == 1 and self.columns.is_unique:
# GH#45236 This is faster than get_group_index below
result = self[subset[0]].duplicated(keep)
result.name = None
else:
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(
labels,
# error: Argument 1 to "tuple" has incompatible type "List[_T]";
# expected "Iterable[int]"
tuple(shape), # type: ignore[arg-type]
sort=False,
xnull=False,
)
result = self._constructor_sliced(duplicated(ids, keep), index=self.index)
return result.__finalize__(self, method="duplicated")
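# Illustrative note (not part of pandas): in the multi-column branch above,
# each selected column is factorized to integer codes and the per-column codes
# are combined into one group id per row, so two rows are duplicates exactly
# when they share a group id. A rough, hypothetical sketch of the idea for
# keep='first' (the real code uses algorithms.factorize/get_group_index):
#
#     codes_per_col = [pd.factorize(df[c])[0] for c in subset]  # integer codes
#     row_keys = list(zip(*codes_per_col))                      # one key per row
#     seen = set()
#     dup = []
#     for key in row_keys:
#         dup.append(key in seen)
#         seen.add(key)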
# ----------------------------------------------------------------------
# Sorting
# TODO: Just move the sort_values doc here.
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "by"])
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
# error: Signature of "sort_values" incompatible with supertype "NDFrame"
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
ascending = validate_ascending(ascending)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
# error: List comprehension has incompatible type List[Series];
# expected List[ndarray]
keys = [
Series(k, name=name) # type: ignore[misc]
for (k, name) in zip(keys, by)
]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
elif len(by):
# len(by) == 1
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
# error: Incompatible types in assignment (expression has type
# "Series", variable has type "ndarray")
k = Series(k, name=by) # type: ignore[assignment]
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
else:
return self.copy()
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.set_axis(
self._get_block_manager_axis(axis), default_index(len(indexer))
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
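# Illustrative note (not part of pandas): the method above computes an integer
# indexer (lexsort_indexer for multiple keys, nargsort for a single key) and
# then reorders the underlying blocks with a single take. A conceptual sketch,
# with `df`, 'a' and 'b' purely hypothetical; NaN placement and key functions
# are handled by the helpers and are not reproduced here:
#
#     order = np.lexsort([df['b'].to_numpy(), df['a'].to_numpy()])  # last key is primary
#     sorted_df = df.take(order)    # roughly df.sort_values(['a', 'b'])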
@overload
def sort_index(
self,
*,
axis: Axis = ...,
level: Level | None = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[True],
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
) -> None:
...
@overload
def sort_index(
self,
*,
axis: Axis = ...,
level: Level | None = ...,
ascending: bool | Sequence[bool] = ...,
inplace: Literal[False] = ...,
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
) -> DataFrame:
...
@overload
def sort_index(
self,
*,
axis: Axis = ...,
level: Level | None = ...,
ascending: bool | Sequence[bool] = ...,
inplace: bool = ...,
kind: SortKind = ...,
na_position: NaPosition = ...,
sort_remaining: bool = ...,
ignore_index: bool = ...,
key: IndexKeyFunc = ...,
) -> DataFrame | None:
...
# error: Signature of "sort_index" incompatible with supertype "NDFrame"
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def sort_index( # type: ignore[override]
self,
axis: Axis = 0,
level: Level | None = None,
ascending: bool | Sequence[bool] = True,
inplace: bool = False,
kind: SortKind = "quicksort",
na_position: NaPosition = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
key: IndexKeyFunc = None,
) -> DataFrame | None:
"""
Sort object by labels (along an axis).
Returns a new DataFrame sorted by label if `inplace` argument is
``False``, otherwise updates the original DataFrame and returns None.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool or list-like of bools, default True
Sort ascending vs. descending. When the index is a MultiIndex the
sort direction can be controlled for each level individually.
inplace : bool, default False
Whether to modify the DataFrame rather than creating a new one.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` and `stable` are the only stable algorithms. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
key : callable, optional
If not None, apply the key function to the index values
before sorting. This is similar to the `key` argument in the
builtin :meth:`sorted` function, with the notable difference that
this `key` function should be *vectorized*. It should expect an
``Index`` and return an ``Index`` of the same shape. For MultiIndex
inputs, the key is applied *per level*.
.. versionadded:: 1.1.0
Returns
-------
DataFrame or None
The original DataFrame sorted by the labels or None if ``inplace=True``.
See Also
--------
Series.sort_index : Sort Series by the index.
DataFrame.sort_values : Sort DataFrame by the value.
Series.sort_values : Sort Series by the value.
Examples
--------
>>> df = pd.DataFrame([1, 2, 3, 4, 5], index=[100, 29, 234, 1, 150],
... columns=['A'])
>>> df.sort_index()
A
1 4
29 2
100 1
150 5
234 3
By default, it sorts in ascending order; to sort in descending order,
use ``ascending=False``.
>>> df.sort_index(ascending=False)
A
234 3
150 5
100 1
29 2
1 4
A key function can be specified which is applied to the index before
sorting. For a ``MultiIndex`` this is applied to each level separately.
>>> df = pd.DataFrame({"a": [1, 2, 3, 4]}, index=['A', 'b', 'C', 'd'])
>>> df.sort_index(key=lambda x: x.str.lower())
a
A 1
b 2
C 3
d 4
"""
return super().sort_index(
axis=axis,
level=level,
ascending=ascending,
inplace=inplace,
kind=kind,
na_position=na_position,
sort_remaining=sort_remaining,
ignore_index=ignore_index,
key=key,
)
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
):
"""
Return a Series containing counts of unique rows in the DataFrame.
.. versionadded:: 1.1.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
dropna : bool, default True
Don’t include counts of rows that contain NA values.
.. versionadded:: 1.3.0
Returns
-------
Series
See Also
--------
Series.value_counts: Equivalent method on Series.
Notes
-----
The returned Series will have a MultiIndex with one level per input
column. By default, rows that contain any NA values are omitted from
the result. By default, the resulting Series will be in descending
order so that the first element is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 4, 6],
... 'num_wings': [2, 0, 0, 0]},
... index=['falcon', 'dog', 'cat', 'ant'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
cat 4 0
ant 6 0
>>> df.value_counts()
num_legs num_wings
4 0 2
2 2 1
6 0 1
dtype: int64
>>> df.value_counts(sort=False)
num_legs num_wings
2 2 1
4 0 2
6 0 1
dtype: int64
>>> df.value_counts(ascending=True)
num_legs num_wings
2 2 1
6 0 1
4 0 2
dtype: int64
>>> df.value_counts(normalize=True)
num_legs num_wings
4 0 0.50
2 2 0.25
6 0 0.25
dtype: float64
With `dropna` set to `False` we can also count rows with NA values.
>>> df = pd.DataFrame({'first_name': ['John', 'Anne', 'John', 'Beth'],
... 'middle_name': ['Smith', pd.NA, pd.NA, 'Louise']})
>>> df
first_name middle_name
0 John Smith
1 Anne <NA>
2 John <NA>
3 Beth Louise
>>> df.value_counts()
first_name middle_name
Beth Louise 1
John Smith 1
dtype: int64
>>> df.value_counts(dropna=False)
first_name middle_name
Anne NaN 1
Beth Louise 1
John Smith 1
NaN 1
dtype: int64
"""
if subset is None:
subset = self.columns.tolist()
counts = self.groupby(subset, dropna=dropna).grouper.size()
if sort:
counts = counts.sort_values(ascending=ascending)
if normalize:
counts /= counts.sum()
# Force MultiIndex for single column
if len(subset) == 1:
counts.index = MultiIndex.from_arrays(
[counts.index], names=[counts.index.name]
)
return counts
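# Illustrative note (not part of pandas): as the implementation above shows,
# value_counts is essentially a groupby-size over the subset columns. A
# hypothetical sketch (column names are placeholders):
#
#     counts = df.groupby(['brand', 'style'], dropna=True).size()
#     counts = counts.sort_values(ascending=False)   # sort=True
#     freqs = counts / counts.sum()                  # normalize=True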
def nlargest(self, n: int, columns: IndexLabel, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : prioritize the first occurrence(s)
- ``last`` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n: int, columns: IndexLabel, keep: str = "first") -> DataFrame:
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 337000,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 337000 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 337000 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Iceland 337000 17036 IS
Nauru 337000 182 NR
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
Nauru 337000 182 NR
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
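# Illustrative note (not part of pandas): both nlargest and nsmallest defer to
# algorithms.SelectNFrame, which avoids a full sort. A behaviourally similar
# (but slower) sketch, as the docstrings above note; tie handling via `keep`
# ('first'/'last'/'all') differs from this naive version:
#
#     top3 = df.sort_values('population', ascending=False).head(3)  # ~ df.nlargest(3, 'population')
#     low3 = df.sort_values('population', ascending=True).head(3)   # ~ df.nsmallest(3, 'population')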
@doc(
Series.swaplevel,
klass=_shared_doc_kwargs["klass"],
extra_params=dedent(
"""axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to swap levels on. 0 or 'index' for row-wise, 1 or
'columns' for column-wise."""
),
examples=dedent(
"""\
Examples
--------
>>> df = pd.DataFrame(
... {"Grade": ["A", "B", "A", "C"]},
... index=[
... ["Final exam", "Final exam", "Coursework", "Coursework"],
... ["History", "Geography", "History", "Geography"],
... ["January", "February", "March", "April"],
... ],
... )
>>> df
Grade
Final exam History January A
Geography February B
Coursework History March A
Geography April C
In the following example, we will swap the levels of the indices.
Here, we swap the levels of the row index, which is the default behaviour
(``axis=0``); column levels can be swapped in a similar manner with ``axis=1``.
By not supplying any arguments for i and j, we swap the last and second to
last indices.
>>> df.swaplevel()
Grade
Final exam January History A
February Geography B
Coursework March History A
April Geography C
By supplying one argument, we can choose which index to swap the last
index with. We can for example swap the first index with the last one as
follows.
>>> df.swaplevel(0)
Grade
January History Final exam A
February Geography Final exam B
March History Coursework A
April Geography Coursework C
We can also define explicitly which indices we want to swap by supplying values
for both i and j. Here, we for example swap the first and second indices.
>>> df.swaplevel(0, 1)
Grade
History Final exam January A
Geography Final exam February B
History Coursework March A
Geography Coursework April C"""
),
)
def swaplevel(self, i: Axis = -2, j: Axis = -1, axis: Axis = 0) -> DataFrame:
result = self.copy()
axis = self._get_axis_number(axis)
if not isinstance(result._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only swap levels on a hierarchical axis.")
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.swaplevel(i, j)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order: Sequence[Axis], axis: Axis = 0) -> DataFrame:
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : {0 or 'index', 1 or 'columns'}, default 0
Where to reorder levels.
Returns
-------
DataFrame
Examples
--------
>>> data = {
... "class": ["Mammals", "Mammals", "Reptiles"],
... "diet": ["Omnivore", "Carnivore", "Carnivore"],
... "species": ["Humans", "Dogs", "Snakes"],
... }
>>> df = pd.DataFrame(data, columns=["class", "diet", "species"])
>>> df = df.set_index(["class", "diet"])
>>> df
species
class diet
Mammals Omnivore Humans
Carnivore Dogs
Reptiles Carnivore Snakes
Let's reorder the levels of the index:
>>> df.reorder_levels(["diet", "class"])
species
diet class
Omnivore Mammals Humans
Carnivore Mammals Dogs
Reptiles Snakes
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), MultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
assert isinstance(result.index, MultiIndex)
result.index = result.index.reorder_levels(order)
else:
assert isinstance(result.columns, MultiIndex)
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic Methods
def _cmp_method(self, other, op):
axis = 1 # only relevant for Series other case
self, other = ops.align_method_FRAME(self, other, axis, flex=False, level=None)
# See GH#4537 for discussion of scalar op behavior
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
def _arith_method(self, other, op):
if ops.should_reindex_frame_op(self, other, op, 1, 1, None, None):
return ops.frame_arith_method_with_reindex(self, other, op)
axis = 1 # only relevant for Series other case
other = ops.maybe_prepare_scalar_for_op(other, (self.shape[axis],))
self, other = ops.align_method_FRAME(self, other, axis, flex=True, level=None)
new_data = self._dispatch_frame_op(other, op, axis=axis)
return self._construct_result(new_data)
_logical_method = _arith_method
def _dispatch_frame_op(self, right, func: Callable, axis: int | None = None):
"""
Evaluate the frame operation func(left, right) by evaluating
column-by-column, dispatching to the Series implementation.
Parameters
----------
right : scalar, Series, or DataFrame
func : arithmetic or comparison operator
axis : {None, 0, 1}
Returns
-------
DataFrame
"""
# Get the appropriate array-op to apply to each column/block's values.
array_op = ops.get_array_op(func)
right = lib.item_from_zerodim(right)
if not is_list_like(right):
# i.e. scalar, faster than checking np.ndim(right) == 0
with np.errstate(all="ignore"):
bm = self._mgr.apply(array_op, right=right)
return self._constructor(bm)
elif isinstance(right, DataFrame):
assert self.index.equals(right.index)
assert self.columns.equals(right.columns)
# TODO: The previous assertion `assert right._indexed_same(self)`
# fails in cases with empty columns reached via
# _frame_arith_method_with_reindex
# TODO operate_blockwise expects a manager of the same type
with np.errstate(all="ignore"):
bm = self._mgr.operate_blockwise(
# error: Argument 1 to "operate_blockwise" of "ArrayManager" has
# incompatible type "Union[ArrayManager, BlockManager]"; expected
# "ArrayManager"
# error: Argument 1 to "operate_blockwise" of "BlockManager" has
# incompatible type "Union[ArrayManager, BlockManager]"; expected
# "BlockManager"
right._mgr, # type: ignore[arg-type]
array_op,
)
return self._constructor(bm)
elif isinstance(right, Series) and axis == 1:
# axis=1 means we want to operate row-by-row
assert right.index.equals(self.columns)
right = right._values
# maybe_align_as_frame ensures we do not have an ndarray here
assert not isinstance(right, np.ndarray)
with np.errstate(all="ignore"):
arrays = [
array_op(_left, _right)
for _left, _right in zip(self._iter_column_arrays(), right)
]
elif isinstance(right, Series):
assert right.index.equals(self.index) # Handle other cases later
right = right._values
with np.errstate(all="ignore"):
arrays = [array_op(left, right) for left in self._iter_column_arrays()]
else:
# Remaining cases have less-obvious dispatch rules
raise NotImplementedError(right)
return type(self)._from_arrays(
arrays, self.columns, self.index, verify_integrity=False
)
def _combine_frame(self, other: DataFrame, func, fill_value=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
new_data = self._dispatch_frame_op(other, _arith_op)
return new_data
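# Illustrative note (not part of pandas): ops.fill_binop above fills a missing
# value on one side with `fill_value` only when the other side is present, so
# a position that is NA in both frames stays NA (this matches the combine()
# examples further below). A hypothetical per-column sketch, where `left`,
# `right` and `func` are placeholders for already-aligned column Series:
#
#     fill_left = left.isna() & right.notna()
#     fill_right = right.isna() & left.notna()
#     out = func(left.mask(fill_left, fill_value), right.mask(fill_right, fill_value))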
def _construct_result(self, result) -> DataFrame:
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
out.index = self.index
return out
def __divmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = self // other
mod = self - div * other
return div, mod
def __rdivmod__(self, other) -> tuple[DataFrame, DataFrame]:
# Naive implementation, room for optimization
div = other // self
mod = other - div * self
return div, mod
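# Illustrative note (not part of pandas): the naive implementations above
# satisfy the usual divmod identity. Hypothetical usage, assuming `df` holds
# exact integers (floating-point frames may differ by rounding):
#
#     q, r = divmod(df, 3)                       # calls DataFrame.__divmod__
#     assert ((q * 3 + r) == df).all().all()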
# ----------------------------------------------------------------------
# Combination-Related
@doc(
_shared_docs["compare"],
"""
Returns
-------
DataFrame
DataFrame that shows the differences stacked side by side.
The resulting index will be a MultiIndex with 'self' and 'other'
stacked alternately at the inner level.
Raises
------
ValueError
When the two DataFrames don't have identical labels or shape.
See Also
--------
Series.compare : Compare with another Series and show differences.
DataFrame.equals : Test whether two objects contain the same elements.
Notes
-----
Matching NaNs will not appear as a difference.
Can only compare identically-labeled
(i.e. same shape, identical row and column labels) DataFrames
Examples
--------
>>> df = pd.DataFrame(
... {{
... "col1": ["a", "a", "b", "b", "a"],
... "col2": [1.0, 2.0, 3.0, np.nan, 5.0],
... "col3": [1.0, 2.0, 3.0, 4.0, 5.0]
... }},
... columns=["col1", "col2", "col3"],
... )
>>> df
col1 col2 col3
0 a 1.0 1.0
1 a 2.0 2.0
2 b 3.0 3.0
3 b NaN 4.0
4 a 5.0 5.0
>>> df2 = df.copy()
>>> df2.loc[0, 'col1'] = 'c'
>>> df2.loc[2, 'col3'] = 4.0
>>> df2
col1 col2 col3
0 c 1.0 1.0
1 a 2.0 2.0
2 b 3.0 4.0
3 b NaN 4.0
4 a 5.0 5.0
Align the differences on columns
>>> df.compare(df2)
col1 col3
self other self other
0 a c NaN NaN
2 NaN NaN 3.0 4.0
Stack the differences on rows
>>> df.compare(df2, align_axis=0)
col1 col3
0 self a NaN
other c NaN
2 self NaN 3.0
other NaN 4.0
Keep the equal values
>>> df.compare(df2, keep_equal=True)
col1 col3
self other self other
0 a c 1.0 1.0
2 b b 3.0 4.0
Keep all original rows and columns
>>> df.compare(df2, keep_shape=True)
col1 col2 col3
self other self other self other
0 a c NaN NaN NaN NaN
1 NaN NaN NaN NaN NaN NaN
2 NaN NaN NaN NaN 3.0 4.0
3 NaN NaN NaN NaN NaN NaN
4 NaN NaN NaN NaN NaN NaN
Keep all original rows and columns and also all original values
>>> df.compare(df2, keep_shape=True, keep_equal=True)
col1 col2 col3
self other self other self other
0 a c 1.0 1.0 1.0 1.0
1 a a 2.0 2.0 2.0 2.0
2 b b 3.0 3.0 3.0 4.0
3 b b NaN NaN 4.0 4.0
4 a a 5.0 5.0 5.0 5.0
""",
klass=_shared_doc_kwargs["klass"],
)
def compare(
self,
other: DataFrame,
align_axis: Axis = 1,
keep_shape: bool = False,
keep_equal: bool = False,
) -> DataFrame:
return super().compare(
other=other,
align_axis=align_axis,
keep_shape=keep_shape,
keep_equal=keep_equal,
)
def combine(
self, other: DataFrame, func, fill_value=None, overwrite: bool = True
) -> DataFrame:
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and returns a Series or a
scalar. Used to merge the two dataframes column by column.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# If self DataFrame does not have col in other DataFrame,
# try to promote series, which is all NaN, as other_dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
series = series.astype(new_dtype, copy=False)
otherSeries = otherSeries.astype(new_dtype, copy=False)
arr = func(series, otherSeries)
if isinstance(new_dtype, np.dtype):
# only downcast for NumPy dtypes; if new_dtype is an EA dtype, `func` is
# expected to return the correct dtype without any additional casting
arr = maybe_downcast_to_dtype(arr, new_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
def combine_first(self, other: DataFrame) -> DataFrame:
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two. Upon calling
``first.combine_first(second)``, the resulting dataframe keeps the values
of `first` and overrides the values of `second` wherever both
``first.loc[index, col]`` and ``second.loc[index, col]`` are not missing
values.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
The result of combining the provided DataFrame with the other object.
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def combiner(x, y):
mask = extract_array(isna(x))
x_values = extract_array(x, extract_numpy=True)
y_values = extract_array(y, extract_numpy=True)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
combined = self.combine(other, combiner, overwrite=False)
dtypes = {
col: find_common_type([self.dtypes[col], other.dtypes[col]])
for col in self.columns.intersection(other.columns)
if not is_dtype_equal(combined.dtypes[col], self.dtypes[col])
}
if dtypes:
combined = combined.astype(dtypes)
return combined
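# Illustrative note (not part of pandas): the combiner above keeps `self`
# where it is non-NA and falls back to `other` elsewhere; ignoring the dtype
# fix-ups, this behaves like a masked fill on the aligned frames. Hypothetical
# sketch (`df1` and `df2` are placeholders):
#
#     left, right = df1.align(df2)
#     filled = left.mask(left.isna(), right)    # ~ df1.combine_first(df2)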
def update(
self,
other,
join: str = "left",
overwrite: bool = True,
filter_func=None,
errors: str = "ignore",
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-column(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
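# Illustrative note (not part of pandas): in the default path above
# (overwrite=True, no filter_func), `mask` is True where `other` is NA, so each
# column effectively becomes "take other's value unless it is NA". Hypothetical
# per-column sketch (`df`, `aligned` and 'col' are placeholders):
#
#     aligned = other.reindex_like(df)
#     df['col'] = df['col'].where(aligned['col'].isna(), aligned['col'])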
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
We can also choose to include NA in group keys or not by setting the
`dropna` parameter; the default setting is `True`.
>>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by=["b"]).sum()
a c
b
1.0 2 3
2.0 2 5
>>> df.groupby(by=["b"], dropna=False).sum()
a c
b
1.0 2 3
2.0 2 5
NaN 1 4
>>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
>>> df = pd.DataFrame(l, columns=["a", "b", "c"])
>>> df.groupby(by="a").sum()
b c
a
a 13.0 13.0
b 12.3 123.0
>>> df.groupby(by="a", dropna=False).sum()
b c
a
a 13.0 13.0
b 12.3 123.0
NaN 12.3 33.0
When using ``.apply()``, use ``group_keys`` to include or exclude the group keys.
The ``group_keys`` argument defaults to ``True`` (include).
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df.groupby("Animal", group_keys=True).apply(lambda x: x)
Animal Max Speed
Animal
Falcon 0 Falcon 380.0
1 Falcon 370.0
Parrot 2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby("Animal", group_keys=False).apply(lambda x: x)
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis: Axis = 0,
level: Level | None = None,
as_index: bool = True,
sort: bool = True,
group_keys: bool | lib.NoDefault = no_default,
squeeze: bool | lib.NoDefault = no_default,
observed: bool = False,
dropna: bool = True,
) -> DataFrameGroupBy:
from pandas.core.groupby.generic import DataFrameGroupBy
if squeeze is not no_default:
warnings.warn(
(
"The `squeeze` parameter is deprecated and "
"will be removed in a future version."
),
FutureWarning,
stacklevel=find_stack_level(),
)
else:
squeeze = False
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
dropna=dropna,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object or a list of str, optional
Column to use to make new frame's index. If None, uses
existing index.
.. versionchanged:: 1.1.0
Also accept list of index names.
columns : str or object or a list of str
Column to use to make new frame's columns.
.. versionchanged:: 1.1.0
Also accept list of columns names.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. Use `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Reference :ref:`the user guide <reshaping.pivot>` for more examples.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
You could also assign a list of column names or a list of index names.
>>> df = pd.DataFrame({
... "lev1": [1, 1, 1, 2, 2, 2],
... "lev2": [1, 1, 2, 1, 1, 2],
... "lev3": [1, 2, 1, 2, 1, 2],
... "lev4": [1, 2, 3, 4, 5, 6],
... "values": [0, 1, 2, 3, 4, 5]})
>>> df
lev1 lev2 lev3 lev4 values
0 1 1 1 1 0
1 1 1 2 2 1
2 1 2 1 3 2
3 2 1 2 4 3
4 2 1 1 5 4
5 2 2 2 6 5
>>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
lev2 1 2
lev3 1 2 1 2
lev1
1 0.0 1.0 2.0 NaN
2 4.0 3.0 NaN 5.0
>>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
lev3 1 2
lev1 lev2
1 1 0.0 1.0
2 2.0 NaN
2 1 4.0 3.0
2 NaN 5.0
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> DataFrame:
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with (in the resulting pivot table,
after aggregation).
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
sort : bool, default True
Specifies if the result should be sorted.
.. versionadded:: 1.3.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
DataFrame.melt: Unpivot a DataFrame from wide to long format,
optionally leaving identifiers set.
wide_to_long : Wide panel to long format. Less flexible but more
user-friendly than melt.
Notes
-----
Reference :ref:`the user guide <reshaping.pivot>` for more examples.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9 7.500000 6
small 5.500000 9 8.500000 8
foo large 2.000000 5 4.500000 4
small 2.333333 6 4.333333 2
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
sort=True,
) -> DataFrame:
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
sort=sort,
)
def stack(self, level: Level = -1, dropna: bool = True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Reference :ref:`the user guide <reshaping.stacking>` for more examples.
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import (
stack,
stack_multiple,
)
if isinstance(level, (tuple, list)):
result = stack_multiple(self, level, dropna=dropna)
else:
result = stack(self, level, dropna=dropna)
return result.__finalize__(self, method="stack")
def explode(
self,
column: IndexLabel,
ignore_index: bool = False,
) -> DataFrame:
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : IndexLabel
Column(s) to explode.
For multiple columns, specify a non-empty list in which each element
is a str or tuple, and the list-like data in all specified columns
must have matching lengths within each row of the frame.
.. versionadded:: 1.3.0
Multi-column explode
ignore_index : bool, default False
If True, the resulting index will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.1.0
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
* If columns of the frame are not unique.
* If the specified columns to explode is an empty list.
* If the specified columns to explode do not have matching counts of
  elements rowwise in the frame.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Transform each element of a list-like to a row.
Notes
-----
This routine will explode list-likes including lists, tuples, sets,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged, and empty list-likes will
result in a np.nan for that row. In addition, the ordering of rows in the
output will be non-deterministic when exploding sets.
Reference :ref:`the user guide <reshaping.explode>` for more examples.
Examples
--------
>>> df = pd.DataFrame({'A': [[0, 1, 2], 'foo', [], [3, 4]],
... 'B': 1,
... 'C': [['a', 'b', 'c'], np.nan, [], ['d', 'e']]})
>>> df
A B C
0 [0, 1, 2] 1 [a, b, c]
1 foo 1 NaN
2 [] 1 []
3 [3, 4] 1 [d, e]
Single-column explode.
>>> df.explode('A')
A B C
0 0 1 [a, b, c]
0 1 1 [a, b, c]
0 2 1 [a, b, c]
1 foo 1 NaN
2 NaN 1 []
3 3 1 [d, e]
3 4 1 [d, e]
Multi-column explode.
>>> df.explode(list('AC'))
A B C
0 0 1 a
0 1 1 b
0 2 1 c
1 foo 1 NaN
2 NaN 1 NaN
3 3 1 d
3 4 1 e
"""
if not self.columns.is_unique:
raise ValueError("columns must be unique")
columns: list[Hashable]
if is_scalar(column) or isinstance(column, tuple):
columns = [column]
elif isinstance(column, list) and all(
map(lambda c: is_scalar(c) or isinstance(c, tuple), column)
):
if not column:
raise ValueError("column must be nonempty")
if len(column) > len(set(column)):
raise ValueError("column must be unique")
columns = column
else:
raise ValueError("column must be a scalar, tuple, or list thereof")
df = self.reset_index(drop=True)
if len(columns) == 1:
result = df[columns[0]].explode()
else:
mylen = lambda x: len(x) if is_list_like(x) else -1
counts0 = self[columns[0]].apply(mylen)
for c in columns[1:]:
if not all(counts0 == self[c].apply(mylen)):
raise ValueError("columns must have matching element counts")
result = DataFrame({c: df[c].explode() for c in columns})
result = df.drop(columns, axis=1).join(result)
if ignore_index:
result.index = default_index(len(result))
else:
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result.__finalize__(self, method="explode")
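# Illustrative note (not part of pandas): for the multi-column case above, each
# listed column is exploded independently and the pieces are joined back on the
# reset (positional) index, which is why every listed column must have matching
# element counts per row. Hypothetical sketch with placeholder columns 'A', 'C':
#
#     tmp = df.reset_index(drop=True)
#     pieces = {c: tmp[c].explode() for c in ['A', 'C']}   # columns assumed list-like
#     out = tmp.drop(['A', 'C'], axis=1).join(pd.DataFrame(pieces))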
def unstack(self, level: Level = -1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Notes
-----
Reference :ref:`the user guide <reshaping.stacking>` for more examples.
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
result = unstack(self, level, fill_value)
return result.__finalize__(self, method="unstack")
@Appender(_shared_docs["melt"] % {"caller": "df.melt(", "other": "melt"})
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level: Level | None = None,
ignore_index: bool = True,
) -> DataFrame:
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
ignore_index=ignore_index,
).__finalize__(self, method="melt")
# ----------------------------------------------------------------------
# Time series-related
@doc(
Series.diff,
klass="DataFrame",
extra_params="axis : {0 or 'index', 1 or 'columns'}, default 0\n "
"Take difference over rows (0) or columns (1).\n",
other_klass="Series",
examples=dedent(
"""
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0 0
1 NaN -1 3
2 NaN -1 7
3 NaN -1 13
4 NaN 0 20
5 NaN 2 28
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
Overflow in input dtype
>>> df = pd.DataFrame({'a': [1, 0]}, dtype=np.uint8)
>>> df.diff()
a
0 NaN
1 255.0"""
),
)
def diff(self, periods: int = 1, axis: Axis = 0) -> DataFrame:
if not lib.is_integer(periods):
if not (
is_float(periods)
# error: "int" has no attribute "is_integer"
and periods.is_integer() # type: ignore[attr-defined]
):
raise ValueError("periods must be an integer")
periods = int(periods)
axis = self._get_axis_number(axis)
if axis == 1 and periods != 0:
return self - self.shift(periods, axis=axis)
new_data = self._mgr.diff(n=periods, axis=axis)
return self._constructor(new_data).__finalize__(self, "diff")
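    # Illustrative sketch (doctest-style, not part of the pandas source): for
    # axis=1 the branch above falls back to ``self - self.shift(periods, axis=1)``,
    # so a column-wise diff is just subtraction of a shifted copy.
    #
    # >>> df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 6, 9]})
    # >>> df.diff(axis=1).equals(df - df.shift(1, axis=1))   # expected: True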
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: IndexLabel,
ndim: int,
subset: DataFrame | Series | None = None,
) -> DataFrame | Series:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.ExponentialMovingWindow : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
sum 12.0 NaN
min 1.0 2.0
max NaN 8.0
Aggregate different functions over the columns and rename the index of the resulting
DataFrame.
>>> df.agg(x=('A', max), y=('B', 'min'), z=('C', np.mean))
A B C
x 7.0 NaN NaN
y NaN 2.0 NaN
z NaN NaN 6.0
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@doc(
_shared_docs["aggregate"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
)
def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs):
from pandas.core.apply import frame_apply
axis = self._get_axis_number(axis)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
result = op.agg()
if relabeling:
# This is to keep the order to columns occurrence unchanged, and also
# keep the order of new columns occurrence unchanged
# For the return values of reconstruct_func, if relabeling is
# False, columns and order will be None.
assert columns is not None
assert order is not None
result_in_dict = relabel_result(result, func, columns, order)
result = DataFrame(result_in_dict, index=columns)
return result
agg = aggregate
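    # Illustrative sketch (doctest-style, not part of the pandas source): named
    # aggregation via **kwargs is routed through ``reconstruct_func`` and
    # ``relabel_result`` above, so the result is indexed by the new labels.
    #
    # >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    # >>> df.agg(total=("A", "sum"), smallest=("B", "min"))
    # # index is ["total", "smallest"]; unreferenced cells are NaN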
# error: Signature of "any" incompatible with supertype "NDFrame" [override]
@overload # type: ignore[override]
def any(
self,
*,
axis: Axis = ...,
bool_only: bool | None = ...,
skipna: bool = ...,
level: None = ...,
**kwargs,
) -> Series:
...
@overload
def any(
self,
*,
axis: Axis = ...,
bool_only: bool | None = ...,
skipna: bool = ...,
level: Level,
**kwargs,
) -> DataFrame | Series:
...
@doc(NDFrame.any, **_shared_doc_kwargs)
def any(
self,
axis: Axis = 0,
bool_only: bool | None = None,
skipna: bool = True,
level: Level | None = None,
**kwargs,
) -> DataFrame | Series:
...
@doc(
_shared_docs["transform"],
klass=_shared_doc_kwargs["klass"],
axis=_shared_doc_kwargs["axis"],
)
def transform(
self, func: AggFuncType, axis: Axis = 0, *args, **kwargs
) -> DataFrame:
from pandas.core.apply import frame_apply
op = frame_apply(self, func=func, axis=axis, args=args, kwargs=kwargs)
result = op.transform()
assert isinstance(result, DataFrame)
return result
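    # Illustrative sketch (doctest-style, not part of the pandas source): unlike
    # ``agg``, ``transform`` must return a result with the same length as the
    # input, so reducing functions are expected to raise.
    #
    # >>> df = pd.DataFrame({"A": [1, 2, 3]})
    # >>> df.transform(lambda x: x + 1)   # same shape as df
    # >>> # df.transform("sum") would raise ValueError because it reduces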
def apply(
self,
func: AggFuncType,
axis: Axis = 0,
raw: bool = False,
result_type=None,
args=(),
**kwargs,
):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwargs
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing ``result_type='expand'`` will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwargs=kwargs,
)
return op.apply().__finalize__(self, method="apply")
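    # Illustrative sketch (doctest-style, not part of the pandas source):
    # ``raw=True`` hands each row/column to ``func`` as a plain ndarray, which
    # can be noticeably faster for NumPy reductions.
    #
    # >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
    # >>> df.apply(np.sum, axis=1)             # func receives Series rows
    # >>> df.apply(np.sum, axis=1, raw=True)   # func receives ndarray rows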
def applymap(
self, func: PythonFuncType, na_action: str | None = None, **kwargs
) -> DataFrame:
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
na_action : {None, 'ignore'}, default None
            If 'ignore', propagate NaN values, without passing them to func.
.. versionadded:: 1.2
**kwargs
Additional keyword arguments to pass as keywords arguments to
`func`.
.. versionadded:: 1.3.0
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Like Series.map, NA values can be ignored:
>>> df_copy = df.copy()
>>> df_copy.iloc[0, 0] = pd.NA
>>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
0 1
0 NaN 4
1 5.0 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
if na_action not in {"ignore", None}:
raise ValueError(
f"na_action must be 'ignore' or None. Got {repr(na_action)}"
)
ignore_na = na_action == "ignore"
func = functools.partial(func, **kwargs)
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func, ignore_na=ignore_na)
return lib.map_infer(x.astype(object)._values, func, ignore_na=ignore_na)
return self.apply(infer).__finalize__(self, "applymap")
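    # Illustrative sketch (doctest-style, not part of the pandas source):
    # ``na_action="ignore"`` makes ``map_infer`` skip missing values before
    # ``func`` is called, mirroring Series.map.
    #
    # >>> df = pd.DataFrame({"A": [pd.NA, 2.0]})
    # >>> df.applymap(lambda x: len(str(x)), na_action="ignore")   # NA passes through untouched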
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self,
other,
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> DataFrame:
"""
Append rows of `other` to the end of caller, returning a new object.
.. deprecated:: 1.4.0
Use :func:`concat` instead. For further details see
:ref:`whatsnew_140.deprecations.frame_series_append`
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
A new DataFrame consisting of the rows of caller and the rows of `other`.
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'), index=['x', 'y'])
>>> df
A B
x 1 2
y 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'), index=['x', 'y'])
>>> df.append(df2)
A B
x 1 2
y 3 4
x 5 6
y 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
        The following, while not recommended ways of generating DataFrames,
        show two approaches to building a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
warnings.warn(
"The frame.append method is deprecated "
"and will be removed from pandas in a future version. "
"Use pandas.concat instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return self._append(other, ignore_index, verify_integrity, sort)
def _append(
self,
other,
ignore_index: bool = False,
verify_integrity: bool = False,
sort: bool = False,
) -> DataFrame:
combined_columns = None
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
if not ignore_index:
raise TypeError("Can only append a dict if ignore_index=True")
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
combined_columns = self.columns.append(idx_diff)
row_df = other.to_frame().T
# infer_objects is needed for
# test_append_empty_frame_to_series_with_dateutil_tz
other = row_df.infer_objects().rename_axis(index.names, copy=False)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if self.index.name is not None and not ignore_index:
other.index.name = self.index.name
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
result = concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
if (
combined_columns is not None
and not sort
and not combined_columns.equals(result.columns)
):
# TODO: reindexing here is a kludge bc union_indexes does not
# pass sort to index.union, xref #43375
# combined_columns.equals check is necessary for preserving dtype
# in test_crosstab_normalize
result = result.reindex(combined_columns, axis=1)
return result.__finalize__(self, method="append")
def join(
self,
other: DataFrame | Series,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
validate: str | None = None,
) -> DataFrame:
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
              specified) with `other`'s index, and sort it lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
              of the calling frame's index.
* cross: creates the cartesian product from both frames, preserves the order
of the left keys.
.. versionadded:: 1.2.0
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
validate : str, optional
If specified, checks if join is of specified type.
* "one_to_one" or "1:1": check if join keys are unique in both left
and right datasets.
* "one_to_many" or "1:m": check if join keys are unique in left dataset.
* "many_to_one" or "m:1": check if join keys are unique in right dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 1.5.0
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-column(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
Using non-unique key values shows how they are matched.
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K1', 'K3', 'K0', 'K1'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K1 A2
3 K3 A3
4 K0 A4
5 K1 A5
>>> df.join(other.set_index('key'), on='key', validate='m:1')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K1 A2 B1
3 K3 A3 NaN
4 K0 A4 B0
5 K1 A5 B1
"""
return self._join_compat(
other,
on=on,
how=how,
lsuffix=lsuffix,
rsuffix=rsuffix,
sort=sort,
validate=validate,
)
def _join_compat(
self,
other: DataFrame | Series,
on: IndexLabel | None = None,
how: str = "left",
lsuffix: str = "",
rsuffix: str = "",
sort: bool = False,
validate: str | None = None,
):
from pandas.core.reshape.concat import concat
from pandas.core.reshape.merge import merge
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
if how == "cross":
return merge(
self,
other,
how=how,
on=on,
suffixes=(lsuffix, rsuffix),
sort=sort,
validate=validate,
)
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
validate=validate,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
if rsuffix or lsuffix:
raise ValueError(
"Suffixes not supported when joining multiple DataFrames"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined,
frame,
how=how,
left_index=True,
right_index=True,
validate=validate,
)
return joined
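    # Illustrative sketch (doctest-style, not part of the pandas source): a
    # single-frame ``join`` on a key column is routed through ``merge`` above
    # with ``left_on=on`` and ``right_index=True``, so the two calls below are
    # expected to be equivalent.
    #
    # >>> left = pd.DataFrame({"key": ["a", "b"], "x": [1, 2]})
    # >>> right = pd.DataFrame({"y": [10, 20]}, index=["a", "b"])
    # >>> left.join(right, on="key").equals(
    # ...     left.merge(right, left_on="key", right_index=True, how="left"))   # expected: True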
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right: DataFrame | Series,
how: str = "inner",
on: IndexLabel | None = None,
left_on: IndexLabel | None = None,
right_on: IndexLabel | None = None,
left_index: bool = False,
right_index: bool = False,
sort: bool = False,
suffixes: Suffixes = ("_x", "_y"),
copy: bool = True,
indicator: bool = False,
validate: str | None = None,
) -> DataFrame:
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(
self, decimals: int | dict[IndexLabel, int] | Series = 0, *args, **kwargs
) -> DataFrame:
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df: DataFrame, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(ser: Series, decimals: int):
if is_integer_dtype(ser.dtype) or is_float_dtype(ser.dtype):
return ser.round(decimals)
return ser
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series) and not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
if is_dict_like(decimals) and not all(
is_integer(value) for _, value in decimals.items()
):
raise TypeError("Values in decimals must be integers")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
).__finalize__(self, method="round")
else:
return self
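    # Illustrative sketch (doctest-style, not part of the pandas source): with a
    # dict (or Series) argument, only the named columns are rounded (see
    # ``_dict_round`` above); everything else passes through unchanged.
    #
    # >>> df = pd.DataFrame({"dogs": [0.214], "cats": [0.345], "name": ["rex"]})
    # >>> df.round({"dogs": 2})   # "cats" and the non-numeric "name" are untouched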
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(
self,
method: str | Callable[[np.ndarray, np.ndarray], float] = "pearson",
min_periods: int = 1,
numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
numeric_only : bool, default True
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. deprecated:: 1.5.0
The default value of ``numeric_only`` will be ``False`` in a future
version of pandas.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith : Compute pairwise correlation with another
DataFrame or Series.
Series.corr : Compute the correlation between two Series.
Notes
-----
Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations.
* `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_
* `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_
* `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
>>> df = pd.DataFrame([(1, 1), (2, np.nan), (np.nan, 3), (4, 4)],
... columns=['dogs', 'cats'])
>>> df.corr(min_periods=3)
dogs cats
dogs 1.0 NaN
cats NaN 1.0
""" # noqa:E501
numeric_only_bool = com.resolve_numeric_only(numeric_only)
data = self._get_numeric_data() if numeric_only_bool else self
if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
com.deprecate_numeric_only_default(type(self), "corr")
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
if method == "pearson":
correl = libalgos.nancorr(mat, minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(mat, minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
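    # Illustrative sketch (doctest-style, not part of the pandas source): a
    # callable ``method`` is evaluated only for the upper triangle and mirrored,
    # so the result is symmetric with 1.0 on the diagonal regardless of what the
    # callable returns.
    #
    # >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 1.0, 4.0]})
    # >>> df.corr(method=lambda x, y: 0.0)   # off-diagonal 0.0, diagonal forced to 1.0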
def cov(
self,
min_periods: int | None = None,
ddof: int | None = 1,
numeric_only: bool | lib.NoDefault = lib.no_default,
) -> DataFrame:
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
ddof : int, default 1
Delta degrees of freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
.. versionadded:: 1.1.0
numeric_only : bool, default True
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. deprecated:: 1.5.0
The default value of ``numeric_only`` will be ``False`` in a future
version of pandas.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.ExponentialMovingWindow.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-ddof.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
        because the estimated covariance matrix is not guaranteed to be positive
        semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<https://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_
matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_only_bool = com.resolve_numeric_only(numeric_only)
data = self._get_numeric_data() if numeric_only_bool else self
if numeric_only is lib.no_default and len(data.columns) < len(self.columns):
com.deprecate_numeric_only_default(type(self), "cov")
cols = data.columns
idx = cols.copy()
mat = data.to_numpy(dtype=float, na_value=np.nan, copy=False)
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
base_cov = np.empty((mat.shape[1], mat.shape[1]))
base_cov.fill(np.nan)
else:
base_cov = np.cov(mat.T, ddof=ddof)
base_cov = base_cov.reshape((len(cols), len(cols)))
else:
base_cov = libalgos.nancorr(mat, cov=True, minp=min_periods)
return self._constructor(base_cov, index=idx, columns=cols)
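    # Illustrative sketch (doctest-style, not part of the pandas source): with
    # complete data the computation above delegates to ``np.cov`` on the
    # transposed values, so the two are expected to agree.
    #
    # >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [2.0, 4.0, 5.0]})
    # >>> np.allclose(df.cov(), np.cov(df.to_numpy().T, ddof=1))   # expected: True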
def corrwith(
self,
other,
axis: Axis = 0,
drop=False,
method="pearson",
numeric_only: bool | lib.NoDefault = lib.no_default,
) -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
numeric_only : bool, default True
Include only `float`, `int` or `boolean` data.
.. versionadded:: 1.5.0
.. deprecated:: 1.5.0
The default value of ``numeric_only`` will be ``False`` in a future
version of pandas.
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr : Compute pairwise correlation of columns.
Examples
--------
>>> index = ["a", "b", "c", "d", "e"]
>>> columns = ["one", "two", "three", "four"]
>>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4), index=index, columns=columns)
>>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4), index=index[:4], columns=columns)
>>> df1.corrwith(df2)
one 1.0
two 1.0
three 1.0
four 1.0
dtype: float64
>>> df2.corrwith(df1, axis=1)
a 1.0
b 1.0
c 1.0
d 1.0
e NaN
dtype: float64
""" # noqa:E501
axis = self._get_axis_number(axis)
numeric_only_bool = com.resolve_numeric_only(numeric_only)
this = self._get_numeric_data() if numeric_only_bool else self
if numeric_only is lib.no_default and len(this.columns) < len(self.columns):
com.deprecate_numeric_only_default(type(self), "corrwith")
# GH46174: when other is a Series object and axis=0, we achieve a speedup over
# passing .corr() to .apply() by taking the columns as ndarrays and iterating
# over the transposition row-wise. Then we delegate the correlation coefficient
# computation and null-masking to np.corrcoef and np.isnan respectively,
# which are much faster. We exploit the fact that the Spearman correlation
# of two vectors is equal to the Pearson correlation of their ranks to use
# substantially the same method for Pearson and Spearman,
# just with intermediate argsorts on the latter.
if isinstance(other, Series):
if axis == 0 and method in ["pearson", "spearman"]:
corrs = {}
if numeric_only:
cols = self.select_dtypes(include=np.number).columns
ndf = self[cols].values.transpose()
else:
cols = self.columns
ndf = self.values.transpose()
k = other.values
if method == "pearson":
for i, r in enumerate(ndf):
nonnull_mask = ~np.isnan(r) & ~np.isnan(k)
corrs[cols[i]] = np.corrcoef(r[nonnull_mask], k[nonnull_mask])[
0, 1
]
else:
for i, r in enumerate(ndf):
nonnull_mask = ~np.isnan(r) & ~np.isnan(k)
corrs[cols[i]] = np.corrcoef(
r[nonnull_mask].argsort().argsort(),
k[nonnull_mask].argsort().argsort(),
)[0, 1]
return Series(corrs)
else:
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = self._constructor_sliced(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl._append(
Series([np.nan] * len(idx_diff), index=idx_diff)
)
return correl
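    # Illustrative sketch (doctest-style, not part of the pandas source):
    # correlating a frame with itself column-wise gives 1.0 for every column,
    # since each column is perfectly correlated with itself.
    #
    # >>> df = pd.DataFrame({"a": [1.0, 2.0, 4.0], "b": [3.0, 1.0, 2.0]})
    # >>> df.corrwith(df)   # a    1.0
    #                       # b    1.0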
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(
self, axis: Axis = 0, level: Level | None = None, numeric_only: bool = False
):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each row.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.value_counts: Count unique combinations of columns.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
"""
axis = self._get_axis_number(axis)
if level is not None:
warnings.warn(
"Using the level keyword in DataFrame and Series aggregations is "
"deprecated and will be removed in a future version. Use groupby "
"instead. df.count(level=1) should use df.groupby(level=1).count().",
FutureWarning,
stacklevel=find_stack_level(),
)
res = self._count_level(level, axis=axis, numeric_only=numeric_only)
return res.__finalize__(self, method="count")
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = self._constructor_sliced(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._mgr.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = self._constructor_sliced(
counts, index=frame._get_agg_axis(axis)
)
return result.astype("int64").__finalize__(self, method="count")
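    # Illustrative sketch (doctest-style, not part of the pandas source):
    # ``count`` is effectively ``notna(...).sum`` along the requested axis, as
    # the fast path above shows.
    #
    # >>> df = pd.DataFrame({"A": [1.0, np.nan], "B": [1.0, 2.0]})
    # >>> df.count().equals(df.notna().sum())   # expected: True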
def _count_level(self, level: Level, axis: int = 0, numeric_only: bool = False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
# Mask NaNs: Mask rows or columns where the index level is NaN, and all
# values in the DataFrame that are NaN
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
values_mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
values_mask = notna(frame.values)
index_mask = notna(count_axis.get_level_values(level=level))
if axis == 1:
mask = index_mask & values_mask
else:
mask = index_mask.reshape(-1, 1) & values_mask
if isinstance(level, int):
level_number = level
else:
level_number = count_axis._get_level_number(level)
level_name = count_axis._names[level_number]
level_index = count_axis.levels[level_number]._rename(name=level_name)
level_codes = ensure_platform_int(count_axis.codes[level_number])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=axis)
if axis == 1:
result = self._constructor(counts, index=agg_axis, columns=level_index)
else:
result = self._constructor(counts, index=level_index, columns=agg_axis)
return result
def _reduce(
self,
op,
name: str,
*,
axis: Axis = 0,
skipna: bool = True,
numeric_only: bool | None = None,
filter_type=None,
**kwds,
):
assert filter_type is None or filter_type == "bool", filter_type
out_dtype = "bool" if filter_type == "bool" else None
if numeric_only is None and name in ["mean", "median"]:
own_dtypes = [arr.dtype for arr in self._mgr.arrays]
dtype_is_dt = np.array(
[is_datetime64_any_dtype(dtype) for dtype in own_dtypes],
dtype=bool,
)
if dtype_is_dt.any():
warnings.warn(
"DataFrame.mean and DataFrame.median with numeric_only=None "
"will include datetime64 and datetime64tz columns in a "
"future version.",
FutureWarning,
stacklevel=find_stack_level(),
)
# Non-copy equivalent to
# dt64_cols = self.dtypes.apply(is_datetime64_any_dtype)
# cols = self.columns[~dt64_cols]
# self = self[cols]
predicate = lambda x: not is_datetime64_any_dtype(x.dtype)
mgr = self._mgr._get_data_subset(predicate)
self = type(self)(mgr)
# TODO: Make other agg func handle axis=None properly GH#21597
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
assert axis in [0, 1]
def func(values: np.ndarray):
# We only use this in the case that operates on self.values
return op(values, axis=axis, skipna=skipna, **kwds)
def blk_func(values, axis=1):
if isinstance(values, ExtensionArray):
if not is_1d_only_ea_dtype(values.dtype) and not isinstance(
self._mgr, ArrayManager
):
return values._reduce(name, axis=1, skipna=skipna, **kwds)
return values._reduce(name, skipna=skipna, **kwds)
else:
return op(values, axis=axis, skipna=skipna, **kwds)
def _get_data() -> DataFrame:
if filter_type is None:
data = self._get_numeric_data()
else:
# GH#25101, GH#24434
assert filter_type == "bool"
data = self._get_bool_data()
return data
numeric_only_bool = com.resolve_numeric_only(numeric_only)
if numeric_only is not None or axis == 0:
# For numeric_only non-None and axis non-None, we know
# which blocks to use and no try/except is needed.
# For numeric_only=None only the case with axis==0 and no object
# dtypes are unambiguous can be handled with BlockManager.reduce
# Case with EAs see GH#35881
df = self
if numeric_only_bool:
df = _get_data()
if axis == 1:
df = df.T
axis = 0
ignore_failures = numeric_only is None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager.reduce
res, _ = df._mgr.reduce(blk_func, ignore_failures=ignore_failures)
out = df._constructor(res).iloc[0]
if out_dtype is not None:
out = out.astype(out_dtype)
if axis == 0 and len(self) == 0 and name in ["sum", "prod"]:
# Even if we are object dtype, follow numpy and return
# float64, see test_apply_funcs_over_empty
out = out.astype(np.float64)
if numeric_only is None and out.shape[0] != df.shape[1]:
# columns have been dropped GH#41480
com.deprecate_numeric_only_default(
type(self), name, deprecate_none=True
)
return out
assert numeric_only is None
data = self
values = data.values
try:
result = func(values)
except TypeError:
# e.g. in nanops trying to convert strs to float
data = _get_data()
labels = data._get_agg_axis(axis)
values = data.values
with np.errstate(all="ignore"):
result = func(values)
# columns have been dropped GH#41480
arg_name = "numeric_only"
if name in ["all", "any"]:
arg_name = "bool_only"
warnings.warn(
"Dropping of nuisance columns in DataFrame reductions "
f"(with '{arg_name}=None') is deprecated; in a future "
"version this will raise TypeError. Select only valid "
"columns before calling the reduction.",
FutureWarning,
stacklevel=find_stack_level(),
)
if hasattr(result, "dtype"):
if filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
elif filter_type is None and is_object_dtype(result.dtype):
try:
result = result.astype(np.float64)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
pass
result = self._constructor_sliced(result, index=labels)
return result
def _reduce_axis1(self, name: str, func, skipna: bool) -> Series:
"""
Special case for _reduce to try to avoid a potentially-expensive transpose.
Apply the reduction block-wise along axis=1 and then reduce the resulting
1D arrays.
"""
if name == "all":
result = np.ones(len(self), dtype=bool)
ufunc = np.logical_and
elif name == "any":
result = np.zeros(len(self), dtype=bool)
# error: Incompatible types in assignment
# (expression has type "_UFunc_Nin2_Nout1[Literal['logical_or'],
# Literal[20], Literal[False]]", variable has type
# "_UFunc_Nin2_Nout1[Literal['logical_and'], Literal[20],
# Literal[True]]")
ufunc = np.logical_or # type: ignore[assignment]
else:
raise NotImplementedError(name)
for arr in self._mgr.arrays:
middle = func(arr, axis=0, skipna=skipna)
result = ufunc(result, middle)
res_ser = self._constructor_sliced(result, index=self.index)
return res_ser
def nunique(self, axis: Axis = 0, dropna: bool = True) -> Series:
"""
Count number of distinct elements in specified axis.
Return Series with number of distinct elements. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [4, 5, 6], 'B': [4, 1, 1]})
>>> df.nunique()
A 3
B 2
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
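    # Illustrative sketch (doctest-style, not part of the pandas source):
    # ``nunique`` is a thin wrapper around ``Series.nunique`` applied column- or
    # row-wise, so ``dropna`` behaves the same way as on a Series.
    #
    # >>> df = pd.DataFrame({"A": [1, 1, np.nan]})
    # >>> df.nunique()               # A    1
    # >>> df.nunique(dropna=False)   # A    2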
@doc(_shared_docs["idxmin"], numeric_only_default="False")
def idxmin(
self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False
) -> Series:
axis = self._get_axis_number(axis)
if numeric_only:
data = self._get_numeric_data()
else:
data = self
res = data._reduce(
nanops.nanargmin, "argmin", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = data._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return data._constructor_sliced(result, index=data._get_agg_axis(axis))
@doc(_shared_docs["idxmax"], numeric_only_default="False")
def idxmax(
self, axis: Axis = 0, skipna: bool = True, numeric_only: bool = False
) -> Series:
axis = self._get_axis_number(axis)
if numeric_only:
data = self._get_numeric_data()
else:
data = self
res = data._reduce(
nanops.nanargmax, "argmax", axis=axis, skipna=skipna, numeric_only=False
)
indices = res._values
# indices will always be np.ndarray since axis is not None and
# values is a 2d array for DataFrame
# error: Item "int" of "Union[int, Any]" has no attribute "__iter__"
assert isinstance(indices, np.ndarray) # for mypy
index = data._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return data._constructor_sliced(result, index=data._get_agg_axis(axis))
def _get_agg_axis(self, axis_num: int) -> Index:
"""
Let's be explicit about this.
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(
self, axis: Axis = 0, numeric_only: bool = False, dropna: bool = True
) -> DataFrame:
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the mode of wings
are both 0 and 2. Because the resulting DataFrame has two rows,
the second row of ``species`` and ``legs`` contains ``NaN``.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
        Setting ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
data = data.apply(f, axis=axis)
# Ensure index is type stable (should always use int index)
if data.empty:
data.index = default_index(0)
return data
def quantile(
self,
q=0.5,
axis: Axis = 0,
numeric_only: bool | lib.NoDefault = no_default,
interpolation: str = "linear",
):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'}, default 0
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
.. deprecated:: 1.5.0
The default value of ``numeric_only`` will be ``False`` in a future
version of pandas.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
axis = self._get_axis_number(axis)
any_not_numeric = any(not is_numeric_dtype(x) for x in self.dtypes)
if numeric_only is no_default and any_not_numeric:
com.deprecate_numeric_only_default(type(self), "quantile")
numeric_only = com.resolve_numeric_only(numeric_only)
if not is_list_like(q):
# BlockManager.quantile expects listlike, so we wrap and unwrap here
res_df = self.quantile(
[q], axis=axis, numeric_only=numeric_only, interpolation=interpolation
)
res = res_df.iloc[0]
if axis == 1 and len(self) == 0:
# GH#41544 try to get an appropriate dtype
dtype = find_common_type(list(self.dtypes))
if needs_i8_conversion(dtype):
return res.astype(dtype)
return res
q = Index(q, dtype=np.float64)
data = self._get_numeric_data() if numeric_only else self
if axis == 1:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
dtype = np.float64
if axis == 1:
# GH#41544 try to get an appropriate dtype
cdtype = find_common_type(list(self.dtypes))
if needs_i8_conversion(cdtype):
dtype = cdtype
res = self._constructor([], index=q, columns=cols, dtype=dtype)
return res.__finalize__(self, method="quantile")
res = data._mgr.quantile(qs=q, axis=1, interpolation=interpolation)
result = self._constructor(res)
return result.__finalize__(self, method="quantile")
@doc(NDFrame.asfreq, **_shared_doc_kwargs)
def asfreq(
self,
freq: Frequency,
method=None,
how: str | None = None,
normalize: bool = False,
fill_value=None,
) -> DataFrame:
return super().asfreq(
freq=freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
@doc(NDFrame.resample, **_shared_doc_kwargs)
def resample(
self,
rule,
axis=0,
closed: str | None = None,
label: str | None = None,
convention: str = "start",
kind: str | None = None,
loffset=None,
base: int | None = None,
on=None,
level=None,
origin: str | TimestampConvertibleTypes = "start_day",
offset: TimedeltaConvertibleTypes | None = None,
group_keys: bool | lib.NoDefault = no_default,
) -> Resampler:
return super().resample(
rule=rule,
axis=axis,
closed=closed,
label=label,
convention=convention,
kind=kind,
loffset=loffset,
base=base,
on=on,
level=level,
origin=origin,
offset=offset,
group_keys=group_keys,
)
def to_timestamp(
self,
freq: Frequency | None = None,
how: str = "start",
axis: Axis = 0,
copy: bool = True,
) -> DataFrame:
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, PeriodIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_timestamp(freq=freq, how=how)
setattr(new_obj, axis_name, new_ax)
return new_obj
def to_period(
self, freq: Frequency | None = None, axis: Axis = 0, copy: bool = True
) -> DataFrame:
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
        freq : str, optional
Frequency of the PeriodIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with PeriodIndex
Examples
--------
>>> idx = pd.to_datetime(
... [
... "2001-03-31 00:00:00",
... "2002-05-31 00:00:00",
... "2003-08-31 00:00:00",
... ]
... )
>>> idx
DatetimeIndex(['2001-03-31', '2002-05-31', '2003-08-31'],
dtype='datetime64[ns]', freq=None)
>>> idx.to_period("M")
PeriodIndex(['2001-03', '2002-05', '2003-08'], dtype='period[M]')
For the yearly frequency
>>> idx.to_period("Y")
PeriodIndex(['2001', '2002', '2003'], dtype='period[A-DEC]')
"""
new_obj = self.copy(deep=copy)
axis_name = self._get_axis_name(axis)
old_ax = getattr(self, axis_name)
if not isinstance(old_ax, DatetimeIndex):
raise TypeError(f"unsupported Type {type(old_ax).__name__}")
new_ax = old_ax.to_period(freq=freq)
setattr(new_obj, axis_name, new_ax)
return new_obj
def isin(self, values) -> DataFrame:
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
To check if ``values`` is *not* in the DataFrame, use the ``~`` operator:
>>> ~df.isin([0, 2])
num_legs num_wings
falcon False False
dog True False
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in other.
>>> other = pd.DataFrame({'num_legs': [8, 3], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon False True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
result = concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
result = self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
result = self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a '{type(values).__name__}'"
)
result = self._constructor(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
return result.__finalize__(self, method="isin")
# ----------------------------------------------------------------------
# Add index and columns
_AXIS_ORDERS = ["index", "columns"]
_AXIS_TO_AXIS_NUMBER: dict[Axis, int] = {
**NDFrame._AXIS_TO_AXIS_NUMBER,
1: 1,
"columns": 1,
}
_AXIS_LEN = len(_AXIS_ORDERS)
_info_axis_number = 1
_info_axis_name = "columns"
index: Index = properties.AxisProperty(
axis=1, doc="The index (row labels) of the DataFrame."
)
columns: Index = properties.AxisProperty(
axis=0, doc="The column labels of the DataFrame."
)
@property
def _AXIS_NUMBERS(self) -> dict[str, int]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NUMBERS
return {"index": 0, "columns": 1}
@property
def _AXIS_NAMES(self) -> dict[int, str]:
""".. deprecated:: 1.1.0"""
super()._AXIS_NAMES
return {0: "index", 1: "columns"}
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
# ----------------------------------------------------------------------
# Internal Interface Methods
def _to_dict_of_blocks(self, copy: bool = True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY - only works for BlockManager
"""
mgr = self._mgr
# convert to BlockManager if needed -> this way support ArrayManager as well
mgr = mgr_to_mgr(mgr, "block")
mgr = cast(BlockManager, mgr)
return {
k: self._constructor(v).__finalize__(self)
for k, v, in mgr.to_dict(copy=copy).items()
}
@property
def values(self) -> np.ndarray:
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]])
        A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._mgr.as_array()
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def ffill(
self: DataFrame,
axis: None | Axis = None,
inplace: bool = False,
limit: None | int = None,
downcast=None,
) -> DataFrame | None:
return super().ffill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
def bfill(
self: DataFrame,
axis: None | Axis = None,
inplace: bool = False,
limit: None | int = None,
downcast=None,
) -> DataFrame | None:
return super().bfill(axis, inplace, limit, downcast)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "lower", "upper"]
)
def clip(
self: DataFrame,
lower=None,
upper=None,
axis: Axis | None = None,
inplace: bool = False,
*args,
**kwargs,
) -> DataFrame | None:
return super().clip(lower, upper, axis, inplace, *args, **kwargs)
@deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
def interpolate(
self: DataFrame,
method: str = "linear",
axis: Axis = 0,
limit: int | None = None,
inplace: bool = False,
limit_direction: str | None = None,
limit_area: str | None = None,
downcast: str | None = None,
**kwargs,
) -> DataFrame | None:
return super().interpolate(
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
def where(
self,
cond,
other=lib.no_default,
inplace=False,
axis=None,
level=None,
errors: IgnoreRaise = "raise",
try_cast=lib.no_default,
):
return super().where(cond, other, inplace, axis, level, errors, try_cast)
@deprecate_nonkeyword_arguments(
version=None, allowed_args=["self", "cond", "other"]
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors: IgnoreRaise = "raise",
try_cast=lib.no_default,
):
return super().mask(cond, other, inplace, axis, level, errors, try_cast)
DataFrame._add_numeric_operations()
ops.add_flex_arithmetic_methods(DataFrame)
def _from_nested_dict(data) -> collections.defaultdict:
new_data: collections.defaultdict = collections.defaultdict(dict)
for index, s in data.items():
for col, v in s.items():
new_data[col][index] = v
return new_data
def _reindex_for_setitem(value: DataFrame | Series, index: Index) -> ArrayLike:
# reindex if necessary
if value.index.equals(index) or not len(index):
return value._values.copy()
# GH#4107
try:
reindexed_value = value.reindex(index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
raise TypeError(
"incompatible index of inserted column with frame index"
) from err
return reindexed_value
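# --- Illustrative usage sketch (added for this excerpt; not part of pandas itself) ---
# A minimal round trip through the to_period()/to_timestamp() methods defined above,
# assuming a standard pandas installation.
if __name__ == "__main__":
    import pandas as pd

    df = pd.DataFrame(
        {"sales": [10, 20, 30]},
        index=pd.date_range("2021-01-31", periods=3, freq="M"),
    )
    monthly = df.to_period("M")        # DatetimeIndex -> PeriodIndex
    restored = monthly.to_timestamp()  # PeriodIndex -> DatetimeIndex (start of period)
    print(monthly.index)
    print(restored.index)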
| 34.381235
| 170
| 0.529347
|
b899882f23fc7b8ce7f515b8fb91452e63b96b9d
| 24,921
|
py
|
Python
|
SVRecords/SVAnnotator.py
|
ccmbioinfo/crg
|
3ff7a884463e872281d723934dde904d4acffe2a
|
[
"MIT"
] | null | null | null |
SVRecords/SVAnnotator.py
|
ccmbioinfo/crg
|
3ff7a884463e872281d723934dde904d4acffe2a
|
[
"MIT"
] | 5
|
2020-07-06T14:02:59.000Z
|
2021-04-08T20:01:41.000Z
|
SVRecords/SVAnnotator.py
|
ccmbioinfo/crg
|
3ff7a884463e872281d723934dde904d4acffe2a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import sqlite3
import os
import subprocess
from pybedtools import BedTool
from collections import defaultdict
from enum import Enum, auto
class SVTYPE(Enum):
def _generate_next_value_(name, start, count, last_values):
return name
DEL = auto()
DUP = auto()
INS = auto()
INV = auto()
IDP = auto()
TRA = auto()
class SVAnnotator:
def __init__(self, exon_bed, hgmd_db, hpo, exac, omim, biomart):
self.make_gene_ref_df(biomart)
if os.path.isfile(hpo):
print('Annotating genes with HPO terms')
self.HPO = True
self.annotate_hpo(hpo)
else:
print('No valid HPO file specified. Skipping annotation.')
self.HPO = False
print('Annotating genes with OMIM phenotypes and inheritance patterns')
self.annotate_omim(omim)
print('Annotating genes with ExAC transcript probabilities')
self.annotate_exac(exac)
# technical note: drop duplicates before setting index - doing the reverse order will drop all duplicated columns instead of keeping one copy
self.gene_ref_df = self.gene_ref_df.drop_duplicates(keep='first').set_index('BioMart Ensembl Gene ID').astype(str)
def set_column_values(self, df, annotation_dict, column_name):
for interval, data in annotation_dict.items():
df.loc[interval, column_name] = data
def append_prefix_to_columns(self, df, prefix):
#used to avoid collision of column names between different dataframes
df.rename(columns={col: "%s %s" % (prefix, col) for col in df.columns}, inplace=True)
def make_hgnc_dict(self, hgnc):
hgnc_dict = {}
with open(hgnc) as f:
for line in f.readlines()[1:]:
fields = line.strip('\n').split("\t")
approved_symbol = fields[1].upper()
prev_symbols = fields[4].split(', ')
synonymous_symbols = fields[5].split(', ')
for sym in prev_symbols:
hgnc_dict[sym.upper()] = approved_symbol
for sym in synonymous_symbols:
hgnc_dict[sym.upper()] = approved_symbol
self.hgnc_dict = hgnc_dict
def gene2hgnc(self, col):
def translate(cell):
if isinstance(cell, str):
if cell in self.hgnc_dict:
#print('individual sym match: %s %s' % (cell, self.hgnc_dict[cell]))
return self.hgnc_dict[cell]
return cell
#return self.hgnc_dict[cell] if cell in self.hgnc_dict else cell
elif isinstance(cell, list):
for sym in cell:
if sym in self.hgnc_dict:
# if len(cell) > 1: print('list match: %s %s' % (sym, self.hgnc_dict[sym]))
# else: print('single element match: %s %s' % (sym, self.hgnc_dict[sym]))
return self.hgnc_dict[sym]
                return cell[0]  # translation to an HGNC symbol failed; fall back to the first gene name
else:
                raise ValueError('DataFrame member is not of type str or list. %s' % str(cell))
return col.apply(translate)
def calc_exons_spanned(self, sample_df, exon_bed):
print('Calculating the number of exons affected by each structural variant ...')
exon_counts = defaultdict(int) #incrementing dict and setting value in df is faster than incrementing values in the df
exon_ref = BedTool(exon_bed)
sample_bedtool = BedTool(list(sample_df.reset_index()[['CHROM', 'POS', 'END', 'SVTYPE']].values))
for interval in sample_bedtool.intersect(exon_ref, wa=True):
exon_counts[(str(interval.chrom), str(interval.start), str(interval.stop), str(interval[3]))] += 1
count_df = pd.Series(exon_counts).to_frame().astype(str)
count_df.index.names = ['CHROM', 'POS', 'END', 'SVTYPE']
count_df.columns = ['EXONS_SPANNED']
return sample_df.join(count_df).fillna(value={'EXONS_SPANNED': 0})
def calc_exon_boundaries(self, sample_df, exon_bed):
print('Calculating exon boundaries closest to structural variant breakpoints ...')
def find_min_distance(position, boundaries):
#range is boundary minus position of breakpoint plus one to account for 1-based coordinates
distance = {((boundary-position) + 1):boundary for boundary in boundaries['value'].tolist()}
min_distance = min(distance, key=abs)
min_boundary = distance[min_distance]
gene = boundaries[boundaries['value'] == min_boundary]['GENE'].values[0]
return min_distance, min_boundary, gene
exons = pd.read_csv(exon_bed, sep='\t', names=['CHROM', 'POS', 'END', 'GENE'])
#make single column containing all exon boundaries
exons = pd.melt(exons, id_vars=['CHROM', 'GENE'], value_vars=['POS', 'END'])
boundary_distances = defaultdict()
boundary_distances['left'] = {'nearest_boundary':[],'nearest_distance':[]}
boundary_distances['right'] = {'nearest_boundary':[],'nearest_distance':[]}
for index,row in sample_df.iterrows():
chr, pos, end = str(row['CHROM']), int(row['POS']), int(row['END'])
print(chr, pos, end)
boundaries = exons[exons['CHROM'] == chr]
if len(boundaries) != 0:
for breakpoint in 'left', 'right':
position = pos if breakpoint == 'left' else end
min_distance, min_boundary, gene = find_min_distance(position, boundaries)
boundary_distances[breakpoint]['nearest_boundary'].append(gene + '|' + str(min_boundary))
boundary_distances[breakpoint]['nearest_distance'].append(min_distance)
else:
for breakpoint in 'left', 'right':
# for non-canonical chromosomes or MT
boundary_distances[breakpoint]['nearest_boundary'].append('.')
boundary_distances[breakpoint]['nearest_distance'].append('.')
sample_df['nearestLeftExonBoundary'], sample_df['nearestLeftExonDistance'] = boundary_distances['left']['nearest_boundary'], boundary_distances['left']['nearest_distance']
sample_df['nearestRightExonBoundary'],sample_df['nearestRightExonDistance'] = boundary_distances['right']['nearest_boundary'], boundary_distances['right']['nearest_distance']
return(sample_df.set_index(['CHROM', 'POS', 'END', 'SVTYPE']))
def find_min_distance(self, position, boundaries):
#range is boundary minus position of breakpoint plus one to account for 1-based coordinates
distance = {((boundary-position) + 1):boundary for boundary in boundaries['value'].tolist()}
min_distance = min(distance, key=abs)
min_boundary = distance[min_distance]
gene = boundaries[boundaries['value'] == min_boundary]['GENE'].values[0]
return min_distance, min_boundary, gene
def annotate_hgmd(self, hgmd, sv_record):
print('Annotating genes with published cases of pathogenic structural variants from HGMD')
def get_hgmd_df():
conn = sqlite3.connect(hgmd)
gros_del = pd.read_sql_query('''
SELECT del.DISEASE, del.TAG, del.DESCR, del.gene,
printf('%s:%s:%s:%s', del.JOURNAL, del.AUTHOR, del.YEAR, del.PMID) AS JOURNAL_DETAILS,
ALLGENES.HGNCID
FROM GROSDEL as del
LEFT JOIN ALLGENES ON ALLGENES.GENE=del.GENE;
''', conn)
gros_ins = pd.read_sql_query('''
SELECT ins.DISEASE, ins.TAG, ins.DESCR, ins.gene,
printf('%s:%s:%s:%s', ins.JOURNAL, ins.AUTHOR, ins.YEAR, ins.PMID) AS JOURNAL_DETAILS,
ALLGENES.HGNCID
FROM GROSINS as ins
LEFT JOIN ALLGENES ON ALLGENES.GENE=ins.GENE
WHERE ins.type='I';
''', conn)
gros_dup = pd.read_sql_query('''
SELECT ins.DISEASE, ins.TAG, ins.DESCR, ins.gene,
printf('%s:%s:%s:%s', ins.JOURNAL, ins.AUTHOR, ins.YEAR, ins.PMID) AS JOURNAL_DETAILS,
ALLGENES.HGNCID
FROM GROSINS as ins
LEFT JOIN ALLGENES ON ALLGENES.GENE=ins.GENE
WHERE ins.type='D';
''', conn)
conn.close()
return gros_del, gros_ins, gros_dup
def groupby_genes(df):
df['hgncID'] = df['hgncID'].astype(str)
#df['omimid'] = df['omimid'].astype(str)
#df['gene'] = self.gene2hgnc(df['gene'])
df = df.groupby(by='gene', as_index=False).agg(lambda x: "%s" % ' & '.join(x))
df['hgncID'] = df['hgncID'].apply(lambda col: col.split(', ')[0])
#df['omimid'] = df['omimid'].apply(lambda col: col.split(', ')[0])
return df
hgmd_sv_df = pd.DataFrame()
gros_del, gros_ins, gros_dup = get_hgmd_df()
gros_del = groupby_genes(gros_del)
gros_ins = groupby_genes(gros_ins)
gros_dup = groupby_genes(gros_dup)
for df in [gros_del, gros_ins, gros_dup]:
df['gene'] = df['gene'].apply(lambda symbol: symbol.upper())
self.append_prefix_to_columns(df, 'HGMD')
gros_del['HGMD SVTYPE'] = SVTYPE.DEL.value
gros_ins['HGMD SVTYPE'] = SVTYPE.INS.value
gros_dup['HGMD SVTYPE'] = SVTYPE.DUP.value
# hgmd_sv_df = hgmd_sv_df.rename(columns={'HGMD gene': 'Genes in HGMD'})
hgmd_sv_df = pd.concat([gros_del, gros_ins, gros_dup], ignore_index=True, sort=False)
hgmd_sv_df['Genes in HGMD'] = hgmd_sv_df['HGMD gene']
hgmd_sv_df = hgmd_sv_df.set_index(keys=['HGMD gene', 'HGMD SVTYPE']).astype(str)
return sv_record.join(hgmd_sv_df, on=['BioMart Associated Gene Name', 'SVTYPE'], how='left')
def prioritized_annotation(self, gene_ref_df, annotation_df, matched_fields):
matched_rows = []
ann_match_columns = list(matched_fields.keys())
ref_match_columns = list(matched_fields.values())
gene_ref_df_matching_cols = gene_ref_df[ref_match_columns].drop_duplicates()
annotation_df = annotation_df.drop_duplicates()
#join on equivalent fields
for ann_field, ref_field in matched_fields.items():
matched = gene_ref_df_matching_cols.join(annotation_df.set_index(ann_field), on=ref_field, how='inner')
matched_rows.append(matched)
#drop columns from annotation_df used for joining
# for table in matched_rows:
# for field in ann_match_columns:
# try:
# table.drop(columns=[field, ], axis=0, inplace=True)
# #print("Dropped %s" % field)
# except KeyError:
# pass #tried to drop column which was joined on
merged_df = pd.concat(matched_rows, ignore_index=True, sort=False).drop_duplicates().set_index(ref_match_columns).dropna(how='all')
#add the remaining fields to the reference dataframe
return self.gene_ref_df.join(merged_df, on=ref_match_columns, how='left').drop_duplicates()
def left_join(self, df1, df2, field1, field2):
df2 = df2.drop_duplicates().set_index(field2).dropna(how='all')
return df1.set_index(field1).join(df2, how='left').drop_duplicates().rename_axis(field1).reset_index()
def annotate_hpo(self, hpo):
matching_fields = {'HPO Gene ID': 'BioMart Ensembl Gene ID',}
hpo_df = pd.read_csv(hpo, sep='\t')
hpo_df.columns = hpo_df.columns.str.strip()
# hpo_df = hpo_df[['Gene ID', 'Gene symbol', 'Features']]
hpo_df = hpo_df[['Gene ID', 'Features']]
hpo_df['Features'] = hpo_df['Features'].apply(lambda features: features.replace('; ', ', '))
hpo_df = hpo_df.astype(str)
self.append_prefix_to_columns(hpo_df, "HPO")
hpo_df = self.left_join(hpo_df, self.gene_ref_df[["BioMart Ensembl Gene ID", 'BioMart Associated Gene Name']], 'HPO Gene ID', "BioMart Ensembl Gene ID")
hpo_df = hpo_df.rename(columns={'BioMart Associated Gene Name': 'Genes in HPO'})
self.gene_ref_df = self.prioritized_annotation(self.gene_ref_df, hpo_df, matching_fields)
#self.gene_ref_df.to_csv("hpo_ann.tsv", sep="\t")
def annotate_omim(self, omim):
omim_inheritance_codes = {"Autosomal dominant":"AD", \
"Autosomal recessive":"AR", \
"X-linked dominant":"XLD", \
"X-linked recessive":"XLR", \
"Y-linked dominant":"YLD", \
"Y-linked recessive":"YLR", \
"X-linked":"XL", \
"Y-linked":"YL"}
omim_inheritance_codes = {key.lower():value for key,value in omim_inheritance_codes.items()} # for case insensitive text search
def process_OMIM_phenotype(phenotype):
'''
omim phenotype example:
{Epilepsy, generalized, with febrile seizures plus, type 5, susceptibility to}, 613060 (3), Autosomal dominant;
{Epilepsy, idiopathic generalized, 10}, 613060 (3), Autosomal dominant;
{Epilepsy, juvenile myoclonic, susceptibility to}, 613060 (3), Autosomal dominant
'''
inheritance = []
if pd.isnull(phenotype): return phenotype
else:
for p in phenotype.split('; '):
multiple_inheritance = [code for description, code in omim_inheritance_codes.items() if description.lower() in p.lower()]
if multiple_inheritance: inheritance.append('&'.join(multiple_inheritance))
return ', '.join(inheritance)
matching_fields = {'OMIM Ensembl Gene ID': 'BioMart Ensembl Gene ID'}
# OMIM adds comments to their CSV file. These comments start with '#' character and are present in the header and footer of the file.
omim_df = pd.read_csv(omim, sep='\t', header=3, skipfooter=61, engine='python')
omim_df.columns = omim_df.columns.str.replace('#','')
omim_df.columns = omim_df.columns.str.strip()
omim_df = omim_df[['MIM Number', 'Ensembl Gene ID', 'Phenotypes']]
omim_df = omim_df[pd.notnull(omim_df['Phenotypes'])] #drop all nan phenotype columns
omim_df['Inheritance'] = omim_df['Phenotypes'].apply(lambda col: process_OMIM_phenotype(col))
omim_df = omim_df.astype(str).groupby('Ensembl Gene ID', as_index=False).agg({'Phenotypes' : ' & '.join, 'MIM Number' : ' & '.join, 'Inheritance' : ' & '.join,})
self.append_prefix_to_columns(omim_df, "OMIM")
omim_df = self.left_join(omim_df, self.gene_ref_df[["BioMart Ensembl Gene ID", 'BioMart Associated Gene Name']], 'OMIM Ensembl Gene ID', "BioMart Ensembl Gene ID")
omim_df = omim_df.rename(columns={'BioMart Associated Gene Name': 'Genes in OMIM'})
self.gene_ref_df = self.prioritized_annotation(self.gene_ref_df, omim_df, matching_fields)
# self.gene_ref_df.to_csv("omim_ann.tsv", sep="\t")
def annotate_exac(self, exac):
matching_fields = {
'ExAC gene' : 'BioMart Associated Gene Name',
}
exac_df = pd.read_csv(exac, sep='\t')
exac_df.columns = exac_df.columns.str.strip()
exac_df['transcript'] = exac_df['transcript'].apply(lambda transcript_id: transcript_id.split('.')[0])
exac_df = exac_df[['gene', 'syn_z', 'mis_z', 'lof_z', 'pLI']]
exac_df = exac_df.astype(str)
self.append_prefix_to_columns(exac_df, "ExAC")
self.gene_ref_df = self.prioritized_annotation(self.gene_ref_df, exac_df, matching_fields)
#self.gene_ref_df.to_csv("exac_ann.tsv", sep="\t")
def annotate_gnomad(self, gnomad, sv_record, reciprocal_overlap=0.5):
print('Annotating structural variants with those seen in gnomAD_SV based on a %f reciprocal overlap ...' % reciprocal_overlap)
gnomad_cols = ['CHROM', 'START', 'END', 'NAME', 'SVTYPE', 'AN', 'AC', 'AF', 'N_HOMREF', 'N_HET', 'N_HOMALT', 'FREQ_HOMREF', 'FREQ_HET', 'FREQ_HOMALT', 'POPMAX_AF']
gnomad_ann_cols = ['gnomAD_SVTYPE', 'gnomAD_AN', 'gnomAD_AC', 'gnomAD_AF', 'gnomAD_N_HOMREF', 'gnomAD_N_HET', 'gnomAD_N_HOMALT', 'gnomAD_FREQ_HOMREF', 'gnomAD_FREQ_HET', 'gnomAD_FREQ_HOMALT', 'gnomAD_POPMAX_AF']
gnomad_df = pd.read_csv(gnomad, sep='\t', dtype='str').astype(str)
gnomad_df.columns = gnomad_df.columns.str.replace('#', '')
gnomad_df.columns = gnomad_df.columns.str.strip()
gnomad_df = gnomad_df[gnomad_cols]
gnomad_bed = BedTool(gnomad_df.itertuples(index=False))
sample_sv = sv_record.make_ref_bedtool()
ann_df = sample_sv.intersect(gnomad_bed, wa=True, wb=True, F=reciprocal_overlap, f=reciprocal_overlap).to_dataframe(\
names=['CHROM', 'POS', 'END', 'SVTYPE', 'gnomAD_CHROM', 'gnomAD_START', 'gnomAD_END', 'gnomAD_ID', ] + gnomad_ann_cols).astype(str)
ann_df = ann_df.drop(ann_df[ann_df['SVTYPE'] != ann_df['gnomAD_SVTYPE']].index)
ann_df['gnomAD_SV'] = ann_df[['gnomAD_CHROM', 'gnomAD_START', 'gnomAD_END']].apply(lambda x: '{}:{}-{}'.format(x[0],x[1],x[2]), axis=1)
ann_df = ann_df.drop(columns=['gnomAD_CHROM', 'gnomAD_START', 'gnomAD_END'])
ann_df = ann_df.groupby(['CHROM', 'POS', 'END', 'SVTYPE']).agg(list)
ann_df = ann_df[ann_df.columns].applymap(lambda cell: ' & '.join(cell))
return sv_record.df.join(ann_df)
def annotate_counts(self, counts, sv_record, prefix="COUNT", reciprocal_overlap=0.5):
print('Annotating structural variants with those seen in %s based on a %f reciprocal overlap ...' % (counts, reciprocal_overlap))
cols = ['COUNT_CHROM', 'COUNT_START', 'COUNT_END', 'COUNT_SVTYPE', 'COUNT']
count_df = pd.read_csv(counts, sep='\t', dtype='str').astype(str)
count_bed = BedTool(count_df.itertuples(index=False))
sample_sv = sv_record.make_ref_bedtool()
ann_df = sample_sv.intersect(count_bed, wa=True, wb=True, F=reciprocal_overlap, f=reciprocal_overlap).to_dataframe(\
names=['CHROM', 'POS', 'END', 'SVTYPE', ] + cols)
ann_df[['CHROM', 'POS', 'END', 'SVTYPE', ]] = ann_df[['CHROM', 'POS', 'END', 'SVTYPE', ]].astype(str) # reference dataframe is typecasted as string
ann_df = ann_df.drop(ann_df[ann_df['SVTYPE'] != ann_df['COUNT_SVTYPE']].index)
ann_df = ann_df.groupby(['CHROM', 'POS', 'END', 'SVTYPE']).agg({'COUNT_CHROM' : 'first', 'COUNT_SVTYPE' : 'first', 'COUNT_START' : 'min', 'COUNT_END' : 'max', 'COUNT' : 'sum'})
ann_df['COUNT_SV'] = ann_df[['COUNT_CHROM', 'COUNT_START', 'COUNT_END']].apply(lambda x: '{}:{}-{}'.format(x[0],x[1],x[2]), axis=1)
ann_df = ann_df.drop(columns=['COUNT_CHROM', 'COUNT_START', 'COUNT_END', 'COUNT_SVTYPE'])
ann_df.columns = ann_df.columns.str.replace('COUNT', prefix)
df = sv_record.df.join(ann_df)
df[[prefix]] = df[[prefix]].fillna(0)
return df
def annotsv(self, sample_df):
'''
Handles DGV, DDD annotations
'''
all_sv_bed_name = "all_sv.bed"
annotated = "./{}.annotated.tsv".format(all_sv_bed_name)
sample_df.reset_index()[['CHROM', 'POS', 'END', 'SVTYPE']].to_csv(all_sv_bed_name, index=False, sep='\t')
subprocess.call("$ANNOTSV/bin/AnnotSV -SVinputFile {} -SVinputInfo 1 -outputFile {}".format(all_sv_bed_name, annotated), shell=True)
annotsv_df_original = pd.read_csv(annotated, sep='\t').astype(str)
# DDD annotations are only given to SVs that are 'split'
# but we want the DGV annotations that are given to the 'full' SVs
# so, separate these and join them to get both
sv_cols = ['SV chrom', 'SV start', 'SV end', 'SV type']
DDD_cols = ['DDD_status', 'DDD_mode', 'DDD_consequence', 'DDD_disease', 'DDD_pmids']
annotsv_df_split = annotsv_df_original[annotsv_df_original['AnnotSV type'] == 'split'][sv_cols + DDD_cols].set_index(sv_cols)
annotsv_df_full = annotsv_df_original[annotsv_df_original['AnnotSV type'] == 'full'][sv_cols + [col for col in annotsv_df_original.columns.tolist() if 'DGV' in col.upper()]].set_index(sv_cols)
annotsv_df_all = annotsv_df_split.merge(annotsv_df_full, how='outer', on=sv_cols).reset_index().drop_duplicates(subset=sv_cols)
annotsv_df_cols = [col for col in annotsv_df_all.columns.tolist() if col not in DDD_cols]
annotsv_df_all = annotsv_df_all.fillna('.')
# annotsv_df_split will have multiple rows per SV (one for each gene overlapped by the SV); aggregate to de-duplicate
annotsv_df = annotsv_df_all.groupby(annotsv_df_cols)[DDD_cols].agg({'DDD_status': ','.join,
'DDD_mode': ','.join,
'DDD_consequence': ','.join,
'DDD_disease': ','.join,
'DDD_pmids': ','.join}).reset_index()
annotsv_df = annotsv_df.rename(columns={annotsv_df.columns[0]: 'CHROM', annotsv_df.columns[1]: 'POS', annotsv_df.columns[2]: 'END', annotsv_df.columns[3]:'SVTYPE'}).set_index(keys=['CHROM', 'POS', 'END', 'SVTYPE'])
sample_df = sample_df.join(annotsv_df)
os.remove(all_sv_bed_name)
#os.remove(annotated)
return sample_df
def make_gene_ref_df(self, biomart):
df = pd.read_csv(biomart, sep='\t')
# df = df[['Ensembl Gene ID', 'Ensembl Transcript ID', 'Associated Gene Name', 'HGNC ID(s)', 'MIM Gene Accession']].drop_duplicates()
df = df[['Ensembl Gene ID', 'Associated Gene Name',]].drop_duplicates()
df['Associated Gene Name'] = df['Associated Gene Name'].apply(lambda symbol: symbol.upper()) #make all gene symbols a single case to increase match rate with other dataframes
df = df.astype(str)
self.append_prefix_to_columns(df, "BioMart")
self.gene_ref_df = df
def annotate_genes(self, sample_df, gene_col):
def count_unique_terms(cell):
terms = set()
for elem in cell:
if pd.isnull(elem):
continue
elif ', ' in elem:
terms.update(elem.split(', '))
elif elem != 'na' and elem != 'nan':
terms.add(elem)
return len(terms)
        # extract genes from sample_df, create a new dataframe where each row only has a single Ensembl ID and interval info
gene_df = sample_df.apply(lambda x: pd.Series(x[gene_col]),axis=1).stack().reset_index(level=4, drop=True)
gene_df = gene_df.to_frame().rename(columns={0: gene_col}).astype(str)
# gene_df.to_csv('seperated_genes.csv')
        # annotate the passed-in Ensembl gene IDs using the generated reference dataframe
gene_df = gene_df.join(self.gene_ref_df, on=gene_col, how='left').reset_index()
# gene_df.to_csv('annotated_genes.csv')
# aggregate all annotation columns within the same sv interval
gene_df = gene_df.groupby(['CHROM', 'POS', 'END', 'SVTYPE']).agg(list)
# gene_df.to_csv('grouped_index.csv')
# add cardinality columns
if self.HPO:
gene_df["N_UNIQUE_HPO_TERMS"] = [ [count_unique_terms(values["HPO Features"])] for index, values in gene_df.iterrows()]
gene_df["N_GENES_IN_HPO"] = [ [count_unique_terms(values["Genes in HPO"])] for index, values in gene_df.iterrows()]
gene_df["N_GENES_IN_OMIM"] = [ [count_unique_terms(values["Genes in OMIM"])] for index, values in gene_df.iterrows()]
# parse out and replace nan values with "na" string
gene_df[gene_df.columns] = gene_df[gene_df.columns].applymap(lambda cell: [str(item) for item in cell])
gene_df[gene_df.columns] = gene_df[gene_df.columns].applymap(lambda cell: ["na"] if all("nan" == item.lower() for item in cell) else cell)
gene_df[gene_df.columns] = gene_df[gene_df.columns].applymap(lambda cell: ["na" if "nan" == item.lower() else item for item in cell])
gene_df[gene_df.columns] = gene_df[gene_df.columns].applymap(lambda cell: ' | '.join(cell))
gene_df = gene_df[gene_df.columns]
# annotate the passed in dataframe
sample_df = sample_df.drop(gene_col, axis=1).join(gene_df)
return sample_df
def add_decipher_link(self, df):
df['DECIPHER_LINK'] = ['''=HYPERLINK("https://decipher.sanger.ac.uk/browser#q/%s:%s-%s")''' % index[0:3] for index, fields in df.iterrows()]
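# --- Illustrative usage sketch (added; not part of the original module) ---
# Shows how SVAnnotator is wired together. Every path below is a placeholder
# assumption; a real run needs the corresponding reference files on disk.
if __name__ == "__main__":
    annotator = SVAnnotator(
        exon_bed="refs/exons.bed",    # hypothetical path
        hgmd_db="refs/hgmd.db",       # hypothetical path
        hpo="refs/hpo_genes.tsv",     # hypothetical path
        exac="refs/exac_scores.tsv",  # hypothetical path
        omim="refs/genemap2.txt",     # hypothetical path
        biomart="refs/biomart.tsv",   # hypothetical path
    )
    # After construction, gene_ref_df holds the merged per-gene reference table
    # used by annotate_genes() and the other annotation helpers.
    print(annotator.gene_ref_df.head())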
| 52.135983
| 222
| 0.621604
|
48e88fa362928d9e44d6cbfc01305b9f504c6327
| 546
|
py
|
Python
|
setup.py
|
julienaubert/fabutils
|
60a891abe05f1c88986ff010cbd884b79f91c50e
|
[
"MIT"
] | null | null | null |
setup.py
|
julienaubert/fabutils
|
60a891abe05f1c88986ff010cbd884b79f91c50e
|
[
"MIT"
] | null | null | null |
setup.py
|
julienaubert/fabutils
|
60a891abe05f1c88986ff010cbd884b79f91c50e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from distutils.core import setup
setup(name='boolfab',
version='0.0.2',
description='Boolfab: override fabric Task so it handles arguments as bool.',
author='Julien Aubert',
author_email='julien.aubert.mail@gmail.com',
url='https://github.com/julienaubert/boolfab',
keywords='fabric utilities',
packages=['boolfab'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: MIT License",
],
)
| 32.117647
| 83
| 0.613553
|
f7d5e9df2675829230b535c5424ac7d10c242766
| 12,653
|
py
|
Python
|
modules/qbittorrent.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 422
|
2015-01-08T14:08:08.000Z
|
2022-02-07T11:47:37.000Z
|
modules/qbittorrent.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 581
|
2015-01-01T08:07:16.000Z
|
2022-02-23T11:44:37.000Z
|
modules/qbittorrent.py
|
scambra/HTPC-Manager
|
1a1440db84ae1b6e7a2610c7f3bd5b6adf0aab1d
|
[
"MIT"
] | 115
|
2015-01-08T14:41:00.000Z
|
2022-02-13T12:31:17.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import htpc
import cherrypy
import json
import logging
import time
import math
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import striphttp, sizeof
import requests
from requests.auth import HTTPDigestAuth
class Qbittorrent(object):
session = requests.Session()
def __init__(self):
self.logger = logging.getLogger('modules.qbittorrent')
self.newapi = False
self.authenticated = False
self.testapi = None
htpc.MODULES.append({
'name': 'qBittorrent',
'id': 'qbittorrent',
'test': htpc.WEBDIR + 'qbittorrent/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'qbittorrent_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'qbittorrent_name'},
{'type': 'text', 'label': 'IP / Host', 'placeholder': 'localhost', 'name': 'qbittorrent_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '8080', 'name': 'qbittorrent_port'},
{'type': 'text', 'label': 'Username', 'name': 'qbittorrent_username'},
{'type': 'password', 'label': 'Password', 'name': 'qbittorrent_password'},
{'type': 'bool', 'label': 'Use SSL', 'name': 'qbittorrent_ssl'},
{'type': 'text', 'label': 'Reverse proxy link', 'placeholder': '', 'desc': 'Reverse proxy link ex: https://qbt.domain.com', 'name': 'qbittorrent_reverse_proxy_link'},
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('qbittorrent.html').render(scriptname='qbittorrent', webinterface=self.webinterface())
def webinterface(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
if htpc.settings.get('qbittorrent_reverse_proxy_link'):
url = htpc.settings.get('qbittorrent_reverse_proxy_link')
return url
def qbturl(self):
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl', 0) else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
return url
@cherrypy.expose()
@require()
def login(self):
self.logger.debug('Trying to login to qbittorrent')
try:
d = {'username': htpc.settings.get('qbittorrent_username', ''),
'password': htpc.settings.get('qbittorrent_password', '')
}
# F33d da cookie monster
r = self.session.post(self.qbturl() + 'login', data=d, verify=False, timeout=5)
if r.content == 'Ok.':
self.logger.debug('Successfully logged in with new api')
self.authenticated = True
self.newapi = True
else:
self.logger.error('Check your username and password')
return r.content
except Exception as e:
self.logger.error('Failed to auth with new api %s' % e)
return
def _fetch(self, u, post=False, params=None, data=None):
if params is None:
params = {}
host = striphttp(htpc.settings.get('qbittorrent_host', ''))
port = htpc.settings.get('qbittorrent_port', '')
ssl = 's' if htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
username = htpc.settings.get('qbittorrent_username', '')
password = htpc.settings.get('qbittorrent_password', '')
url += u
if self.testapi is None:
self.ping()
if self.newapi:
if self.authenticated is False:
self.login()
if post:
if self.newapi:
r = self.session.post(url, data=data, verify=False, timeout=8)
else:
r = self.session.post(url, data=data, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
else:
if self.newapi:
r = self.session.get(url, verify=False, timeout=8)
else:
r = self.session.get(url, verify=False, timeout=8, auth=HTTPDigestAuth(username, password))
return r
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def fetch(self):
try:
if self.newapi:
result = self._fetch('query/torrents?filter=all&sort=size&reverse=false')
torrents = result.json()
l = []
for torrent in torrents:
t = {}
for k, v in torrent.items():
t[k] = v
if k == 'size':
t['size'] = sizeof(int(v))
if k == 'eta':
eta = time.strftime('%H:%M:%S', time.gmtime(v))
if eta == '00:00:00':
eta = u'\u221E'
t['eta'] = eta
if k == 'ratio':
t['ratio'] = math.ceil(v)
l.append(t)
return l
else:
result = self._fetch('json/torrents')
# r.json() does not like the infinity
return json.loads(result.content)
except Exception as e:
self.logger.error("Couldn't get torrents %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_speed(self):
''' Get total download and upload speed '''
try:
d = {}
if not self.newapi:
result = self._fetch('json/transferInfo/')
result = result.json()
speeddown = result['dl_info']
speedup = result['up_info']
list_of_down = speeddown.split()
list_of_up = speedup.split()
ds = list_of_down[1] + ' ' + list_of_down[2]
dlstat = list_of_down[5] + ' ' + list_of_down[6]
us = list_of_up[1] + ' ' + list_of_up[2]
                ulstat = list_of_up[5] + ' ' + list_of_up[6]
d = {
'qbittorrent_speed_down': ds,
'qbittorrent_speed_up': us,
'qbittorrent_total_dl': dlstat,
'qbittorrent_total_ul': ulstat
}
else:
# new api stuff
result = self._fetch('query/transferInfo')
result = result.json()
d = {
'qbittorrent_speed_down': sizeof(result['dl_info_speed']),
'qbittorrent_speed_up': sizeof(result['up_info_speed']),
'qbittorrent_total_dl': sizeof(result['dl_info_data']),
'qbittorrent_total_ul': sizeof(result['up_info_data'])
}
return d
except Exception as e:
self.logger.error("Couldn't get total download and uploads speed %s" % e)
def get_global_dl_limit(self):
try:
result = self._fetch('command/getGlobalDlLimit/')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global download limit %s" % e)
def get_global_ul_limit(self):
try:
result = self._fetch('command/getGlobalUpLimit')
speed = int(result.content)
speed /= 1024
return speed
except Exception as e:
self.logger.error("Couldn't get global upload limit %s" % e)
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def get_global_limit(self):
try:
d = {}
d['dl_limit'] = self.get_global_dl_limit()
d['ul_limit'] = self.get_global_ul_limit()
return d
except Exception as e:
self.logger.debug("Couldn't get global upload and download limits %s" % e)
@cherrypy.expose()
@require(member_of(htpc.role_user))
def command(self, cmd=None, hash=None, name=None, dlurl=None):
        ''' Handles pause, resume, and delete for single torrents '''
try:
self.logger.debug('%s %s' % (cmd, name))
data = {}
if cmd == 'delete':
data['hashes'] = hash
elif cmd == 'download':
data['urls'] = dlurl
elif cmd == 'resumeall' or cmd == 'pauseall':
# this does not work, bug in qbt see
# https://github.com/qbittorrent/qBittorrent/issues/3016
if self.newapi:
cmd = cmd[:-3] + 'All'
else:
data['hash'] = hash
url = 'command/%s' % cmd
# data is form encode..
r = self._fetch(url, post=True, data=data)
return r.content
except Exception as e:
self.logger.error('Failed at %s %s %s %s' % (cmd, name, hash, e))
@cherrypy.expose()
@require()
def to_client(self, link, torrentname, **kwargs):
''' Is used by torrent search '''
try:
url = 'command/download/'
data = {}
data['urls'] = link
            self.logger.info('%s %s is sent to qBittorrent' % (torrentname, link))
            return self._fetch(url, data=data, post=True)
except Exception as e:
self.logger.error('Failed to send %s %s to qBittorrent %s' % (link, torrentname, e))
@cherrypy.expose()
@require(member_of(htpc.role_user))
def set_speedlimit(self, type=None, speed=None):
''' Sets global upload and download speed '''
try:
self.logger.debug('Setting %s to %s' % (type, speed))
speed = int(speed)
if speed == 0:
speed = 0
else:
speed = speed * 1024
url = 'command/' + type + '/'
data = {}
data['limit'] = speed
r = self._fetch(url, data=data, post=True)
return r.content
except Exception as e:
self.logger.error('Failed to set %s to %s %s' % (type, speed, e))
@cherrypy.expose()
    @require()  # keep this decorator; ping is also used to detect the API version
@cherrypy.tools.json_out()
def ping(self, qbittorrent_host='', qbittorrent_port='', qbittorrent_username='', qbittorrent_password='', qbittorrent_ssl=False, **kw):
        self.logger.debug('Trying to connect to qBittorrent')
host = qbittorrent_host or htpc.settings.get('qbittorrent_host')
port = qbittorrent_port or htpc.settings.get('qbittorrent_port')
username = qbittorrent_username or htpc.settings.get('qbittorrent_username')
password = qbittorrent_password or htpc.settings.get('qbittorrent_password')
ssl = 's' if qbittorrent_ssl or htpc.settings.get('qbittorrent_ssl') else ''
url = 'http%s://%s:%s/' % (ssl, host, port)
self.newapi = False
self.authenticated = False
try:
            # We assume that it's at least 3.2 if this works.
r = requests.get(url + 'version/api', timeout=8, verify=False)
self.logger.debug('Trying to connect with new API %s' % r.url)
            # The old API returns an empty page
if r.content != '' and r.ok:
self.newapi = r.content
self.testapi = True
return r.content
else:
raise requests.ConnectionError
except Exception as e:
            self.logger.debug('Failed to figure out which API version is in use, trying the old API')
try:
r = requests.post(url + 'json/torrents', auth=HTTPDigestAuth(username, password), timeout=10, verify=False)
if r.ok:
self.logger.debug('Old API works %s' % r.url)
# Disable new api stuff
self.testapi = True
self.newapi = False
self.authenticated = False
except Exception as e:
self.newapi = False
self.authenticated = False
                self.logger.debug('Failed to contact qBittorrent via the old and new API')
                self.logger.error('Cannot contact qBittorrent, check your settings and try again %s' % e)
| 37.434911
| 182
| 0.529598
|
273259011421f8dccad789c7b46fe183079c58a5
| 1,097
|
py
|
Python
|
src/tipboard/app/utils.py
|
adeo/benchmark-tipboard
|
12e372a6ed10c43984157b21e7c284938f1a8413
|
[
"Apache-2.0"
] | null | null | null |
src/tipboard/app/utils.py
|
adeo/benchmark-tipboard
|
12e372a6ed10c43984157b21e7c284938f1a8413
|
[
"Apache-2.0"
] | 6
|
2020-06-05T20:25:27.000Z
|
2022-03-31T09:27:14.000Z
|
src/tipboard/app/utils.py
|
adeo/benchmark-tipboard
|
12e372a6ed10c43984157b21e7c284938f1a8413
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from datetime import datetime
from src.tipboard.app.properties import API_KEY
from src.tipboard.app.properties import COLOR_TAB
def getTimeStr():
return datetime.now().strftime("%Hh%M")
def checkAccessToken(method='GET', request=None, unsecured=False):
""" Check if API_TOKEN is correct. Who cares about api version ?"""
key = "NO_KEY_FOUND"
if unsecured:
return True
try:
        if method in ('GET', 'POST', 'DELETE') and \
                request.GET.get('API_KEY', 'NO_API_KEY_FOUND') == API_KEY:  # TODO: confirm this works for DELETE requests
return True
except Exception:
print(f"{getTimeStr()} (-) Access Token error: {key}")
return False
def buildBasicDataset(data=None, seriesNumber=1, borderColor=False):
if data is None:
data = list()
dataset = dict(data=data, label=f'Series {seriesNumber}')
if borderColor:
dataset['borderColor'] = COLOR_TAB[seriesNumber - 1]
else:
dataset['backgroundColor'] = COLOR_TAB[seriesNumber - 1]
return dataset
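# --- Illustrative usage sketch (added; not part of the original module) ---
# buildBasicDataset packages a list of values into the dict shape expected by the
# chart templates; borderColor=True selects a line colour instead of a fill colour.
# Assumes COLOR_TAB defines at least as many colours as there are series.
if __name__ == "__main__":
    first_series = buildBasicDataset(data=[1, 2, 3], seriesNumber=1)
    second_series = buildBasicDataset(data=[4, 5, 6], seriesNumber=2, borderColor=True)
    print(getTimeStr(), first_series, second_series)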
| 32.264706
| 115
| 0.653601
|
20254b17ea80d1290aef9f41ae0e2b141e20259f
| 2,904
|
py
|
Python
|
ciscoisesdk/models/validators/v3_1_1/jsd_c3d67df26a4d58f5a5efc6083ba187eb.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 36
|
2021-05-18T16:24:19.000Z
|
2022-03-05T13:44:41.000Z
|
ciscoisesdk/models/validators/v3_1_0/jsd_c3d67df26a4d58f5a5efc6083ba187eb.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 15
|
2021-06-08T19:03:37.000Z
|
2022-02-25T14:47:33.000Z
|
ciscoisesdk/models/validators/v3_1_0/jsd_c3d67df26a4d58f5a5efc6083ba187eb.py
|
CiscoISE/ciscoisesdk
|
860b0fc7cc15d0c2a39c64608195a7ab3d5f4885
|
[
"MIT"
] | 6
|
2021-06-10T09:32:01.000Z
|
2022-01-12T08:34:39.000Z
|
# -*- coding: utf-8 -*-
"""Identity Services Engine updateVnVlanMappingById data model.
Copyright (c) 2021 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from ciscoisesdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorC3D67Df26A4D58F5A5EfC6083Ba187Eb(object):
"""updateVnVlanMappingById request schema definition."""
def __init__(self):
super(JSONSchemaValidatorC3D67Df26A4D58F5A5EfC6083Ba187Eb, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"id": {
"type": "string"
},
"isData": {
"type": "boolean"
},
"isDefaultVlan": {
"type": "boolean"
},
"lastUpdate": {
"type": "string"
},
"maxValue": {
"type": "integer"
},
"name": {
"type": "string"
},
"vnId": {
"type": "string"
},
"vnName": {
"type": "string"
}
},
"required": [
"name"
],
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
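# --- Illustrative usage sketch (added; not part of the generated module) ---
# The compiled schema above only requires 'name'; anything that violates the
# schema makes validate() raise MalformedRequest.
if __name__ == "__main__":
    validator = JSONSchemaValidatorC3D67Df26A4D58F5A5EfC6083Ba187Eb()
    validator.validate({"name": "VLAN-10", "isData": True, "maxValue": 4094})  # passes silently
    try:
        validator.validate({"isData": True})  # missing the required 'name' key
    except MalformedRequest as err:
        print(err)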
| 33.37931
| 83
| 0.585055
|
2f451f7d1973e1107469215062c47b2c4a84ef46
| 1,388
|
py
|
Python
|
graphics.py
|
xedre/Minesweeper
|
d62d001a3ded72e0b8c5bd4efffacb73a720500c
|
[
"MIT"
] | 1
|
2019-08-24T17:01:58.000Z
|
2019-08-24T17:01:58.000Z
|
graphics.py
|
xedre/Minesweeper
|
d62d001a3ded72e0b8c5bd4efffacb73a720500c
|
[
"MIT"
] | null | null | null |
graphics.py
|
xedre/Minesweeper
|
d62d001a3ded72e0b8c5bd4efffacb73a720500c
|
[
"MIT"
] | null | null | null |
import pygame
black = (50, 50, 50)
def start(screen_size):
pygame.init()
clock = pygame.time.Clock()
display = pygame.display.set_mode(screen_size)
pygame.display.set_caption("Minesweeper")
pygame.display.update()
font = pygame.font.SysFont('arial', 25)
return display, clock, font
def end():
pygame.quit()
exit()
def background(display, colour=black):
display.fill(colour)
def text(msg, colour, x, y, window, font):
screen_text = font.render(msg, True, colour)
window.blit(screen_text, [x, y])
def update(block_size, mine_map, screen, font, gap):
background(screen)
for x in range(len(mine_map)):
for y in range(len(mine_map[x])):
if mine_map[x][y][0] is False:
screen.fill((0, 0, 0), rect=[y * (block_size + gap) + gap, x * (block_size + gap) + gap, block_size, block_size])
else:
screen.fill((150, 150, 150), rect=[y * (block_size + gap) + gap, x * (block_size + gap) + gap, block_size, block_size])
if mine_map[x][y][1] == 0:
show = ""
elif mine_map[x][y][1] == -2:
show = "F"
else:
show = str(mine_map[x][y][1])
text(show, black, y * (block_size + gap) + 5, x * (block_size + gap) + 2, screen, font)
pygame.display.update()
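# --- Illustrative usage sketch (added; not part of the original module) ---
# Minimal loop wiring the helpers above together. Each mine_map cell is assumed
# to be [revealed_flag, value], which is the layout update() reads.
if __name__ == "__main__":
    display, clock, font = start((200, 200))
    mine_map = [[[False, 0] for _ in range(6)] for _ in range(6)]
    for _ in range(60):  # draw ~60 frames, then quit
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                end()
        update(block_size=30, mine_map=mine_map, screen=display, font=font, gap=2)
        clock.tick(30)
    end()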
| 30.173913
| 135
| 0.557637
|
51583011f947885be67dc3debd4940c569de75fb
| 7,946
|
py
|
Python
|
utilities/loss.py
|
TayfunKaraderi/ICPRAI-2022-Visual-Microfossil-Identification-via-Deep-Metric-Learning
|
372fd88e7b7907b3acfcad16ac60a04ab2febf3f
|
[
"MIT"
] | null | null | null |
utilities/loss.py
|
TayfunKaraderi/ICPRAI-2022-Visual-Microfossil-Identification-via-Deep-Metric-Learning
|
372fd88e7b7907b3acfcad16ac60a04ab2febf3f
|
[
"MIT"
] | null | null | null |
utilities/loss.py
|
TayfunKaraderi/ICPRAI-2022-Visual-Microfossil-Identification-via-Deep-Metric-Learning
|
372fd88e7b7907b3acfcad16ac60a04ab2febf3f
|
[
"MIT"
] | null | null | null |
# Core libraries
import numpy as np
# PyTorch stuff
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
"""
File contains loss functions selectable during training
"""
class TripletLoss(nn.Module):
def __init__(self, margin=4.0):
super(TripletLoss, self).__init__()
self.margin = margin
def forward(self, anchor, positive, negative, labels):
distance_positive = (anchor - positive).pow(2).sum(1) # .pow(.5)
distance_negative = (anchor - negative).pow(2).sum(1) # .pow(.5)
losses = F.relu(distance_positive - distance_negative + self.margin)
return losses.sum()
class TripletSoftmaxLoss(nn.Module):
def __init__(self, margin=0.0, lambda_factor=0.01):
super(TripletSoftmaxLoss, self).__init__()
self.margin = margin
self.loss_fn = nn.CrossEntropyLoss()
self.lambda_factor = lambda_factor
def forward(self, anchor, positive, negative, outputs, labels ):
#distance_positive = torch.abs(anchor - positive).sum(1)
#distance_negative = torch.abs(anchor - negative).sum(1)
#losses = F.relu(distance_positive - distance_negative + self.margin)
loss_softmax = self.loss_fn(input=outputs, target=labels)
loss_total = loss_softmax # self.lambda_factor*losses.sum() + loss_softmax
        return loss_softmax, loss_softmax, loss_softmax  # loss_total, losses.sum(), loss_softmax
class OnlineTripletLoss(nn.Module):
def __init__(self, triplet_selector, margin=0.0):
super(OnlineTripletLoss, self).__init__()
self.margin = margin
self.triplet_selector = triplet_selector
def forward(self, anchor_embed, pos_embed, neg_embed, labels):
# Combine the embeddings from each network
embeddings = torch.cat((anchor_embed, pos_embed, neg_embed), dim=0)
# Get the (e.g. hardest) triplets in this minibatch
triplets, num_triplets = self.triplet_selector.get_triplets(embeddings, labels)
# There might be no triplets selected, if so, just compute the loss over the entire
# minibatch
if num_triplets == 0:
ap_distances = (anchor_embed - pos_embed).pow(2).sum(1)
an_distances = (anchor_embed - neg_embed).pow(2).sum(1)
else:
# Use CUDA if we can
if anchor_embed.is_cuda: triplets = triplets.cuda()
# Compute triplet loss over the selected triplets
ap_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 1]]).pow(2).sum(1)
an_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 2]]).pow(2).sum(1)
# Compute the losses
losses = F.relu(ap_distances - an_distances + self.margin)
return losses.mean()
class OnlineTripletSoftmaxLoss(nn.Module):
def __init__(self, triplet_selector, margin=0.0, lambda_factor=0.01):
super(OnlineTripletSoftmaxLoss, self).__init__()
self.margin = margin
self.loss_fn = nn.CrossEntropyLoss()
self.lambda_factor = lambda_factor
self.triplet_selector = triplet_selector
def forward(self, anchor_embed, pos_embed, neg_embed, preds, labels, labels_neg):
# Combine the embeddings from each network
embeddings = torch.cat((anchor_embed, pos_embed, neg_embed), dim=0)
# Define the labels as variables and put on the GPU
gpu_labels = labels.view(len(labels))
gpu_labels_neg = labels_neg.view(len(labels_neg))
gpu_labels = Variable(gpu_labels.cuda())
gpu_labels_neg = Variable(gpu_labels_neg.cuda())
# Concatenate labels for softmax/crossentropy targets
target = torch.cat((gpu_labels, gpu_labels, gpu_labels_neg), dim=0)
# Get the (e.g. hardest) triplets in this minibatch
triplets, num_triplets = self.triplet_selector.get_triplets(embeddings, labels)
# There might be no triplets selected, if so, just compute the loss over the entire
# minibatch
if num_triplets == 0:
ap_distances = (anchor_embed - pos_embed).pow(2).sum(1)
an_distances = (anchor_embed - neg_embed).pow(2).sum(1)
else:
# Use CUDA if we can
if anchor_embed.is_cuda: triplets = triplets.cuda()
# Compute triplet loss over the selected triplets
ap_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 1]]).pow(2).sum(1)
an_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 2]]).pow(2).sum(1)
# Compute the triplet losses
triplet_losses = F.relu(ap_distances - an_distances + self.margin)
# Compute softmax loss
loss_softmax = self.loss_fn(input=preds, target=target-1)
# Compute the total loss
loss_total = triplet_losses.mean() #self.lambda_factor*triplet_losses.mean() #+ loss_softmax
# Return them all!
return loss_total, triplet_losses.mean(), loss_softmax
# Reciprocal triplet loss from
# "Who Goes There? Exploiting Silhouettes and Wearable Signals for Subject Identification
# in Multi-Person Environments"
class OnlineReciprocalTripletLoss(nn.Module):
def __init__(self, triplet_selector):
super(OnlineReciprocalTripletLoss, self).__init__()
self.triplet_selector = triplet_selector
def forward(self, anchor_embed, pos_embed, neg_embed, labels):
# Combine the embeddings from each network
embeddings = torch.cat((anchor_embed, pos_embed, neg_embed), dim=0)
# Get the (e.g. hardest) triplets in this minibatch
triplets, num_triplets = self.triplet_selector.get_triplets(embeddings, labels)
# There might be no triplets selected, if so, just compute the loss over the entire
# minibatch
if num_triplets == 0:
ap_distances = (anchor_embed - pos_embed).pow(2).sum(1)
an_distances = (anchor_embed - neg_embed).pow(2).sum(1)
else:
# Use CUDA if we can
if anchor_embed.is_cuda: triplets = triplets.cuda()
# Compute distances over the selected triplets
ap_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 1]]).pow(2).sum(1)
an_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 2]]).pow(2).sum(1)
# Actually compute reciprocal triplet loss
losses = ap_distances + (1/an_distances)
return losses.mean()
# Reciprocal triplet loss from
# "Who Goes There? Exploiting Silhouettes and Wearable Signals for Subject Identification
# in Multi-Person Environments"
class OnlineReciprocalSoftmaxLoss(nn.Module):
def __init__(self, triplet_selector, margin=0.0, lambda_factor=0.01):
super(OnlineReciprocalSoftmaxLoss, self).__init__()
self.margin = margin
self.loss_fn = nn.CrossEntropyLoss()
self.lambda_factor = lambda_factor
self.triplet_selector = triplet_selector
def forward(self, anchor_embed, pos_embed, neg_embed, preds, labels, labels_neg):
# Combine the embeddings from each network
embeddings = torch.cat((anchor_embed, pos_embed, neg_embed), dim=0)
# Define the labels as variables and put on the GPU
gpu_labels = labels.view(len(labels))
gpu_labels_neg = labels_neg.view(len(labels_neg))
gpu_labels = Variable(gpu_labels.cuda())
gpu_labels_neg = Variable(gpu_labels_neg.cuda())
# Concatenate labels for softmax/crossentropy targets
target = torch.cat((gpu_labels, gpu_labels, gpu_labels_neg), dim=0)
# Get the (e.g. hardest) triplets in this minibatch
triplets, num_triplets = self.triplet_selector.get_triplets(embeddings, labels)
# There might be no triplets selected, if so, just compute the loss over the entire
# minibatch
if num_triplets == 0:
ap_distances = (anchor_embed - pos_embed).pow(2).sum(1)
an_distances = (anchor_embed - neg_embed).pow(2).sum(1)
else:
# Use CUDA if we can
if anchor_embed.is_cuda: triplets = triplets.cuda()
# Compute triplet loss over the selected triplets
ap_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 1]]).pow(2).sum(1)
an_distances = (embeddings[triplets[:, 0]] - embeddings[triplets[:, 2]]).pow(2).sum(1)
# Compute the triplet losses
triplet_losses = ap_distances + (1/an_distances)
# Compute softmax loss
loss_softmax = self.loss_fn(input=preds, target=target-1)
# Compute the total loss
loss_total = self.lambda_factor*triplet_losses.mean() + loss_softmax
# Return them all!
return loss_total, triplet_losses.mean(), loss_softmax
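# --- Illustrative usage sketch (added; not part of the original module) ---
# Plain TripletLoss only needs the three embedding batches; the labels argument
# is accepted but unused by its forward(). Shapes below are arbitrary examples.
if __name__ == "__main__":
    criterion = TripletLoss(margin=4.0)
    anchor = torch.randn(8, 128)
    positive = torch.randn(8, 128)
    negative = torch.randn(8, 128)
    labels = torch.zeros(8, dtype=torch.long)  # ignored by TripletLoss.forward
    print(criterion(anchor, positive, negative, labels).item())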
| 38.386473
| 94
| 0.740624
|
81cf91e1929f66d0d592668f58c259c7cb1a879a
| 2,623
|
py
|
Python
|
face_detector.py
|
beingnothing/FaceTrack_by_FaceBoxes
|
5b84c163fd9851bf6b9bd2764798c292bee04fa7
|
[
"MIT"
] | 3
|
2019-09-23T13:07:14.000Z
|
2020-06-03T09:04:56.000Z
|
face_detector.py
|
beingnothing/FaceTrack_by_FaceBoxes
|
5b84c163fd9851bf6b9bd2764798c292bee04fa7
|
[
"MIT"
] | null | null | null |
face_detector.py
|
beingnothing/FaceTrack_by_FaceBoxes
|
5b84c163fd9851bf6b9bd2764798c292bee04fa7
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
class FaceDetector:
def __init__(self, model_path, gpu_memory_fraction=0.25, visible_device_list='0'):
"""
Arguments:
model_path: a string, path to a pb file.
gpu_memory_fraction: a float number.
visible_device_list: a string.
"""
with tf.gfile.GFile(model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
graph = tf.Graph()
with graph.as_default():
tf.import_graph_def(graph_def, name='import')
self.input_image = graph.get_tensor_by_name('import/image_tensor:0')
self.output_ops = [
graph.get_tensor_by_name('import/boxes:0'),
graph.get_tensor_by_name('import/scores:0'),
graph.get_tensor_by_name('import/num_boxes:0'),
]
gpu_options = tf.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction,
visible_device_list=visible_device_list
)
config_proto = tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)
self.sess = tf.Session(graph=graph, config=config_proto)
def __call__(self, image, score_threshold=0.5):
"""Detect faces.
Arguments:
image: a numpy uint8 array with shape [height, width, 3],
                that represents an RGB image.
score_threshold: a float number.
Returns:
boxes: a float numpy array of shape [num_faces, 4].
scores: a float numpy array of shape [num_faces].
Note that box coordinates are in the order: ymin, xmin, ymax, xmax!
"""
h, w, _ = image.shape
image = np.expand_dims(image, 0)
boxes, scores, num_boxes = self.sess.run(
self.output_ops, feed_dict={self.input_image: image}
)
print('import/image_tensor:0'+str(self.input_image.shape))
print('import/boxes:0'+str(self.output_ops[0].shape))
print('import/scores:0'+str(self.output_ops[1].shape))
print('import/num_boxes:0'+str(self.output_ops[2].shape))
print('boxes:'+str(boxes.shape))
print('scores:'+str(scores.shape))
print('num_boxes:'+str(num_boxes.shape))
num_boxes = num_boxes[0]
boxes = boxes[0][:num_boxes]
scores = scores[0][:num_boxes]
to_keep = scores > score_threshold
boxes = boxes[to_keep]
scores = scores[to_keep]
scaler = np.array([h, w, h, w], dtype='float32')
boxes = boxes * scaler
return boxes, scores
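# Illustrative usage sketch (added for illustration; not part of the original file). The
# frozen-graph path and the image file below are placeholders; cv2 is only needed for the
# demo, the class itself depends on TensorFlow 1.x and numpy alone.
if __name__ == '__main__':
    import cv2

    detector = FaceDetector('model/faceboxes.pb', gpu_memory_fraction=0.25, visible_device_list='0')
    image_bgr = cv2.imread('test.jpg')
    image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)      # the detector expects RGB
    boxes, scores = detector(image_rgb, score_threshold=0.5)
    for (ymin, xmin, ymax, xmax), score in zip(boxes, scores):
        cv2.rectangle(image_bgr, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 255, 0), 2)
    cv2.imwrite('faces.jpg', image_bgr)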
| 34.973333
| 90
| 0.611132
|
4685ec775351dfc841fafc59ef7a4ae7d628238b
| 8,064
|
py
|
Python
|
scripts/group_and_merge_by_gene.py
|
pdiakumis/NGS_Utils
|
9eae9f8d5f0e408118d429fde90e297dbac9ae15
|
[
"MIT"
] | 3
|
2018-06-06T01:41:51.000Z
|
2020-08-20T11:36:06.000Z
|
scripts/group_and_merge_by_gene.py
|
pdiakumis/NGS_Utils
|
9eae9f8d5f0e408118d429fde90e297dbac9ae15
|
[
"MIT"
] | 4
|
2019-11-28T03:34:54.000Z
|
2021-06-24T23:04:55.000Z
|
scripts/group_and_merge_by_gene.py
|
pdiakumis/NGS_Utils
|
9eae9f8d5f0e408118d429fde90e297dbac9ae15
|
[
"MIT"
] | 5
|
2018-03-15T12:43:38.000Z
|
2021-06-24T23:12:48.000Z
|
#!/usr/bin/env python
from os.path import abspath, dirname, realpath, join
import sys
from collections import OrderedDict, defaultdict
from ngs_utils.bed_utils import count_bed_cols
def main():
if len(sys.argv) < 2:
sys.exit('Usage: ' + __file__ + ' bed_file > merged_bed_file')
summarize_by_genes = True # len(sys.argv) > 2
# sys.stderr.write('Setting summarize_by_genes to ' + str(summarize_by_genes) + '\n')
num_bed_cols = count_bed_cols(sys.argv[1])
if num_bed_cols < 3:
sys.exit('Incorrect number of fields: ' + str(num_bed_cols) + '. Should be at least 3.')
if num_bed_cols < 7:
summarize_by_genes = False
sys.stderr.write('less than 7 columns in BED; no summarizing by genes\n')
gene_by_chrom_and_name = OrderedDict()
total_lines = 0
feature_counter = defaultdict(int)
with open(sys.argv[1]) as inp:
for l in inp:
if not l:
pass
elif l.startswith('#') or '\t' not in l:
sys.stdout.write(l)
else:
fields = l.replace('\n', '').split('\t')
if len(fields) != num_bed_cols:
                    sys.stderr.write('Error: number of fields inconsistent. Expected ' + str(num_bed_cols) + ', got ' + str(len(fields)) + ' at ' + ' | '.join(fields) + '\n')
sys.exit(1)
else:
chrom, start, end = fields[:3]
start, end = int(start), int(end)
gname = fields[3] if num_bed_cols >= 4 else '.'
strand = fields[5] if num_bed_cols >= 6 else None
(feature, biotype) = fields[6:8] if num_bed_cols >= 8 else (None, None)
if feature:
feature_counter[feature] += 1
gene = gene_by_chrom_and_name.get((chrom, gname, strand))
if gene is None:
gene = Gene(gname, chrom, strand, 'Gene', biotype)
gene_by_chrom_and_name[(chrom, gname, strand)] = gene
if feature in ['Gene', 'Multi_Gene']: # in fact '*Gene' features in BED files are optional
if gene.already_met_gene_feature_for_this_gene:
# sys.stderr.write(gene.name + ' is duplicating: ' + str(gene) + '\n')
# sys.exit(1)
                                # multiple records for gene, picking the lowest start and the biggest end
gene.start = min(gene.start, start)
gene.end = max(gene.end, end)
gene.biotype = merge_fields(gene.biotype, biotype)
# assert gene.strand == strand, 'Prev gene strand is ' + gene.strand + ', new strand is ' + strand + ' gene is ' + gene.name
assert gene.strand == strand, str(gene) + ' strand is not ' + strand
gene.feature = feature
gene.start = start
gene.end = end
gene.biotype = biotype
gene.already_met_gene_feature_for_this_gene = True
elif feature in [None, '.', 'CDS', 'Exon', 'UTR/Intron/Decay']:
assert gene.strand == strand, str(gene) + ' strand is not ' + strand
gene.regions.append(Exon(int(start), int(end), biotype, feature))
total_lines += 1
if total_lines % 10000 == 0:
sys.stderr.write('processed ' + str(total_lines // 1000) + 'k lines\n')
sys.stderr.flush()
sys.stderr.write('Processed ' + str(total_lines) + ' lines, found ' + str(len(gene_by_chrom_and_name)) + ' unique genes.\n')
if feature_counter:
sys.stderr.write('Features:\n')
for ft, cnt in feature_counter.items():
sys.stderr.write(' ' + ft + ': ' + str(cnt) + '\n')
sys.stderr.write('\n')
genes = []
for gene in gene_by_chrom_and_name.values():
if gene.sort_regions() is not None:
genes.append(gene)
sys.stderr.write('Merging regions...\n')
final_regions = []
for gene in sorted(genes, key=lambda g: g.get_key()):
if summarize_by_genes and gene.name != '.':
final_regions.append((gene.chrom, gene.start, gene.end, gene.name, gene.strand, gene.feature, gene.biotype))
merged_regions = gene.merge_regions()
for r in merged_regions:
final_regions.append((gene.chrom, r.start, r.end, gene.name, gene.strand, r.feature, r.biotype))
sys.stderr.write('Merged, regions after merge: ' + str(len(final_regions)) + ', saving...\n')
for chrom, start, end, gname, strand, feature, biotype in final_regions:
fs = [chrom, str(start), str(end), gname, '.', strand or '.', feature or '.', biotype or '.']
sys.stdout.write('\t'.join(fs[:num_bed_cols]) + '\n')
sys.stderr.write('Saved\n')
class Exon:
def __init__(self, start, end, biotype=None, feature=None):
self.start = start
self.end = end
self.biotype = biotype
self.feature = feature
def __repr__(self):
return str(self.start) + '-' + str(self.end) + ',' + str(self.biotype) + ', ' + str(self.feature)
CHROMS = [('Y', 23), ('X', 24), ('M', 0)]
for i in range(22, 0, -1):
CHROMS.append((str(i), i))
class Gene:
def __init__(self, name, chrom, strand=None, feature=None, biotype=None):
self.name = name
self.chrom = chrom
self.__chrom_key = self.__make_chrom_key()
self.strand = strand
self.start = None
self.end = None
self.feature = feature
self.biotype = biotype
self.already_met_gene_feature_for_this_gene = False # some BED files can contain '*Gene' features, so we can take start and end from them
self.regions = []
def __make_chrom_key(self):
chr_remainder = self.chrom
if self.chrom.startswith('chr'):
chr_remainder = self.chrom[3:]
for (c, i) in CHROMS:
if chr_remainder == c:
return i
elif chr_remainder.startswith(c):
return i + 24
sys.stderr.write('Cannot parse chromosome ' + self.chrom + '\n')
return None
def get_key(self):
return self.__chrom_key, self.start, (0 if 'Gene' in self.feature else 1), self.end, self.name
def sort_regions(self):
self.regions = sorted(self.regions, key=lambda r: (r.start, r.end))
if self.start is None or self.end is None:
if not self.regions:
return None # no coordinates and no exons to infer coordinates
else:
self.start = self.regions[0].start
self.end = self.regions[-1].end
return self.regions
def merge_regions(self):
if len(self.regions) == 0:
sys.stderr.write('Error: no sub-regions of ' + str(self) + '\n')
sys.exit(1)
non_overlapping_regions = [self.regions[0]]
for r in self.regions[1:]:
if r.start > non_overlapping_regions[-1].end:
non_overlapping_regions.append(r)
else:
prev_r = non_overlapping_regions[-1]
prev_r.end = r.end
prev_r.biotype = merge_fields(prev_r.biotype, r.biotype)
prev_r.feature = merge_fields(prev_r.feature, r.feature)
self.regions = non_overlapping_regions
return non_overlapping_regions
def __repr__(self):
return self.chrom + ':' + str(self.start) + '-' + str(self.end) + ',' + str(self.name)
def __str__(self):
return self.__repr__()
def merge_fields(consensus_field, other_field):
if not consensus_field:
consensus_field = other_field
else:
consensus_field = ','.join(set(consensus_field.split(',')) | set(other_field.split(',')))
return consensus_field
if __name__ == '__main__':
main()
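# Illustrative usage note (added for illustration; not part of the original script). The
# file name below is a placeholder. The script reads a 3+ column BED file and writes the
# merged regions to stdout, so a typical invocation is:
#
#     python group_and_merge_by_gene.py regions.bed > merged.bed
#
# With 7+ columns (name/strand/feature/biotype present) it additionally emits one gene-level
# summary line per named gene before that gene's merged sub-regions.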
| 40.32
| 176
| 0.56064
|
473544c8c8b645999afe0225b7a86140b29d46f6
| 1,783
|
py
|
Python
|
core/case/config.py
|
yaoanderson/testingRPA
|
b65cade39d111ac95ada443c3a1497d2ccb5b95d
|
[
"Apache-2.0"
] | 1
|
2019-11-29T06:04:58.000Z
|
2019-11-29T06:04:58.000Z
|
core/case/config.py
|
yaoanderson/testingRPA
|
b65cade39d111ac95ada443c3a1497d2ccb5b95d
|
[
"Apache-2.0"
] | null | null | null |
core/case/config.py
|
yaoanderson/testingRPA
|
b65cade39d111ac95ada443c3a1497d2ccb5b95d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf8
"""
-------------------------------------------------
File Name: config
Description:
Author: anderson
date: 2019-11-25
-------------------------------------------------
"""
from core.common.parseutil import ConfigParser
from core.case.policy import Policy
import subprocess
import os
class Config(object):
@classmethod
def generate_pict_input_file(cls, pict_input_file):
user_config_lines = list()
with open(pict_input_file, "w") as wf:
for item in ConfigParser.get_section('includeChars'):
                # generate the basic config combinations based on the [null, full, part] rule, which needs manual intervention.
user_config_lines.append("%s: " % item)
Policy.apply_policies(user_config_lines,
[char.strip() for char in ConfigParser.get_option('includeChars', item).split(",")],
int(ConfigParser.get_option('minLength', item)))
wf.writelines(user_config_lines)
@classmethod
def generate_pict_output_list(cls, pict_input_file):
# pict process then output result file
p = subprocess.Popen(os.path.join(os.getcwd(), "core/common/pict %s" % pict_input_file), shell=True,
stdout=subprocess.PIPE, universal_newlines=True)
p.wait()
os.remove(pict_input_file)
return p.stdout.readlines()
@classmethod
def get_pict_output_factor_list(cls, pict_output_list):
return [factor.strip() for factor in pict_output_list[0].split("\t")]
@classmethod
def get_pict_output_data_list(cls, pict_output_list):
return [line.strip().split("\t") for line in pict_output_list[1:]]
| 36.387755
| 122
| 0.601795
|
cc21c2fe44de8e0ac92b386a7dbf00dd5ec8451b
| 2,783
|
py
|
Python
|
demos/python_demos/single_human_pose_estimation_demo/detector.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | 1,031
|
2020-07-16T08:30:57.000Z
|
2022-03-30T19:42:52.000Z
|
demos/python_demos/single_human_pose_estimation_demo/detector.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | 966
|
2020-07-16T08:13:00.000Z
|
2022-03-31T18:09:18.000Z
|
demos/python_demos/single_human_pose_estimation_demo/detector.py
|
evgeny-izutov/open_model_zoo
|
2cd6145ef342fc9b7ccf32676af73f4a1cb8d9ba
|
[
"Apache-2.0"
] | 440
|
2020-07-16T12:52:50.000Z
|
2022-03-31T14:21:41.000Z
|
import os
import cv2
class Detector(object):
def __init__(self, ie, path_to_model_xml, label_class, scale=None, thr=0.3, device='CPU'):
self.OUTPUT_SIZE = 7
self.CHANNELS_SIZE = 3
self.model = ie.read_network(path_to_model_xml, os.path.splitext(path_to_model_xml)[0] + '.bin')
assert len(self.model.input_info) == 1, "Expected 1 input blob"
assert len(self.model.outputs) == 1, "Expected 1 output blob"
self._input_layer_name = next(iter(self.model.input_info))
self._output_layer_name = next(iter(self.model.outputs))
assert len(self.model.input_info[self._input_layer_name].input_data.shape) == 4 and \
self.model.input_info[self._input_layer_name].input_data.shape[1] == self.CHANNELS_SIZE, \
"Expected model output shape with %s channels " % (self.CHANNELS_SIZE)
assert len(self.model.outputs[self._output_layer_name].shape) == 4 and \
self.model.outputs[self._output_layer_name].shape[3] == self.OUTPUT_SIZE, \
"Expected model output shape with %s outputs" % (self.OUTPUT_SIZE)
self._ie = ie
self._exec_model = self._ie.load_network(self.model, device)
self._scale = scale
self._thr = thr
self._label_class = label_class
_, _, self.input_h, self.input_w = self.model.input_info[self._input_layer_name].input_data.shape
self._h = -1
self._w = -1
self.infer_time = -1
def _preprocess(self, img):
self._h, self._w, _ = img.shape
if self._h != self.input_h or self._w != self.input_w:
            img = cv2.resize(img, dsize=(self.input_w, self.input_h), fy=self._h / self.input_h,
                             fx=self._w / self.input_w)
img = img.transpose(2, 0, 1)
return img[None, ]
def _infer(self, prep_img):
t0 = cv2.getTickCount()
output = self._exec_model.infer(inputs={self._input_layer_name: prep_img})
self.infer_time = (cv2.getTickCount() - t0) / cv2.getTickFrequency()
return output
def _postprocess(self, bboxes):
def coord_translation(bbox):
xmin = int(self._w * bbox[0])
ymin = int(self._h * bbox[1])
xmax = int(self._w * bbox[2])
ymax = int(self._h * bbox[3])
w_box = xmax - xmin
h_box = ymax - ymin
return [xmin, ymin, w_box, h_box]
bboxes_new = [coord_translation(bbox[3:]) for bbox in bboxes if bbox[1] == self._label_class and bbox[2] > self._thr]
return bboxes_new
def detect(self, img):
img = self._preprocess(img)
output = self._infer(img)
bboxes = self._postprocess(output[self._output_layer_name][0][0])
return bboxes
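# Illustrative usage sketch (added for illustration; not part of the original file). The
# model path, image file and label index are placeholders, and the IECore import assumes
# the OpenVINO <= 2021 Python API that the read_network()/load_network() calls above use.
if __name__ == '__main__':
    from openvino.inference_engine import IECore

    ie = IECore()
    detector = Detector(ie, 'person-detection.xml', label_class=1, thr=0.3, device='CPU')
    frame = cv2.imread('frame.jpg')
    for x, y, w, h in detector.detect(frame):
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    cv2.imwrite('detections.jpg', frame)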
| 39.197183
| 125
| 0.619475
|
b439782e2a680f28dd8db89c84735dc5e34a3f5a
| 3,283
|
py
|
Python
|
setup.py
|
CarsonSlovoka/youtube-dl-cli
|
80de0ef2500f58292723532d372b740a964635f5
|
[
"Apache-2.0"
] | 1
|
2020-10-26T13:51:39.000Z
|
2020-10-26T13:51:39.000Z
|
setup.py
|
CarsonSlovoka/youtube-dl-cli
|
80de0ef2500f58292723532d372b740a964635f5
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
CarsonSlovoka/youtube-dl-cli
|
80de0ef2500f58292723532d372b740a964635f5
|
[
"Apache-2.0"
] | 1
|
2020-04-02T07:26:49.000Z
|
2020-04-02T07:26:49.000Z
|
import setuptools
from setuptools import setup, find_packages, find_namespace_packages
from setuptools.command.test import test as test_class
# from distutils.core import setup
from pathlib import Path
import sys
import unittest
if 'eny path':
import youtube_dl_cli
from youtube_dl_cli import __version__, __exe_name__, __author__, __description__
VERSION_NUMBER = __version__
DOWNLOAD_VERSION = __version__
PACKAGES_DIR = youtube_dl_cli.__name__
SETUP_NAME = PACKAGES_DIR.replace('_', '-')
ALIAS_NAME = __exe_name__
GITHUB_URL = f'https://github.com/CarsonSlovoka/{SETUP_NAME}/tree/master'
# find_package_modules = setuptools.command.build_py.build_py.find_package_modules
with open(Path(__file__).parent / Path('README.rst'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
with open(Path(__file__).parent / Path('requirements.txt')) as req_txt:
LIST_REQUIRES = [line.strip() for line in req_txt if not line.startswith('#') and line.strip() != '']
def test_setup():
from youtube_dl_cli.test.test_cli import CLITests
suite_list = [unittest.TestLoader().loadTestsFromTestCase(class_module) for class_module in (CLITests, )]
suite_class_set = unittest.TestSuite(suite_list)
# suite_function_set = unittest.TestSuite()
# suite_function_set.addTest(module.class('fun_name'))
suite = suite_class_set # pick one of two: suite_class_set, suite_function_set
# unittest.TextTestRunner(verbosity=1).run(suite) # self.verbosity = 0 # 0, 1, 2. unittest.TextTestResult
return suite
setup(
name=SETUP_NAME,
version=f'{VERSION_NUMBER}', # x.x.x.{dev, a, b, rc}
packages=find_packages(exclude=['youtube_dl_cli.test.*']), # ignore modules
include_package_data=True, # include any data files it finds inside your package directories that are specified by your MANIFEST.in
package_data={}, # {f'{PACKAGES_DIR}.config': ['gui.ui', f'static/{SETUP_NAME}/*.ico'],},
license="Apache-2.0",
author=' |'.join(__author__),
author_email='jackparadise520a@gmail.com',
install_requires=LIST_REQUIRES,
url=GITHUB_URL,
description=__description__,
long_description=LONG_DESCRIPTION,
long_description_content_type='text/x-rst',
keywords=['youtube', 'download'],
download_url=f'{GITHUB_URL}/tarball/v{DOWNLOAD_VERSION}',
python_requires='>=3.6.2,',
zip_safe=False,
classifiers=[ # https://pypi.org/classifiers/
'Topic :: Multimedia :: Video',
'Topic :: Multimedia :: Video :: Capture',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'Natural Language :: Chinese (Traditional)',
'Natural Language :: English',
'Operating System :: Microsoft',
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
entry_points={
'console_scripts': [
f'{ALIAS_NAME}=youtube_dl_cli.cli:main',
'ydl_cmd=youtube_dl_cli.cli:main',
],
},
test_suite='setup.test_setup', # `python setup.py test` will call this function. # return value must is `suite`
)
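# Illustrative notes (added for illustration; not part of the original file). After
# `pip install .` the entry_points above expose the CLI both under the alias taken from
# youtube_dl_cli.__exe_name__ and as `ydl_cmd`; `python setup.py test` invokes test_setup(),
# which wraps CLITests in a unittest suite as described in the comments above.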
| 36.477778
| 136
| 0.698447
|
3605447e39352819f631b262d7c92ad7317019ad
| 1,684
|
py
|
Python
|
Projects/project2/lib/MyUtils.py
|
jdiegoh3/distributed_computing
|
33088e741d35590d5699e8ecd9a35ff12b65f7f8
|
[
"MIT"
] | null | null | null |
Projects/project2/lib/MyUtils.py
|
jdiegoh3/distributed_computing
|
33088e741d35590d5699e8ecd9a35ff12b65f7f8
|
[
"MIT"
] | null | null | null |
Projects/project2/lib/MyUtils.py
|
jdiegoh3/distributed_computing
|
33088e741d35590d5699e8ecd9a35ff12b65f7f8
|
[
"MIT"
] | null | null | null |
import socket
class Elements(object):
def __init__(self, elements):
self.elements = elements
def add_element(self, id, process_info):
self.elements[id] = process_info
def remove_element(self, id):
try:
del self.elements[id]
except KeyError as error:
pass
def list_elements(self):
return self.elements
class FreeDevices(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class OccupiedDevices(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class UnClassifiedClients(Elements):
elements = {}
def __init__(self):
super().__init__(self.elements)
pass
class MessageHandler(object):
body = None
def __init__(self, message):
if not isinstance(message, str):
message = message.decode("utf-8")
self.body = message
def message_loads(self):
if self.body:
result = self.body.split("|")
return result
def send_message(host, port, message):
temporal_socket_instance = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
temporal_socket_instance.connect((host, port))
temporal_socket_instance.send(message)
result = temporal_socket_instance.recv(1024)
return result
class MessageBuilder(object):
message = ""
operation = None
def __init__(self, message_elements, op=None):
self.message += op + "|"
for string in message_elements:
self.message += str(string) + "|"
def get_message(self):
return self.message.encode()
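# Illustrative usage sketch (added for illustration; not part of the original module).
# Builds a pipe-delimited message and parses it back; send_message() needs a live server,
# so only the build/parse round trip and the registry classes are exercised here.
if __name__ == '__main__':
    raw = MessageBuilder(['device-42', 'READY'], op='REGISTER').get_message()
    print(raw)                                    # b'REGISTER|device-42|READY|'
    parts = MessageHandler(raw).message_loads()   # ['REGISTER', 'device-42', 'READY', '']
    print(parts)
    free = FreeDevices()
    free.add_element(parts[1], {'state': parts[2]})
    print(free.list_elements())                   # {'device-42': {'state': 'READY'}}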
| 21.87013
| 80
| 0.630048
|
5620dc51c8cca7f70fbec02dcfcbdac96018cf2a
| 6,469
|
py
|
Python
|
tools/calc_features_aug.py
|
r-pad/PoseCNN
|
0d2a39f3b5572474ed1a13228ac9ff6f31a134e6
|
[
"MIT"
] | null | null | null |
tools/calc_features_aug.py
|
r-pad/PoseCNN
|
0d2a39f3b5572474ed1a13228ac9ff6f31a134e6
|
[
"MIT"
] | null | null | null |
tools/calc_features_aug.py
|
r-pad/PoseCNN
|
0d2a39f3b5572474ed1a13228ac9ff6f31a134e6
|
[
"MIT"
] | 1
|
2022-01-07T11:15:31.000Z
|
2022-01-07T11:15:31.000Z
|
import _init_paths
import argparse
import os
import random
import time
import numpy as np
from object_pose_utils.datasets.pose_dataset import OutputTypes as otypes
from object_pose_utils.datasets.ycb_dataset import YcbDataset as YCBDataset
from object_pose_utils.datasets.image_processing import ColorJitter, ImageNormalizer
from object_pose_utils.datasets.ycb_occlusion_augmentation import YCBOcclusionAugmentor
from object_pose_utils.datasets.point_processing import PointShifter
from object_pose_utils.utils import to_np
from tqdm import tqdm, trange
from time import sleep
import contextlib
import sys
from featurization import PoseCNNFeaturizer, toPoseCNNImage, getObjectGTQuaternion
import torch
import scipy.io as scio
import os
import sys
module_path = os.path.abspath(os.path.join('tools'))
if module_path not in sys.path:
sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('lib'))
if module_path not in sys.path:
sys.path.append(module_path)
class DummyTqdmFile(object):
"""Dummy file-like that will write to tqdm"""
file = None
def __init__(self, file):
self.file = file
def write(self, x):
# Avoid print() second call (useless \n)
if len(x.rstrip()) > 0:
tqdm.write(x, file=self.file)
def flush(self):
return getattr(self.file, "flush", lambda: None)()
@contextlib.contextmanager
def std_out_err_redirect_tqdm():
orig_out_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
yield orig_out_err[0]
# Relay exceptions
except Exception as exc:
raise exc
# Always restore sys.stdout/err if necessary
finally:
sys.stdout, sys.stderr = orig_out_err
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_root', type=str, default = 'datasets/ycb/YCB_Video_Dataset',
help='Dataset root dir (''YCB_Video_Dataset'')')
parser.add_argument('--dataset_mode', type=str, default = 'train_syn_valid',
help='Dataset mode')
parser.add_argument('--num_augmentations', type=int, default = 0,
help='Number of augmented images per render')
parser.add_argument('--workers', type=int, default = 10, help='Number of data loading workers')
#parser.add_argument('--weights', type=str, help='PoseNetGlobal weights file')
parser.add_argument('--output_folder', type=str, help='Feature save location')
parser.add_argument('--object_indices', type=int, nargs='+', default = None, help='Object indices to featureize')
parser.add_argument('--start_index', type=int, default = 0, help='Starting augmentation index')
opt = parser.parse_args()
def main():
opt.manualSeed = random.randint(1, 10000)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if not os.path.exists(opt.output_folder):
os.makedirs(opt.output_folder)
num_points = 1000 #number of points on the input pointcloud
num_objects = 21
if(opt.object_indices is None):
opt.object_indices = list(range(1,num_objects+1))
estimator = PoseCNNFeaturizer()
output_format = [otypes.IMAGE,
otypes.DEPTH_IMAGE]
with std_out_err_redirect_tqdm() as orig_stdout:
preprocessors = []
postprocessors = []
if(opt.num_augmentations > 0):
preprocessors.extend([YCBOcclusionAugmentor(opt.dataset_root),
ColorJitter(),])
postprocessors.append(PointShifter())
dataset = YCBDataset(opt.dataset_root, mode = opt.dataset_mode,
object_list = opt.object_indices,
output_data = output_format,
resample_on_error = False,
preprocessors = preprocessors,
postprocessors = postprocessors,
image_size = [640, 480], num_points=1000)
        _, u_idxs = np.unique(list(zip(*dataset.image_list))[0], return_index = True)
dataset.image_list = np.array(dataset.image_list)[u_idxs].tolist()
dataset.list_obj = np.array(dataset.list_obj)[u_idxs].tolist()
classes = dataset.classes
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=opt.workers)
#pbar.set_description('Featurizing {}'.format(classes[cls]))
if(opt.num_augmentations > 0):
pbar_aug = trange(opt.start_index, opt.num_augmentations, file=orig_stdout, dynamic_ncols=True)
else:
pbar_aug = [None]
for aug_idx in pbar_aug:
pbar_save = tqdm(enumerate(dataloader), total = len(dataloader),
file=orig_stdout, dynamic_ncols=True)
for i, data in pbar_save:
if(len(data) == 0 or len(data[0]) == 0):
continue
img, depth = data
img = toPoseCNNImage(img[0])
depth = to_np(depth[0])
data_path = dataset.image_list[i]
path = '{}/data/{}-meta.mat'.format(dataset.dataset_root, dataset.getPath(i))
meta_data = scio.loadmat(path)
try:
seg = estimator(img, depth, meta_data)
except Exception as e:
print(e)
continue
for pose_idx, cls in enumerate(seg['rois'][:,1]):
cls = int(cls)
quat = getObjectGTQuaternion(meta_data, cls)
feat = seg['feats'][pose_idx]
fc6 = seg['fc6'][pose_idx]
if(opt.num_augmentations > 0):
output_filename = '{0}/data/{1}_{2}_{3}_feat.npz'.format(opt.output_folder,
data_path[0], classes[cls], aug_idx)
else:
output_filename = '{0}/data/{1}_{2}_feat.npz'.format(opt.output_folder,
data_path[0], classes[cls])
#pbar_save.set_description(output_filename)
if not os.path.exists(os.path.dirname(output_filename)):
os.makedirs(os.path.dirname(output_filename))
np.savez(output_filename, quat = quat, feat = feat, fc6 = fc6)
if __name__ == '__main__':
main()
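# Illustrative invocation (added for illustration; not part of the original script). All
# paths and values are placeholders; the flags correspond to the argparse options above.
#
#     python tools/calc_features_aug.py \
#         --dataset_root datasets/ycb/YCB_Video_Dataset \
#         --dataset_mode train_syn_valid \
#         --num_augmentations 2 \
#         --object_indices 1 2 3 \
#         --output_folder features_out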
| 40.943038
| 113
| 0.618488
|
9c0a372edcc736c9ae4158b135809c163b8e2b28
| 29,023
|
py
|
Python
|
catkin_tools/context.py
|
iwanders/catkin_tools
|
76bfe96cc18cdf4b4e88a1f764f73260f77843b5
|
[
"Apache-2.0"
] | null | null | null |
catkin_tools/context.py
|
iwanders/catkin_tools
|
76bfe96cc18cdf4b4e88a1f764f73260f77843b5
|
[
"Apache-2.0"
] | null | null | null |
catkin_tools/context.py
|
iwanders/catkin_tools
|
76bfe96cc18cdf4b4e88a1f764f73260f77843b5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements a class for representing a catkin workspace context"""
from __future__ import print_function
import os
import re
import sys
from . import metadata
from .common import getcwd
from .common import printed_fill
from .common import remove_ansi_escape
from .common import terminal_width
from .metadata import find_enclosing_workspace
from .resultspace import get_resultspace_environment
from .terminal_color import ColorMapper
color_mapper = ColorMapper()
clr = color_mapper.clr
class Context(object):
"""Encapsulates a catkin workspace's settings which affect build results.
This class will validate some of the settings on assignment using the
filesystem, but it will never modify the filesystem. For instance, it will
raise an exception if the source space does not exist, but it will not
create a folder for the build space if it does not already exist.
This context can be locked, so that changing the members is prevented.
"""
DEFAULT_SOURCE_SPACE = 'src'
DEFAULT_BUILD_SPACE = 'build'
DEFAULT_DEVEL_SPACE = 'devel'
DEFAULT_INSTALL_SPACE = 'install'
STORED_KEYS = [
'extend_path',
'source_space',
'build_space',
'devel_space',
'install_space',
'isolate_devel',
'install',
'isolate_install',
'cmake_args',
'make_args',
'use_internal_make_jobserver',
'use_env_cache',
'catkin_make_args',
'whitelist',
'blacklist',
]
KEYS = STORED_KEYS + [
'workspace',
'profile',
'space_suffix',
]
@classmethod
def load(
cls,
workspace_hint=None,
profile=None,
opts=None,
strict=False,
append=False,
remove=False,
load_env=True
):
"""Load a context from a given workspace and profile with optional
modifications.
This function will try to load a given context from the specified
workspace with the following resolution strategy:
- existing workspace enclosing given workspace path
- existing workspace enclosing "."
- given workspace path
- "."
If a workspace cannot be found, it will assume that the user is
specifying a new workspace, unless `strict=True` is given. In this
latter case, this function will return None.
:param workspace_hint: The hint used to find a workspace (see description for more details)
:type workspace_hint: str
:param profile: The profile to load the context from, if the profile is None, the active profile is used
:type profile: str
:param opts: An argparse options namespace containing context keys to override stored context keys
:type opts: namespace
:param strict: Causes this function to return None if a workspace isn't found
:type strict: bool
:param append: Appends any list-type opts to existing opts
:type append: bool
:param remove: Removes any list-type opts from existing opts
:type remove: bool
:param load_env: Control whether the context loads the resultspace
environment for the full build context
:type load_env: bool
:returns: A potentially valid Context object constructed from the given arguments
:rtype: Context
"""
# Initialize dictionary version of opts namespace
opts_vars = vars(opts) if opts else {}
# Get the workspace (either the given directory or the enclosing ws)
workspace_hint = workspace_hint or opts_vars.get('workspace', None) or getcwd()
workspace = find_enclosing_workspace(workspace_hint)
if not workspace:
if strict or not workspace_hint:
return None
else:
workspace = workspace_hint
opts_vars['workspace'] = workspace
# Get the active profile
profile = profile or opts_vars.get('profile', None) or metadata.get_active_profile(workspace)
opts_vars['profile'] = profile
# Initialize empty metadata/args
config_metadata = {}
context_args = {}
# Get the metadata stored in the workspace if it was found
if workspace:
config_metadata = metadata.get_metadata(workspace, profile, 'config')
context_args.update(config_metadata)
# User-supplied args are used to update stored args
# Only update context args with given opts which are not none
for (k, v) in opts_vars.items():
if k in Context.KEYS and v is not None:
# Handle list-type arguments with append/remove functionality
if type(context_args.get(k, None)) is list and type(v) is list:
if append:
context_args[k] += v
elif remove:
context_args[k] = [w for w in context_args[k] if w not in v]
else:
context_args[k] = v
else:
context_args[k] = v
# Create the build context
ctx = Context(**context_args)
# Don't load the cmake config if it's not needed
if load_env:
ctx.load_env()
return ctx
@classmethod
def save(cls, context):
"""Save a context in the associated workspace and profile."""
metadata.update_metadata(
context.workspace,
context.profile,
'config',
context.get_stored_dict())
def get_stored_dict(self):
"""Get the context parameters which should be stored persistently."""
return dict([(k, getattr(self, k)) for k in Context.STORED_KEYS])
def __init__(
self,
workspace=None,
profile=None,
extend_path=None,
source_space=None,
build_space=None,
devel_space=None,
install_space=None,
isolate_devel=False,
install=False,
isolate_install=False,
cmake_args=None,
make_args=None,
use_internal_make_jobserver=True,
use_env_cache=False,
catkin_make_args=None,
space_suffix=None,
whitelist=None,
blacklist=None,
**kwargs
):
"""Creates a new Context object, optionally initializing with parameters
:param workspace: root of the workspace, defaults to the enclosing workspace
:type workspace: str
:param profile: profile name, defaults to the default profile
:type profile: str
:param extend_path: catkin result-space to extend
:type extend_path: str
:param source_space: relative location of source space, defaults to '<workspace>/src'
:type source_space: str
        :param build_space: relative target location of build space, defaults to '<workspace>/build'
:type build_space: str
:param devel_space: relative target location of devel space, defaults to '<workspace>/devel'
:type devel_space: str
:param install_space: relative target location of install space, defaults to '<workspace>/install'
:type install_space: str
:param isolate_devel: each package will have its own develspace if True, default is False
:type isolate_devel: bool
:param install: packages will be installed by invoking ``make install``, defaults to False
:type install: bool
:param isolate_install: packages will be installed to separate folders if True, defaults to False
:type isolate_install: bool
:param cmake_args: extra cmake arguments to be passed to cmake for each package
:type cmake_args: list
:param make_args: extra make arguments to be passed to make for each package
:type make_args: list
        :param use_internal_make_jobserver: true if this configuration should use an internal make jobserver
:type use_internal_make_jobserver: bool
:param use_env_cache: true if this configuration should cache job environments loaded from resultspaces
:type use_env_cache: bool
:param catkin_make_args: extra make arguments to be passed to make for each catkin package
:type catkin_make_args: list
:param space_suffix: suffix for build, devel, and install spaces which are not explicitly set.
:type space_suffix: str
:param whitelist: a list of packages to build by default
:type whitelist: list
:param blacklist: a list of packages to ignore by default
:type blacklist: list
:raises: ValueError if workspace or source space does not exist
"""
self.__locked = False
# Check for unhandled context options
if len(kwargs) > 0:
print('Warning: Unhandled config context options: {}'.format(kwargs), file=sys.stderr)
# Validation is done on assignment
# Handle *space assignment and defaults
self.workspace = workspace
self.extend_path = extend_path if extend_path else None
ss = '' if space_suffix is None else space_suffix
self.profile = profile
self.source_space = Context.DEFAULT_SOURCE_SPACE if source_space is None else source_space
self.build_space = Context.DEFAULT_BUILD_SPACE + ss if ss or build_space is None else build_space
self.devel_space = Context.DEFAULT_DEVEL_SPACE + ss if ss or devel_space is None else devel_space
self.install_space = Context.DEFAULT_INSTALL_SPACE + ss if ss or install_space is None else install_space
self.destdir = os.environ['DESTDIR'] if 'DESTDIR' in os.environ else None
# Handle package whitelist/blacklist
self.whitelist = whitelist or []
self.blacklist = blacklist or []
# Handle build options
self.isolate_devel = isolate_devel
self.install = install
self.isolate_install = isolate_install
# Handle additional cmake and make arguments
self.cmake_args = cmake_args or []
self.make_args = make_args or []
self.use_internal_make_jobserver = use_internal_make_jobserver
self.use_env_cache = use_env_cache
self.catkin_make_args = catkin_make_args or []
# List of packages in the workspace is set externally
self.packages = []
# List of warnings about the workspace is set internally
self.warnings = []
# Initialize environment settings set by load_env
self.manual_cmake_prefix_path = None
self.cached_cmake_prefix_path = None
self.env_cmake_prefix_path = None
self.cmake_prefix_path = None
def load_env(self):
# Check for CMAKE_PREFIX_PATH in manual cmake args
self.manual_cmake_prefix_path = ''
for cmake_arg in self.cmake_args:
prefix_path_match = re.findall('-DCMAKE_PREFIX_PATH.*?=(.+)', cmake_arg)
if len(prefix_path_match) > 0:
self.manual_cmake_prefix_path = prefix_path_match[0]
# Load and update mirror of 'sticky' CMake information
if self.install:
sticky_env = get_resultspace_environment(self.install_space_abs, quiet=True)
else:
sticky_env = get_resultspace_environment(self.devel_space_abs, quiet=True)
self.cached_cmake_prefix_path = ''
if 'CMAKE_PREFIX_PATH' in sticky_env:
split_result_cmake_prefix_path = sticky_env.get('CMAKE_PREFIX_PATH', '').split(':')
if len(split_result_cmake_prefix_path) > 1:
self.cached_cmake_prefix_path = ':'.join(split_result_cmake_prefix_path[1:])
# Either load an explicit environment or get it from the current environment
self.env_cmake_prefix_path = ''
if self.extend_path:
extended_env = get_resultspace_environment(self.extend_path, quiet=False)
self.env_cmake_prefix_path = extended_env.get('CMAKE_PREFIX_PATH', '')
if not self.env_cmake_prefix_path:
print(clr("@!@{rf}Error:@| Could not load environment from workspace: '%s', "
"target environment (env.sh) does not provide 'CMAKE_PREFIX_PATH'" % self.extend_path))
print(extended_env)
sys.exit(1)
else:
# Get the current CMAKE_PREFIX_PATH
if 'CMAKE_PREFIX_PATH' in os.environ:
split_result_cmake_prefix_path = os.environ['CMAKE_PREFIX_PATH'].split(':')
if len(split_result_cmake_prefix_path) > 1 and (
(not self.install and split_result_cmake_prefix_path[0] == self.devel_space_abs) or
(self.install and split_result_cmake_prefix_path[0] == self.install_space_abs)):
self.env_cmake_prefix_path = ':'.join(split_result_cmake_prefix_path[1:])
else:
self.env_cmake_prefix_path = os.environ.get('CMAKE_PREFIX_PATH', '').rstrip(':')
        # Add warnings based on conflicting CMAKE_PREFIX_PATH
if self.cached_cmake_prefix_path and self.extend_path:
ep_not_in_lcpp = any([self.extend_path in p for p in self.cached_cmake_prefix_path.split(':')])
if not ep_not_in_lcpp:
self.warnings += [clr(
"Your workspace is configured to explicitly extend a "
"workspace which yields a CMAKE_PREFIX_PATH which is "
"different from the cached CMAKE_PREFIX_PATH used last time "
"this workspace was built.\\n\\n"
"If you want to use a different CMAKE_PREFIX_PATH you "
"should call @{yf}`catkin clean --all`@| to remove all "
"references to the previous CMAKE_PREFIX_PATH.\\n\\n"
"@{cf}Cached CMAKE_PREFIX_PATH:@|\\n\\t@{yf}%s@|\\n"
"@{cf}Other workspace to extend:@|\\n\\t@{yf}{_Context__extend_path}@|\\n"
"@{cf}Other workspace's CMAKE_PREFIX_PATH:@|\\n\\t@{yf}%s@|"
% (self.cached_cmake_prefix_path, self.env_cmake_prefix_path))]
elif self.env_cmake_prefix_path and\
self.cached_cmake_prefix_path and\
self.env_cmake_prefix_path != self.cached_cmake_prefix_path:
self.warnings += [clr(
"Your current environment's CMAKE_PREFIX_PATH is different "
"from the cached CMAKE_PREFIX_PATH used the last time this "
"workspace was built.\\n\\n"
"If you want to use a different CMAKE_PREFIX_PATH you should "
"call @{yf}`catkin clean --all`@| to remove all references to "
"the previous CMAKE_PREFIX_PATH.\\n\\n"
"@{cf}Cached CMAKE_PREFIX_PATH:@|\\n\\t@{yf}%s@|\\n"
"@{cf}Current CMAKE_PREFIX_PATH:@|\\n\\t@{yf}%s@|" %
(self.cached_cmake_prefix_path, self.env_cmake_prefix_path))]
# Check if prefix path is different from the environment prefix path
if self.manual_cmake_prefix_path:
self.cmake_prefix_path = self.manual_cmake_prefix_path
elif self.cached_cmake_prefix_path:
self.cmake_prefix_path = self.cached_cmake_prefix_path
else:
self.cmake_prefix_path = self.env_cmake_prefix_path
def summary(self, notes=[]):
# Add warnings (missing dirs in CMAKE_PREFIX_PATH, etc)
summary_warnings = self.warnings
if not self.initialized():
summary_warnings += [clr(
"Workspace `@{yf}{_Context__workspace}@|` is not yet "
"initialized. Use the `catkin init` or run `catkin config "
"--init`.")]
if not self.source_space_exists():
summary_warnings += [clr(
"Source space `@{yf}{_Context__source_space_abs}@|` does not yet exist.")]
summary = [
[
clr("@{cf}Profile:@| @{yf}{profile}@|"),
clr("@{cf}Extending:@| {extend_mode} @{yf}{extend}@|"),
clr("@{cf}Workspace:@| @{yf}{_Context__workspace}@|"),
clr("@{cf}Source Space:@| {source_missing} @{yf}{_Context__source_space_abs}@|"),
clr("@{cf}Build Space:@| {build_missing} @{yf}{_Context__build_space_abs}@|"),
clr("@{cf}Devel Space:@| {devel_missing} @{yf}{_Context__devel_space_abs}@|"),
clr("@{cf}Install Space:@| {install_missing} @{yf}{_Context__install_space_abs}@|"),
clr("@{cf}DESTDIR:@| @{yf}{_Context__destdir}@|"),
],
[
clr("@{cf}Isolate Develspaces:@| @{yf}{_Context__isolate_devel}@|"),
clr("@{cf}Install Packages:@| @{yf}{_Context__install}@|"),
clr("@{cf}Isolate Installs:@| @{yf}{_Context__isolate_install}@|"),
],
[
clr("@{cf}Additional CMake Args:@| @{yf}{cmake_args}@|"),
clr("@{cf}Additional Make Args:@| @{yf}{make_args}@|"),
clr("@{cf}Additional catkin Make Args:@| @{yf}{catkin_make_args}@|"),
clr("@{cf}Internal Make Job Server:@| @{yf}{_Context__use_internal_make_jobserver}@|"),
clr("@{cf}Cache Job Environments:@| @{yf}{_Context__use_env_cache}@|"),
],
[
clr("@{cf}Whitelisted Packages:@| @{yf}{whitelisted_packages}@|"),
clr("@{cf}Blacklisted Packages:@| @{yf}{blacklisted_packages}@|"),
]
]
# Construct string for extend value
if self.extend_path:
extend_value = self.extend_path
extend_mode = clr('@{gf}[explicit]@|')
elif self.cached_cmake_prefix_path:
extend_value = self.cmake_prefix_path
extend_mode = clr(' @{gf}[cached]@|')
elif (self.env_cmake_prefix_path and
self.env_cmake_prefix_path != self.devel_space_abs and
self.env_cmake_prefix_path != self.install_space_abs):
extend_value = self.cmake_prefix_path
extend_mode = clr(' @{gf}[env]@|')
else:
extend_value = 'None'
extend_mode = clr(' ')
def existence_str(path):
return clr(' @{gf}[exists]@|' if os.path.exists(path) else '@{rf}[missing]@|')
subs = {
'profile': self.profile,
'extend_mode': extend_mode,
'extend': extend_value,
'cmake_prefix_path': (self.cmake_prefix_path or ['Empty']),
'cmake_args': ' '.join(self.__cmake_args or ['None']),
'make_args': ' '.join(self.__make_args or ['None']),
'catkin_make_args': ', '.join(self.__catkin_make_args or ['None']),
'source_missing': existence_str(self.source_space_abs),
'build_missing': existence_str(self.build_space_abs),
'devel_missing': existence_str(self.devel_space_abs),
'install_missing': existence_str(self.install_space_abs),
'whitelisted_packages': ' '.join(self.__whitelist or ['None']),
'blacklisted_packages': ' '.join(self.__blacklist or ['None']),
}
subs.update(**self.__dict__)
# Get the width of the shell
width = terminal_width()
max_length = 0
groups = []
for group in summary:
for index, line in enumerate(group):
group[index] = line.format(**subs)
max_length = min(width, max(max_length, len(remove_ansi_escape(group[index]))))
groups.append("\n".join(group))
divider = clr('@{pf}' + ('-' * max_length) + '@|')
warning_divider = clr('@{rf}' + ('-' * max_length) + '@|')
# Format warnings
if len(summary_warnings) == 0:
notes = [clr("@!@{cf}Workspace configuration appears valid.@|")] + notes
warnings_joined = ''
else:
warnings_formatted = [
printed_fill(clr('@!@{rf}WARNING:@| ') + sw.format(**subs), max_length)
for sw in summary_warnings]
warnings_joined = (
"\n\n" + warning_divider + "\n" +
("\n" + warning_divider + "\n").join(warnings_formatted) +
"\n" + warning_divider + "\n")
return (divider + "\n" +
("\n" + divider + "\n").join(groups) + "\n" + divider + "\n" +
((("\n\n").join(notes) + "\n" + divider) if notes else '') +
warnings_joined)
@property
def workspace(self):
return self.__workspace
@workspace.setter
def workspace(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
# Validate Workspace
if not os.path.exists(value):
raise ValueError("Workspace path '{0}' does not exist.".format(value))
self.__workspace = os.path.abspath(value)
@property
def extend_path(self):
return self.__extend_path
@extend_path.setter
def extend_path(self, value):
if value is not None:
if not os.path.isabs(value):
value = os.path.join(self.workspace, value)
if not os.path.exists(value):
raise ValueError("Resultspace path '{0}' does not exist.".format(value))
self.__extend_path = value
@property
def source_space_abs(self):
return self.__source_space_abs
@property
def source_space(self):
return self.__source_space
@source_space.setter
def source_space(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__source_space = value
self.__source_space_abs = os.path.join(self.__workspace, value)
def source_space_exists(self):
"Returns true if the source space exists"
return os.path.exists(self.source_space_abs) and os.path.isdir(self.source_space_abs)
def initialized(self):
"""Check if this context is initialized."""
return self.workspace == find_enclosing_workspace(self.workspace)
@property
def build_space_abs(self):
return self.__build_space_abs
@property
def build_space(self):
return self.__build_space
@build_space.setter
def build_space(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
# TODO: check that build space was not run with a different context before
self.__build_space = value
self.__build_space_abs = os.path.join(self.__workspace, value)
@property
def devel_space_abs(self):
return self.__devel_space_abs
@property
def devel_space(self):
return self.__devel_space
@devel_space.setter
def devel_space(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
# TODO: check that devel space was not run with a different context before
self.__devel_space = value
self.__devel_space_abs = os.path.join(self.__workspace, value)
@property
def install_space_abs(self):
return self.__install_space_abs
@property
def install_space(self):
return self.__install_space
@install_space.setter
def install_space(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
# TODO: check that install space was not run with a different context before
self.__install_space = value
self.__install_space_abs = os.path.join(self.__workspace, value)
@property
def destdir(self):
return self.__destdir
@destdir.setter
def destdir(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__destdir = value
@property
def isolate_devel(self):
return self.__isolate_devel
@isolate_devel.setter
def isolate_devel(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__isolate_devel = value
@property
def install(self):
return self.__install
@install.setter
def install(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__install = value
@property
def isolate_install(self):
return self.__isolate_install
@isolate_install.setter
def isolate_install(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__isolate_install = value
@property
def cmake_args(self):
return self.__cmake_args
@cmake_args.setter
def cmake_args(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__cmake_args = value
@property
def make_args(self):
return self.__make_args
@make_args.setter
def make_args(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__make_args = value
@property
def use_internal_make_jobserver(self):
return self.__use_internal_make_jobserver
@use_internal_make_jobserver.setter
def use_internal_make_jobserver(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__use_internal_make_jobserver = value
@property
def use_env_cache(self):
return self.__use_env_cache
@use_env_cache.setter
def use_env_cache(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__use_env_cache = value
@property
def catkin_make_args(self):
return self.__catkin_make_args
@catkin_make_args.setter
def catkin_make_args(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__catkin_make_args = value
@property
def packages(self):
return self.__packages
@packages.setter
def packages(self, value):
if self.__locked:
raise RuntimeError("Setting of context members is not allowed while locked.")
self.__packages = value
@property
def whitelist(self):
return self.__whitelist
@whitelist.setter
def whitelist(self, value):
self.__whitelist = value
@property
def blacklist(self):
return self.__blacklist
@blacklist.setter
def blacklist(self, value):
self.__blacklist = value
def package_build_space(self, package):
return os.path.join(self.build_space_abs, package.name)
def package_devel_space(self, package):
if self.isolate_devel:
return os.path.join(self.devel_space_abs, package.name)
else:
return self.devel_space_abs
def package_install_space(self, package):
if self.isolate_install:
return os.path.join(self.install_space_abs, package.name)
else:
return self.install_space_abs
def package_dest_path(self, package):
if self.install:
if self.destdir is None:
return self.package_install_space(package.name)
else:
return os.path.join(
self.destdir,
self.package_install_space(package.name).lstrip(os.sep)
)
else:
return self.package_devel_space(package.name)
def package_final_path(self, package):
if self.install:
return self.package_install_space(package.name)
else:
return self.package_devel_space(package.name)
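# Illustrative usage sketch (added for illustration; not part of the original module).
# Because this module uses relative imports, the snippet is written as it would appear in
# a separate script with catkin_tools installed; the workspace path is a placeholder.
#
#     from catkin_tools.context import Context
#
#     ctx = Context.load(workspace_hint='/path/to/catkin_ws', strict=True, load_env=False)
#     if ctx is None:
#         print('No enclosing catkin workspace found.')
#     else:
#         print(ctx.summary())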
| 39.594816
| 113
| 0.625228
|
7660cca1e5bfbeb8c30bdea336a8fb6239b595c0
| 1,653
|
py
|
Python
|
locations/views.py
|
PatelKeviin/Locations-API-using-Django-REST-Framework
|
80c527722d875b859b5cb127967a58381594e058
|
[
"MIT"
] | null | null | null |
locations/views.py
|
PatelKeviin/Locations-API-using-Django-REST-Framework
|
80c527722d875b859b5cb127967a58381594e058
|
[
"MIT"
] | null | null | null |
locations/views.py
|
PatelKeviin/Locations-API-using-Django-REST-Framework
|
80c527722d875b859b5cb127967a58381594e058
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from django.db import connection
from .serializers import LocationSerializer
from rest_framework.response import Response
from rest_framework.status import (HTTP_200_OK)
def dictfetch(cursor, location, fetchall=False):
"""
    Return rows from a cursor as dictionaries: all rows when fetchall=True, otherwise a
    single row (or (location, -1, -1) when the query matched nothing).
"""
desc = cursor.description
if fetchall:
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
]
fetched = cursor.fetchone()
if fetched is not None:
return dict(zip([col[0] for col in desc], fetched))
else:
return dict(zip([col[0] for col in desc], (location, -1, -1)))
def get_location_data(locations):
data = []
with connection.cursor() as cursor:
if len(locations) == 0:
cursor.execute("SELECT loc, lat, lon FROM 'locations_location'")
data = dictfetch(cursor, None, fetchall=True)
for loc_ in locations:
cursor.execute("SELECT loc, lat, lon FROM 'locations_location' WHERE loc = %s", [loc_])
data.append(dictfetch(cursor, loc_))
return data
# Create your views here.
class LocationViewSet(viewsets.ViewSet):
"""
A model-less API.
"""
def list(self, request):
locations = request.GET.getlist('location')
location_data = get_location_data(locations)
my_serializer = LocationSerializer(data=location_data, many=True)
if my_serializer.is_valid():
return Response(my_serializer.data, status=HTTP_200_OK)
else:
return Response({'error': 'something went wrong :('})
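# Illustrative wiring sketch (added for illustration; not part of the original file). Shows
# how this model-less ViewSet might be registered in urls.py; the prefix and basename are
# assumptions.
#
#     # urls.py
#     from rest_framework.routers import DefaultRouter
#     from locations.views import LocationViewSet
#
#     router = DefaultRouter()
#     router.register(r'locations', LocationViewSet, basename='location')
#     urlpatterns = router.urls
#
# A request like GET /locations/?location=Sydney&location=Perth then reaches list(), which
# looks each name up in locations_location and returns its lat/lon (or -1/-1 when missing).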
| 29.517857
| 99
| 0.645493
|
7244f77ac018e6e3e90a47af8a5c36138c59bb70
| 352
|
py
|
Python
|
iOS/others/otherjsonpractices/readJSON.py
|
leochoo/ios-sfcbustimer
|
e7e49a3c7a8270583ab2907ad1d8f9826341ef5b
|
[
"MIT"
] | 4
|
2019-03-25T08:26:45.000Z
|
2019-04-27T02:56:37.000Z
|
iOS/others/otherjsonpractices/readJSON.py
|
leochoo/ios-sfcbustimer
|
e7e49a3c7a8270583ab2907ad1d8f9826341ef5b
|
[
"MIT"
] | 38
|
2018-10-12T06:13:46.000Z
|
2019-04-06T16:44:33.000Z
|
iOS/others/otherjsonpractices/readJSON.py
|
leochoo/SFC-Bustimer
|
e7e49a3c7a8270583ab2907ad1d8f9826341ef5b
|
[
"MIT"
] | 1
|
2019-04-27T03:09:33.000Z
|
2019-04-27T03:09:33.000Z
|
import json
import requests
#loading json
with open("weekdaytest.json","r") as file:
timetable = json.load(file)
# print(timetable["hour"])
# print(timetable["hour"][0]["7"])
print(timetable["hour"][0]["7"][0])
print(timetable["hour"][0]["7"][1])
# print(timetable["hour"][0]["7"][0]["minute"])
# print(timetable["hour"][0]["7"][1]["minute"])
| 20.705882
| 47
| 0.625
|
dc75d3258ad62f45e7daa42f8847482b185ba052
| 265
|
py
|
Python
|
Lesson_7/pickle_example.py
|
mirdinemris/Python_lesson_2
|
bf2fce1dbd6ae635d6aa631703b9930b164972b0
|
[
"MIT"
] | null | null | null |
Lesson_7/pickle_example.py
|
mirdinemris/Python_lesson_2
|
bf2fce1dbd6ae635d6aa631703b9930b164972b0
|
[
"MIT"
] | 1
|
2020-04-14T14:13:57.000Z
|
2020-04-14T14:13:57.000Z
|
Lesson_7/pickle_example.py
|
mirdinemris/Python_lesson_2
|
bf2fce1dbd6ae635d6aa631703b9930b164972b0
|
[
"MIT"
] | null | null | null |
import pickle
data = {'1': (1,2,3), '2':['a','b','c'], '3': {0,1,2,0}}
print(data)
# Serialization
with open('data.pickle', 'wb') as f:
pickle.dump(data, f)
# Deserialization
with open('data.pickle', 'rb') as f:
data_load = pickle.load(f)
print(data_load)
| 18.928571
| 56
| 0.6
|
d4e46320107b9efea9f7c7475ea17920dcfd99cf
| 802
|
py
|
Python
|
var/spack/repos/builtin/packages/xvinfo/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/xvinfo/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8
|
2021-11-09T20:28:40.000Z
|
2022-03-15T03:26:33.000Z
|
var/spack/repos/builtin/packages/xvinfo/package.py
|
jeanbez/spack
|
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class Xvinfo(AutotoolsPackage, XorgPackage):
"""xvinfo prints out the capabilities of any video adaptors associated
with the display that are accessible through the X-Video extension."""
homepage = "https://cgit.freedesktop.org/xorg/app/xvinfo"
xorg_mirror_path = "app/xvinfo-1.1.3.tar.gz"
version('1.1.3', sha256='1c1c2f97abfe114389e94399cc7bf3dfd802ed30ad41ba23921d005bd8a6c39f')
depends_on('libxv')
depends_on('libx11')
depends_on('xproto@7.0.25:')
depends_on('pkgconfig', type='build')
depends_on('util-macros', type='build')
| 33.416667
| 95
| 0.739401
|
9002f5b5db71aac3882a863e4977d5eafe252096
| 2,158
|
py
|
Python
|
utils/pagination.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
utils/pagination.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
utils/pagination.py
|
yuyuyuhaoshi/Blog-BE
|
a485d5159076d619d4fd6019fe9b96ac04020d4d
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
from rest_framework.exceptions import NotFound
from django.core.paginator import InvalidPage
from django.utils import six
class CustomPageNumberPagination(PageNumberPagination):
page_size = 20
page_size_query_param = 'page_size'
page_query_param = 'page'
max_page_size = 100
def paginate_queryset(self, queryset, request, view=None):
"""
        Update self.page_size if page_size_query_param appears in the request URL.
Paginate a queryset if required, either returning a
page object, or `None` if pagination is not configured for this view.
"""
page_size = self.get_page_size(request)
# this is the only difference from the PageNumberPagination's paginate_queryset function
if not page_size:
return None
else:
self.page_size = page_size
paginator = self.django_paginator_class(queryset, page_size)
page_number = request.query_params.get(self.page_query_param, 1)
if page_number in self.last_page_strings:
page_number = paginator.num_pages
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(exc)
)
raise NotFound(msg)
if paginator.num_pages > 1 and self.template is not None:
# The browsable API should display pagination controls.
self.display_page_controls = True
self.request = request
return list(self.page)
def get_paginated_response(self, data):
return Response(OrderedDict([
('page_size', self.page_size),
('current_page', self.page.number),
('last_page', self.page.paginator.num_pages),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('count', self.page.paginator.count),
('data', data)
]))
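# Illustrative configuration sketch (added for illustration; not part of the original file).
# Enables this paginator globally; the dotted path assumes the file lives at utils/pagination.py
# as in this repository.
#
#     # settings.py
#     REST_FRAMEWORK = {
#         'DEFAULT_PAGINATION_CLASS': 'utils.pagination.CustomPageNumberPagination',
#     }
#
# Clients may then request e.g. ?page=2&page_size=50 (capped at max_page_size=100); responses
# carry page_size, current_page, last_page, next, previous, count and data keys.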
| 36.576271
| 96
| 0.662651
|
31a650ffaac512f1378d1716dbbba9d4cad96357
| 1,014
|
py
|
Python
|
twitter/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
seankim84/twitter
|
71cbcd821effc4e77588b195d770ef003887d322
|
[
"MIT"
] | null | null | null |
twitter/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
seankim84/twitter
|
71cbcd821effc4e77588b195d770ef003887d322
|
[
"MIT"
] | null | null | null |
twitter/contrib/sites/migrations/0003_set_site_domain_and_name.py
|
seankim84/twitter
|
71cbcd821effc4e77588b195d770ef003887d322
|
[
"MIT"
] | null | null | null |
"""
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
"domain": "sean.kim84@gmail.com",
"name": "twitter",
},
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model("sites", "Site")
Site.objects.update_or_create(
id=settings.SITE_ID, defaults={"domain": "example.com", "name": "example.com"}
)
class Migration(migrations.Migration):
dependencies = [("sites", "0002_alter_domain_unique")]
operations = [migrations.RunPython(update_site_forward, update_site_backward)]
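# Editor's sketch (not part of the original migration): one way to exercise the
# forward/backward functions directly in a Django test. `apps` here is the global app
# registry, a reasonable stand-in for the historical registry the migration framework
# normally passes in; it assumes django.contrib.sites is installed and SITE_ID is set.
from django.apps import apps
from django.test import TestCase


class SiteMigrationFunctionsTest(TestCase):
    def test_forward_then_backward(self):
        update_site_forward(apps, schema_editor=None)
        Site = apps.get_model("sites", "Site")
        self.assertEqual(Site.objects.get(id=settings.SITE_ID).name, "twitter")

        update_site_backward(apps, schema_editor=None)
        self.assertEqual(Site.objects.get(id=settings.SITE_ID).domain, "example.com")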
| 28.971429 | 129 | 0.686391 |
3d2277708de50aa88581b2972cc68dc300e2f6c2 | 999 | py | Python
utim-esp32/modules/inisetup.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | ["Apache-2.0"] | null | null | null
utim-esp32/modules/inisetup.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | ["Apache-2.0"] | null | null | null
utim-esp32/modules/inisetup.py | connax-utim/utim-micropython | 23c30f134af701a44a8736b09c8c201e13760d18 | ["Apache-2.0"] | null | null | null |
import uos
from flashbdev import bdev
def check_bootsec():
buf = bytearray(bdev.SEC_SIZE)
bdev.readblocks(0, buf)
empty = True
for b in buf:
if b != 0xff:
empty = False
break
if empty:
return True
fs_corrupted()
def fs_corrupted():
import time
while 1:
print("""\
FAT filesystem appears to be corrupted. If you had important data there, you
may want to make a flash snapshot to try to recover it. Otherwise, perform
factory reprogramming of MicroPython firmware (completely erase flash, followed
by firmware programming).
""")
time.sleep(3)
def setup():
check_bootsec()
print("Performing initial setup")
uos.VfsFat.mkfs(bdev)
vfs = uos.VfsFat(bdev)
uos.mount(vfs, '/flash')
uos.chdir('/flash')
with open("boot.py", "w") as f:
f.write("""\
import network
sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
sta_if.connect('connax', '1berezka')
""")
return vfs
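# Editor's sketch (not part of this module): how a port's _boot.py might fall back to
# inisetup.setup() when mounting the flash filesystem fails. Stock boot scripts differ
# between MicroPython ports and versions, so treat this as illustrative only.
import uos
from flashbdev import bdev
import inisetup

try:
    vfs = uos.VfsFat(bdev)
    uos.mount(vfs, '/flash')
except OSError:
    # First boot or corrupted filesystem: format and write the default boot.py.
    vfs = inisetup.setup()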
| 22.2 | 79 | 0.645646 |
3a501b92d20183c9d3c14415e1b3268369a795fc | 19,616 | py | Python
libspn/tests/perf_dense_generator_path.py | pronobis/libspn | b98141ea5a609a02706433220758e58f46bd3f5e | ["MIT"] | 22 | 2019-03-01T15:58:20.000Z | 2022-02-18T10:32:04.000Z
libspn/tests/perf_dense_generator_path.py | pronobis/libspn | b98141ea5a609a02706433220758e58f46bd3f5e | ["MIT"] | 10 | 2019-03-03T18:15:24.000Z | 2021-05-04T09:02:55.000Z
libspn/tests/perf_dense_generator_path.py | pronobis/libspn | b98141ea5a609a02706433220758e58f46bd3f5e | ["MIT"] | 8 | 2019-03-22T20:45:20.000Z | 2021-05-03T13:22:09.000Z |
#!/usr/bin/env python3
import tensorflow as tf
import numpy as np
from context import libspn as spn
import time
import argparse
import colorama as col
import sys
from tensorflow.python.client import timeline
import os
col.init()
red = col.Fore.RED
blue = col.Fore.BLUE
green = col.Fore.GREEN
yellow = col.Fore.YELLOW
magenta = col.Fore.MAGENTA
def print1(str, file, color=yellow):
if file:
print(str, file=file)
print(color + str + col.Style.RESET_ALL)
def print2(str, file):
if file:
print(str, file=file)
print(blue + str + col.Style.RESET_ALL)
class Ops:
def dense_sing(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
# Set node-type as single-node
node_type = spn.DenseSPNGenerator.NodeType.SINGLE
# Create a dense generator
        gen = spn.DenseSPNGenerator(
            num_decomps=num_decomps,
            num_subsets=num_subsets,
            num_mixtures=num_mixtures,
            num_input_mixtures=num_input_mixtures,
            balanced=balanced,
            node_type=node_type,
            input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                        if input_dist == "RAW"
                        else spn.DenseSPNGenerator.InputDist.MIXTURE))
        # Generate a dense SPN with single-op nodes, then generate all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate path ops based on inf_type and log
if log:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
else:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=False)
mpe_path_gen.get_mpe_path(root)
path_ops = [mpe_path_gen.counts[inp] for inp in
(inputs if isinstance(inputs, list) else [inputs])]
return root, spn.initialize_weights(root), path_ops
def dense_block(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
        # Set node-type as block-node
node_type = spn.DenseSPNGenerator.NodeType.BLOCK
# Create a dense generator
        gen = spn.DenseSPNGenerator(
            num_decomps=num_decomps,
            num_subsets=num_subsets,
            num_mixtures=num_mixtures,
            num_input_mixtures=num_input_mixtures,
            balanced=balanced,
            node_type=node_type,
            input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                        if input_dist == "RAW"
                        else spn.DenseSPNGenerator.InputDist.MIXTURE))
        # Generate a dense SPN with block nodes, then generate all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate path ops based on inf_type and log
if log:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
else:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=False)
mpe_path_gen.get_mpe_path(root)
path_ops = [mpe_path_gen.counts[inp] for inp in
(inputs if isinstance(inputs, list) else [inputs])]
return root, spn.initialize_weights(root), path_ops
def dense_layer(inputs, num_decomps, num_subsets, num_mixtures,
num_input_mixtures, balanced, input_dist, inf_type, log=False):
        # Set node-type as layer-node
node_type = spn.DenseSPNGenerator.NodeType.LAYER
# Create a dense generator
        gen = spn.DenseSPNGenerator(
            num_decomps=num_decomps,
            num_subsets=num_subsets,
            num_mixtures=num_mixtures,
            num_input_mixtures=num_input_mixtures,
            balanced=balanced,
            node_type=node_type,
            input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                        if input_dist == "RAW"
                        else spn.DenseSPNGenerator.InputDist.MIXTURE))
        # Generate a dense SPN with layer nodes, then generate all weights in the network
root = gen.generate(inputs, root_name="root")
spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
# Generate path ops based on inf_type and log
if log:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=True)
else:
mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=False)
mpe_path_gen.get_mpe_path(root)
path_ops = [mpe_path_gen.counts[inp] for inp in
(inputs if isinstance(inputs, list) else [inputs])]
return root, spn.initialize_weights(root), path_ops
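# Editor's note (not part of the original benchmark): dense_sing, dense_block and
# dense_layer differ only in the NodeType handed to DenseSPNGenerator. A single
# parameterised helper, sketched below against the same libspn calls used above,
# captures the shared structure; the three wrappers remain in the benchmark so that
# each variant is reported under its own op name (op_fun.__name__) in the tables.
def _dense_common(inputs, num_decomps, num_subsets, num_mixtures, num_input_mixtures,
                  balanced, input_dist, inf_type, node_type, log=False):
    gen = spn.DenseSPNGenerator(
        num_decomps=num_decomps,
        num_subsets=num_subsets,
        num_mixtures=num_mixtures,
        num_input_mixtures=num_input_mixtures,
        balanced=balanced,
        node_type=node_type,
        input_dist=(spn.DenseSPNGenerator.InputDist.RAW
                    if input_dist == "RAW"
                    else spn.DenseSPNGenerator.InputDist.MIXTURE))
    root = gen.generate(inputs, root_name="root")
    spn.generate_weights(root, tf.initializers.random_uniform(0.0, 1.0))
    mpe_path_gen = spn.MPEPath(value_inference_type=inf_type, log=log)
    mpe_path_gen.get_mpe_path(root)
    path_ops = [mpe_path_gen.counts[inp] for inp in
                (inputs if isinstance(inputs, list) else [inputs])]
    return root, spn.initialize_weights(root), path_ops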
class OpTestResult:
"""Result of a single test of a single op."""
def __init__(self, op_name, on_gpu, spn_size, tf_size, input_dist, setup_time,
weights_init_time, run_times, output_correct):
self.op_name = op_name
self.on_gpu = on_gpu
self.spn_size = spn_size
self.tf_size = tf_size
self.input_dist = input_dist
self.setup_time = setup_time
self.weights_init_time = weights_init_time
self.run_times = run_times
self.output_correct = output_correct
class TestResults:
"""Results for a single test for multiple ops and devices."""
def __init__(self, test_name, cpu_results, gpu_results):
self.test_name = test_name
self.cpu_results = cpu_results
self.gpu_results = gpu_results
def print(self, file):
def get_header(dev):
return ("%4s %11s %9s %8s %11s %11s %17s %15s %14s %10s" %
(dev, 'op', 'SPN_size', 'TF_size', 'input_dist', 'setup_time',
'weights_init_time', 'first_run_time', 'rest_run_time',
'correct'))
def get_res(res):
"""Helper function printing a single result."""
return ("%16s %7d %7d% 11s %11.2f %15.2f %15.2f %14.2f %12s" %
(res.op_name, res.spn_size, res.tf_size, res.input_dist,
res.setup_time * 1000, res.weights_init_time * 1000,
res.run_times[0] * 1000, np.mean(res.run_times[1:]) * 1000,
res.output_correct))
# Print results
print1("\n-----------------------", file)
print1("%s" % self.test_name, file)
print1("-----------------------", file)
print1(get_header("CPU"), file)
for res in sorted(self.cpu_results, key=lambda x: len(x.op_name)):
            print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
print1(get_header("GPU"), file)
for res in sorted(self.gpu_results, key=lambda x: len(x.op_name)):
            print1(get_res(res), file, (red if res.input_dist == "RAW" else green))
class PerformanceTest:
def __init__(self, num_input_rows, num_input_vars, num_input_vals, num_decomps,
num_subsets, num_mixtures, num_input_mixtures, balanced, num_runs,
without_cpu, without_gpu, log_devs, profile, profiles_dir, file):
self.num_input_rows = num_input_rows
self.num_input_vars = num_input_vars
self.num_input_vals = num_input_vals
self.num_decomps = num_decomps
self.num_subsets = num_subsets
self.num_mixtures = num_mixtures
self.num_input_mixtures = num_input_mixtures
self.balanced = balanced
self.num_runs = num_runs
self.without_cpu = without_cpu
self.without_gpu = without_gpu
self.log_devs = log_devs
self.profile = profile
self.profiles_dir = profiles_dir
self.file = file
self.test_failed = False
print1("Params:", file)
print1("- num_input_rows=%s" % num_input_rows, file)
print1("- num_input_vars=%s" % num_input_vars, file)
print1("- num_input_vals=%s" % num_input_vals, file)
print1("- num_decomps=%s" % num_decomps, file)
print1("- num_subsets=%s" % num_subsets, file)
print1("- num_mixtures=%s" % num_mixtures, file)
print1("- num_input_mixtures=%s" % num_input_mixtures, file)
print1("- balanced=%s" % balanced, file)
print1("- num_runs=%s" % num_runs, file)
print1("", file=file)
def _true_output(self):
true_out = np.zeros((1, self.num_input_rows,
self.num_input_vars*self.num_input_vals))
true_out[:, :, list(range(0, self.num_input_vars*self.num_input_vals,
self.num_input_vals))] = 1
return true_out
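    # Editor's illustration (not part of the original test): with 2 variables and 3
    # values per variable, the expected counts mark indicator column 0 for variable 0
    # and column 3 for variable 1, matching inputs that are all value index 0.
    #
    #   >>> import numpy as np
    #   >>> true_out = np.zeros((1, 1, 2 * 3))
    #   >>> true_out[:, :, list(range(0, 2 * 3, 3))] = 1
    #   >>> true_out
    #   array([[[1., 0., 0., 1., 0., 0.]]])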
def _run_op_test(self, op_fun, inputs, input_dist='MIXTURE',
inf_type=spn.InferenceType.MARGINAL, log=False, on_gpu=True):
"""Run a single test for a single op."""
# Preparations
op_name = op_fun.__name__
device_name = '/gpu:0' if on_gpu else '/cpu:0'
# Print
print2("--> %s: on_gpu=%s, inputs_shape=%s, input_dist=%s, inference=%s, \
node_type=%s, log=%s"
% (op_name, on_gpu, inputs.shape, input_dist, ("MPE" if inf_type ==
spn.InferenceType.MPE else "MARGINAL"),
("SINGLE" if op_name == "dense_sing" else "BLOCK" if
op_name == "dense_block" else "LAYER"), log), self.file)
# Compute true output
true_out = self._true_output()
# Create graph
tf.reset_default_graph()
with tf.device(device_name):
# Create input
inputs_pl = spn.IndicatorLeaf(num_vars=self.num_input_vars,
num_vals=self.num_input_vals)
# Create dense SPN
start_time = time.time()
root, init_ops, ops = op_fun(inputs_pl, self.num_decomps, self.num_subsets,
self.num_mixtures, self.num_input_mixtures,
self.balanced, input_dist, inf_type, log)
setup_time = time.time() - start_time
# Get num of SPN ops
spn_size = root.get_num_nodes()
# Get num of graph ops
tf_size = len(tf.get_default_graph().get_operations())
# Run op multiple times
output_correct = True
with tf.Session(config=tf.ConfigProto(
allow_soft_placement=False,
log_device_placement=self.log_devs)) as sess:
# Initialize weights of all the sum node types in the graph
start_time = time.time()
init_ops.run()
weights_init_time = time.time() - start_time
run_times = []
# Create feed dictionary
feed = {inputs_pl: inputs}
for n in range(self.num_runs):
# Run
start_time = time.time()
out = sess.run(ops, feed_dict=feed)
run_times.append(time.time() - start_time)
# Test value only for MARGINAL inference
try:
np.testing.assert_almost_equal(out, true_out)
except AssertionError:
output_correct = False
self.test_failed = True
if self.profile:
# Add additional options to trace the session execution
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
out = sess.run(ops, feed_dict=feed, options=options,
run_metadata=run_metadata)
# Create the Timeline object, and write it to a json file
fetched_timeline = timeline.Timeline(run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format()
if not os.path.exists(self.profiles_dir):
os.makedirs(self.profiles_dir)
file_name = op_name
file_name += ("_GPU_" if on_gpu else "_CPU_")
file_name += input_dist
file_name += ("_ SINGLE" if op_name == "dense_sing" else
"_BLOCK" if op_name == "dense_block" else "_LAYER")
file_name += ("_MPE-LOG" if log else "_MPE") if inf_type == \
spn.InferenceType.MPE else ("_MARGINAL-LOG" if log else
"_MARGINAL")
with open('%s/timeline_path_%s.json' % (self.profiles_dir,
file_name), 'w') as f:
f.write(chrome_trace)
# Return stats
return OpTestResult(op_name, on_gpu, spn_size, tf_size, input_dist, setup_time,
weights_init_time, run_times, output_correct)
def _run_test(self, test_name, op_funs, inputs, inf_type, log):
"""Run a single test for multiple ops and devices."""
cpu_results = []
gpu_results = []
for op_fun in op_funs:
if not self.without_cpu:
cpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=False))
cpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=False))
if not self.without_gpu:
gpu_results.append( # Input Dist = RAW
self._run_op_test(op_fun, inputs, input_dist="RAW",
inf_type=inf_type, log=log, on_gpu=True))
gpu_results.append( # Input Dist = MIXTURE
self._run_op_test(op_fun, inputs, input_dist="MIXTURE",
inf_type=inf_type, log=log, on_gpu=True))
return TestResults(test_name, cpu_results, gpu_results)
def run(self):
"""Run all tests."""
print1("Running tests:", self.file)
results = []
        inputs = np.ones((self.num_input_rows, self.num_input_vars), dtype=int) * 0
r = self._run_test('InferenceType: MARGINAL',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=False)
results.append(r)
r = self._run_test('InferenceType: MARGINAL-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MARGINAL, log=True)
results.append(r)
r = self._run_test('InferenceType: MPE',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=False)
results.append(r)
r = self._run_test('InferenceType: MPE-LOG',
[Ops.dense_sing, Ops.dense_block, Ops.dense_layer],
inputs, inf_type=spn.InferenceType.MPE, log=True)
results.append(r)
# Print results
for res in results:
res.print(self.file)
if self.test_failed:
print("\n ATLEAST ONE TEST FAILED!")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--num-input-rows', default=200, type=int,
help="Num of rows of inputs")
parser.add_argument('--num-input-vars', default=5, type=int,
help="Num of input variables")
parser.add_argument('--num-input-vals', default=5, type=int,
help="Num of input values per variable")
parser.add_argument('--num-decomps', default=1, type=int,
help="Num of decompositions at each level")
parser.add_argument('--num-subsets', default=5, type=int,
help="Num of subsets in each desomposition")
parser.add_argument('--num-mixtures', default=5, type=int,
help="Num of mixtures for each subset")
parser.add_argument('--num-input-mixtures', default=5, type=int,
help="Num of input mixtures")
parser.add_argument('--balanced', default=True, action='store_true',
help="Generated dense SPN is balanced between decompositions")
parser.add_argument('--num-runs', default=50, type=int,
help="Num of times each test is run")
parser.add_argument('--log-devices', action='store_true',
help="Log on which device op is run. Affects run time!")
parser.add_argument('--without-cpu', action='store_true',
help="Do not run CPU tests")
parser.add_argument('--without-gpu', action='store_true',
help="Do not run GPU tests")
parser.add_argument('--profile', default=False, action='store_true',
help="Run test one more time and profile")
parser.add_argument('--profiles-dir', default='profiles', type=str,
help="Run test one more time and profile")
parser.add_argument('--save-to', default='', type=str,
help="Save results to file")
args = parser.parse_args()
    # To ensure that SPN graph sizes of the 'MIXTURE' and 'RAW' networks are consistent
if args.num_input_mixtures is not None:
if args.num_input_mixtures != args.num_input_vals:
sys.exit('ERROR: num_input_mixtures must be == num_input_vals')
else:
if args.num_mixtures != args.num_input_vals:
sys.exit('ERROR: num_mixtures must be == num_input_vals')
# Open a file
f = None
if args.save_to:
f = open(args.save_to, 'w')
try:
t = PerformanceTest(args.num_input_rows, args.num_input_vars,
args.num_input_vals, args.num_decomps, args.num_subsets,
args.num_mixtures, args.num_input_mixtures, args.balanced,
args.num_runs, args.without_cpu, args.without_gpu,
args.log_devices, args.profile, args.profiles_dir, f)
t.run()
finally:
if f is not None:
f.close()
if __name__ == '__main__':
main()
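# Editor's usage sketch (not in the original file): typical invocations, using only
# flags defined by the argparse setup above.
#
#   python3 perf_dense_generator_path.py --num-input-rows 500 --num-runs 100
#   python3 perf_dense_generator_path.py --without-cpu --profile --profiles-dir profiles
#   python3 perf_dense_generator_path.py --save-to results.txt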
| 44.480726 | 88 | 0.555771 |