| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
config_path = attributes.get('config_path')
tokens = {}
def build_config_key(value_def, config_key):
key = value_def.config_key or config_key
return '%s.%s' % (config_path, key) if config_path else key
def build_token(name, value_def):
config_key = build_config_key(value_def, name)
value_token = ValueToken.from_definition(
value_def, namespace, config_key)
getters.register_value_proxy(namespace, value_token, value_def.help)
tokens[name] = value_token
return name, build_property(value_token)
def build_attr(name, attribute):
if not isinstance(attribute, ValueTypeDefinition):
return name, attribute
return build_token(name, attribute)
attributes = dict(build_attr(*item)
for item in six.iteritems(attributes))
attributes['_tokens'] = tokens
return attributes
|
def build_attributes(cls, attributes, namespace)
|
Return an attributes dictionary with ValueTokens replaced by a
property which returns the config value.
| 3.724664
| 3.478972
| 1.070622
|
def cache_wrapper(func):
@functools.wraps(func)
def inner_wrapper(self, *args, **kwargs):
value = getattr(self, cache_name, UndefToken)
if value != UndefToken:
return value
ret = func(self, *args, **kwargs)
setattr(self, cache_name, ret)
return ret
return inner_wrapper
return cache_wrapper
|
def cache_as_field(cache_name)
|
Cache a function's return value as the field 'cache_name'.
| 2.256817
| 2.271019
| 0.993746
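A minimal usage sketch of the decorator above; the `Widget` class and the cached field name are illustrative, and `cache_as_field` is the definition from this row:

```python
class Widget(object):

    @cache_as_field('_answer')
    def answer(self):
        print('computing...')
        return 42

w = Widget()
w.answer()   # prints 'computing...' and returns 42
w.answer()   # returns 42 straight from the cached '_answer' attribute
```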
|
value = proxy.namespace.get(proxy.config_key, proxy.default)
if value is UndefToken:
raise errors.ConfigurationError("%s is missing value for: %s" %
(proxy.namespace, proxy.config_key))
try:
return proxy.validator(value)
except errors.ValidationError as e:
raise errors.ConfigurationError("%s failed to validate %s: %s" %
(proxy.namespace, proxy.config_key, e))
|
def extract_value(proxy)
|
Given a value proxy, retrieve a value from a namespace, raising an
exception if no value is found or the value does not validate.
| 3.730338
| 3.516491
| 1.060812
|
def reader(config_key, default=UndefToken, namespace=None):
config_namespace = config.get_namespace(namespace or reader_namespace)
return validator(_read_config(config_key, config_namespace, default))
return reader
|
def build_reader(validator, reader_namespace=config.DEFAULT)
|
A factory method for creating a custom config reader from a validation
function.
:param validator: a validation function which accepts one argument (the
configuration value), and returns that value cast to
the appropriate type.
:param reader_namespace: the default namespace to use. Defaults to
`DEFAULT`.
| 5.592813
| 6.53708
| 0.855552
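A hedged sketch of how such a reader might be built and used; the key name and default are illustrative, and the configuration must already be loaded before the reader is called:

```python
# Build a reader that casts config values to int, bound to the DEFAULT namespace.
read_int = build_reader(int)

# After configuration has been loaded into that namespace:
# port = read_int('server.port', default=8080)
```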
|
names = configuration_namespaces.keys() if all_names else [name]
for name in names:
yield get_namespace(name)
|
def get_namespaces_from_names(name, all_names)
|
Return a generator which yields namespace objects.
| 6.941687
| 5.105514
| 1.359645
|
if name not in configuration_namespaces:
configuration_namespaces[name] = ConfigNamespace(name)
return configuration_namespaces[name]
|
def get_namespace(name)
|
Return a :class:`ConfigNamespace` by name, creating the
namespace if it does not exist.
| 4.074572
| 3.295712
| 1.236325
|
for namespace in get_namespaces_from_names(name, all_names):
for value_proxy in namespace.get_value_proxies():
value_proxy.reset()
|
def reload(name=DEFAULT, all_names=False)
|
Reload one or all :class:`ConfigNamespace`. Reload clears the cache of
:mod:`staticconf.schema` and :mod:`staticconf.getters`, allowing them to
pick up the latest values in the namespace.
Defaults to reloading just the DEFAULT namespace.
:param name: the name of the :class:`ConfigNamespace` to reload
:param all_names: If True, reload all namespaces, and ignore `name`
| 5.695298
| 10.150599
| 0.56108
|
for namespace in get_namespaces_from_names(name, all_names):
all(value_proxy.get_value() for value_proxy in namespace.get_value_proxies())
|
def validate(name=DEFAULT, all_names=False)
|
Validate all registered keys after loading configuration.
Missing values or values which do not pass validation raise
:class:`staticconf.errors.ConfigurationError`. By default only validates
the `DEFAULT` namespace.
:param name: the namespace to validate
:type name: string
:param all_names: if True validates all namespaces and ignores `name`
:type all_names: boolean
| 6.653617
| 7.73291
| 0.860429
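A typical load-then-validate sequence might look like the sketch below, assuming staticconf's YAML loader is available; the filename is illustrative:

```python
import staticconf

# Load values into the DEFAULT namespace, then fail fast if any registered
# key is missing or fails validation (validate() is the function shown above).
staticconf.YamlConfiguration('app.yaml')
staticconf.config.validate()
```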
|
duplicate_keys = set(base_conf) & set(config_data)
if not duplicate_keys:
return
msg = "Duplicate keys in config: %s" % duplicate_keys
if raise_error:
raise errors.ConfigurationError(msg)
log.info(msg)
return True
|
def has_duplicate_keys(config_data, base_conf, raise_error)
|
Compare two dictionaries for duplicate keys. If raise_error is True
then raise an exception, otherwise log the duplicates and return True.
| 2.90595
| 2.637082
| 1.101957
|
def compare_func(filename):
try:
return os.path.getmtime(filename)
except OSError:
if err_logger is not None:
err_logger(filename)
return -1
return compare_func
|
def build_compare_func(err_logger=None)
|
Returns a compare_func that can be passed to MTimeComparator.
The returned compare_func first tries os.path.getmtime(filename),
then calls err_logger(filename) if that fails. If err_logger is None,
then it does nothing. err_logger is always called within the context of
an OSError raised by os.path.getmtime(filename). Information on this
error can be retrieved by calling sys.exc_info inside of err_logger.
| 2.713017
| 2.133706
| 1.271505
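A small sketch of wiring an error logger into the comparator; the logger setup and path are illustrative:

```python
import logging

log = logging.getLogger(__name__)

def log_missing(filename):
    # Called from inside the OSError handler, so exc_info picks up the error.
    log.warning("Could not stat %s", filename, exc_info=True)

compare = build_compare_func(err_logger=log_missing)
compare('/path/that/does/not/exist')   # logs a warning and returns -1
```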
|
config_dict = {}
for dotted_key, value in self.get_config_values().items():
subkeys = dotted_key.split('.')
d = config_dict
for key in subkeys:
d = d.setdefault(key, value if key == subkeys[-1] else {})
return config_dict
|
def get_config_dict(self)
|
Reconstruct the nested structure of this object's configuration
and return it as a dict.
| 3.227608
| 3.009708
| 1.072399
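The reconstruction can be illustrated standalone: dotted keys become nested dictionaries. The sample keys below are illustrative:

```python
flat = {'db.host': 'localhost', 'db.port': 5432, 'debug': True}

config_dict = {}
for dotted_key, value in flat.items():
    subkeys = dotted_key.split('.')
    d = config_dict
    for key in subkeys:
        # The last component gets the value; intermediate components get dicts.
        d = d.setdefault(key, value if key == subkeys[-1] else {})

# config_dict == {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}
```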
|
def format_desc(desc):
return "%s (Type: %s, Default: %s)\n%s" % (
desc.name,
desc.validator.__name__.replace('validate_', ''),
desc.default,
desc.help or '')
def format_namespace(key, desc_list):
return "\nNamespace: %s\n%s" % (
key,
'\n'.join(sorted(format_desc(desc) for desc in desc_list)))
def namespace_cmp(item):
name, _ = item
return chr(0) if name == DEFAULT else name
return '\n'.join(format_namespace(*desc) for desc in
sorted(six.iteritems(self.descriptions),
key=namespace_cmp))
|
def view_help(self)
|
Return a help message describing all the statically configured keys.
| 4.1055
| 3.907318
| 1.050721
|
if (force or self.should_check) and self.file_modified():
return self.reload()
|
def reload_if_changed(self, force=False)
|
If the file(s) being watched by this object have changed,
their configuration will be loaded again using `config_loader`.
Otherwise this is a noop.
:param force: If True ignore the `min_interval` and proceed to
file modified comparisons. To force a reload use
:func:`reload` directly.
| 10.047168
| 10.602758
| 0.947599
|
watcher = ConfigurationWatcher(
build_loader_callable(loader_func, filename, namespace=namespace),
filename,
min_interval=min_interval,
reloader=ReloadCallbackChain(namespace=namespace),
comparators=comparators,
)
watcher.load_config()
return cls(watcher)
|
def load(
cls,
filename,
namespace,
loader_func,
min_interval=0,
comparators=None,
)
|
Create a new :class:`ConfigurationWatcher` and load the initial
configuration by calling `loader_func`.
:param filename: a filename or list of filenames to monitor for changes
:param namespace: the name of a namespace to use when loading
configuration. All config data from `filename` will
end up in a :class:`ConfigNamespace` with this name
:param loader_func: a function which accepts two arguments and uses
loader functions from :mod:`staticconf.loader` to
load configuration data into a namespace. The
arguments are `filename` and `namespace`
:param min_interval: minimum number of seconds to wait between calls to
:func:`os.path.getmtime` to check if a file has
been modified.
:param comparators: a list of classes which support the
:class:`IComparator` interface which are used to determine if a config
file has been modified. See ConfigurationWatcher::__init__.
:returns: a :class:`ConfigFacade`
| 6.22314
| 7.221387
| 0.861765
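A hedged usage sketch, assuming staticconf's YAML loader serves as the `loader_func` and that the facade delegates `reload_if_changed()` to its watcher; the filename and namespace are illustrative:

```python
import staticconf

facade = ConfigFacade.load(
    'app.yaml',                    # file to watch
    'my_service',                  # ConfigNamespace to populate
    staticconf.YamlConfiguration,  # loader_func(filename, namespace=...)
    min_interval=3,                # stat the file at most every 3 seconds
)

# Periodically (e.g. per request or on a timer):
facade.reload_if_changed()
```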
|
if isinstance(value, six.string_types):
msg = "Invalid iterable of type(%s): %s"
raise ValidationError(msg % (type(value), value))
try:
return iterable_type(value)
except TypeError:
raise ValidationError("Invalid iterable: %s" % (value))
|
def _validate_iterable(iterable_type, value)
|
Convert the iterable to iterable_type, or raise a
ValidationError.
| 3.323351
| 3.270536
| 1.016149
|
def validate_list_of_type(value):
return [item_validator(item) for item in validate_list(value)]
return validate_list_of_type
|
def build_list_type_validator(item_validator)
|
Return a function which validates that the value is a list of items
which are validated using item_validator.
| 3.400809
| 3.061677
| 1.110767
|
def validate_mapping(value):
return dict(item_validator(item) for item in validate_list(value))
return validate_mapping
|
def build_map_type_validator(item_validator)
|
Return a function which validates that the value is a mapping of
items. The function should return pairs of items that will be
passed to the `dict` constructor.
| 5.218432
| 4.235464
| 1.23208
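A sketch of an item validator that yields the (key, value) pairs the `dict` constructor expects; the entry format is illustrative and the module's own `validate_list` is assumed to be in scope:

```python
def validate_host_entry(item):
    # e.g. item == {'name': 'web1', 'address': '10.0.0.1'}
    return item['name'], item['address']

validate_host_map = build_map_type_validator(validate_host_entry)
validate_host_map([{'name': 'web1', 'address': '10.0.0.1'}])
# -> {'web1': '10.0.0.1'}
```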
|
namespace.register_proxy(value_proxy)
config.config_help.add(
value_proxy.config_key, value_proxy.validator, value_proxy.default,
namespace.get_name(), help_text)
|
def register_value_proxy(namespace, value_proxy, help_text)
|
Register a value proxy with the namespace, and add the help_text.
| 5.6459
| 5.176255
| 1.090731
|
def proxy_register(key_name, default=UndefToken, help=None, namespace=None):
name = namespace or getter_namespace or config.DEFAULT
namespace = config.get_namespace(name)
return proxy_factory.build(validator, namespace, key_name, default, help)
return proxy_register
|
def build_getter(validator, getter_namespace=None)
|
Create a getter function for retrieving values from the config cache.
Getters will default to the DEFAULT namespace.
| 7.832561
| 7.403394
| 1.057969
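A hedged sketch of creating and using a typed getter; the key name and default are illustrative:

```python
# Uses float as the casting validator for the getter.
get_float = build_getter(float)

# Registers a ValueProxy under 'request.timeout' in the DEFAULT namespace;
# the proxy resolves to the configured value once configuration is loaded.
timeout = get_float('request.timeout', default=1.5)
```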
|
proxy_attrs = validator, namespace, config_key, default
proxy_key = repr(proxy_attrs)
if proxy_key in self.proxies:
return self.proxies[proxy_key]
value_proxy = proxy.ValueProxy(*proxy_attrs)
register_value_proxy(namespace, value_proxy, help)
return self.proxies.setdefault(proxy_key, value_proxy)
|
def build(self, validator, namespace, config_key, default, help)
|
Build or retrieve a ValueProxy from the attributes. Proxies are
keyed using a repr because default values can be mutable types.
| 3.783701
| 2.861487
| 1.322285
|
A = 0.01 * niter
if bounds is not None:
bounds = np.asarray(bounds)
project = lambda x: np.clip(x, bounds[:, 0], bounds[:, 1])
if args is not None:
# freeze function arguments
def funcf(x, **kwargs):
return func(x, *args, **kwargs)
N = len(x0)
x = x0
for k in range(niter):
ak = a/(k+1.0+A)**alpha
ck = c/(k+1.0)**gamma
Deltak = np.random.choice([-1, 1], size=N)
fkwargs = dict()
if paired:
fkwargs['seed'] = np.random.randint(0, np.iinfo(np.uint32).max)
if bounds is None:
grad = (funcf(x + ck*Deltak, **fkwargs) - funcf(x - ck*Deltak, **fkwargs)) / (2*ck*Deltak)
x -= ak*grad
else:
# ensure evaluation points are feasible
xplus = project(x + ck*Deltak)
xminus = project(x - ck*Deltak)
grad = (funcf(xplus, **fkwargs) - funcf(xminus, **fkwargs)) / (xplus-xminus)
x = project(x - ak*grad)
# print 100 status updates if disp=True
if disp and (k % (niter//100)) == 0:
print(x)
if callback is not None:
callback(x)
message = 'terminated after reaching max number of iterations'
return OptimizeResult(fun=funcf(x), x=x, nit=niter, nfev=2*niter, message=message, success=True)
|
def minimizeSPSA(func, x0, args=(), bounds=None, niter=100, paired=True,
a=1.0, alpha=0.602, c=1.0, gamma=0.101,
disp=False, callback=None)
|
Minimization of an objective function by a simultaneous perturbation
stochastic approximation algorithm.
This algorithm approximates the gradient of the function by finite differences
along stochastic directions Deltak. The elements of Deltak are drawn from
+- 1 with probability one half. The gradient is approximated from the
symmetric difference f(xk + ck*Deltak) - f(xk - ck*Deltak), where the evaluation
step size ck is scaled according to ck = c/(k+1)**gamma.
The algorithm takes a step of size ak = a/(0.01*niter+k+1)**alpha along the
negative gradient.
See Spall, IEEE, 1998, 34, 817-823 for guidelines about how to choose the algorithm's
parameters (a, alpha, c, gamma).
Parameters
----------
func: callable
objective function to be minimized:
called as `func(x, *args)`;
if `paired=True`, it is additionally called with the keyword argument `seed`
x0: array-like
initial guess for parameters
args: tuple
extra arguments to be supplied to func
bounds: array-like
bounds on the variables
niter: int
number of iterations after which to terminate the algorithm
paired: boolean
if True, evaluate the two perturbed points with the same random seed when estimating the gradient
a: float
scaling parameter for step size
alpha: float
scaling exponent for step size
c: float
scaling parameter for evaluation step size
gamma: float
scaling exponent for evaluation step size
disp: boolean
whether to output status updates during the optimization
callback: callable
called after each iteration, as callback(xk), where xk are the current parameters
Returns
-------
`scipy.optimize.OptimizeResult` object
| 2.940775
| 2.731941
| 1.076442
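A usage sketch of the optimizer above on a noisy quadratic; the objective is illustrative, and with `paired=True` it must accept a `seed` keyword:

```python
import numpy as np

def obj(x, seed=None):
    # Noisy quadratic with its minimum at (1, -2).
    rng = np.random.RandomState(seed)
    return np.sum((x - np.array([1.0, -2.0]))**2) + 0.01 * rng.randn()

res = minimizeSPSA(obj, x0=np.array([0.0, 0.0]), niter=200, paired=True)
print(res.x)   # approximately [ 1., -2.]
```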
|
search = True
# check whether function is ascending or not
if ascending is None:
if errorcontrol:
testkwargs.update(dict(type_='smaller', force=True))
fa = func.test0(a, **testkwargs)
fb = func.test0(b, **testkwargs)
else:
fa = func(a) < 0
fb = func(b) < 0
if fa and not fb:
ascending = True
elif fb and not fa:
ascending = False
else:
if disp:
print('Warning: func(a) and func(b) do not have opposing signs -> no search done')
if outside == 'raise':
raise BisectException()
search = False
# refine interval until it has reached size xtol, except if root outside
while (b-a > xtol) and search:
mid = (a+b)/2.0
if ascending:
if ((not errorcontrol) and (func(mid) < 0)) or \
(errorcontrol and func.test0(mid, **testkwargs)):
a = mid
else:
b = mid
else:
if ((not errorcontrol) and (func(mid) < 0)) or \
(errorcontrol and func.test0(mid, **testkwargs)):
b = mid
else:
a = mid
if disp:
print('bisect bounds', a, b)
# interpolate linearly to get zero
if errorcontrol:
ya, yb = func(a)[0], func(b)[0]
else:
ya, yb = func(a), func(b)
m = (yb-ya) / (b-a)
res = a-ya/m
if disp:
print('bisect final value', res)
return res
|
def bisect(func, a, b, xtol=1e-6, errorcontrol=True,
testkwargs=dict(), outside='extrapolate',
ascending=None,
disp=False)
|
Find a root by bisection search.
If the function evaluation is noisy then use `errorcontrol=True` for adaptive
sampling of the function during the bisection search.
Parameters
----------
func: callable
Function of which the root should be found. If `errorcontrol=True`
then the function should be derived from `AverageBase`.
a, b: float
initial interval
xtol: float
target tolerance for interval size
errorcontrol: boolean
if true, assume that function is derived from `AverageBase`.
testkwargs: only for `errorcontrol=True`
see `AverageBase.test0`
outside: ['extrapolate', 'raise']
How to handle the case where f(a) and f(b) have the same sign,
i.e. where the root lies outside of the interval.
If 'raise', a `BisectException` is thrown.
ascending: allow passing in directly whether function is ascending or not
if ascending=True then it is assumed without check that f(a) < 0 and f(b) > 0
if ascending=False then it is assumed without check that f(a) > 0 and f(b) < 0
Returns
-------
float, root of function
| 3.27232
| 3.079255
| 1.062699
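A minimal sketch with a deterministic function, so `errorcontrol=False`; the function and interval are illustrative:

```python
# The cubic crosses zero at x = 1 inside the interval [0, 2].
root = bisect(lambda x: x**3 - 1.0, 0.0, 2.0, xtol=1e-6, errorcontrol=False)
# root is approximately 1.0
```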
|
f1, f1se = self(x1)
f2, f2se = self(x2)
if self.paired:
fx1 = np.array(self.cache[tuple(x1)])
fx2 = np.array(self.cache[tuple(x2)])
diffse = np.std(fx1-fx2, ddof=1)/self.N**.5
return diffse
else:
return (f1se**2 + f2se**2)**.5
|
def diffse(self, x1, x2)
|
Standard error of the difference between the function values at x1 and x2
| 3.636483
| 3.215087
| 1.131068
|
p[0] = Document(definitions=[Query(selections=p[1])] + p[2])
|
def p_document_shorthand_with_fragments(self, p)
|
document : selection_set fragment_list
| 12.119683
| 7.45759
| 1.625148
|
p[0] = self.operation_cls(p[1])(
selections=p[5],
name=p[2],
variable_definitions=p[3],
directives=p[4],
)
|
def p_operation_definition1(self, p)
|
operation_definition : operation_type name variable_definitions directives selection_set
| 6.078919
| 3.551589
| 1.711605
|
p[0] = self.operation_cls(p[1])(
selections=p[4],
name=p[2],
variable_definitions=p[3],
)
|
def p_operation_definition2(self, p)
|
operation_definition : operation_type name variable_definitions selection_set
| 7.40837
| 4.371741
| 1.694604
|
p[0] = self.operation_cls(p[1])(
selections=p[4],
name=p[2],
directives=p[3],
)
|
def p_operation_definition3(self, p)
|
operation_definition : operation_type name directives selection_set
| 7.238729
| 4.544209
| 1.592957
|
p[0] = self.operation_cls(p[1])(selections=p[3], name=p[2])
|
def p_operation_definition4(self, p)
|
operation_definition : operation_type name selection_set
| 10.383045
| 6.241528
| 1.663542
|
p[0] = self.operation_cls(p[1])(
selections=p[4],
variable_definitions=p[2],
directives=p[3],
)
|
def p_operation_definition5(self, p)
|
operation_definition : operation_type variable_definitions directives selection_set
| 7.639797
| 4.154393
| 1.838968
|
p[0] = self.operation_cls(p[1])(
selections=p[3],
variable_definitions=p[2],
)
|
def p_operation_definition6(self, p)
|
operation_definition : operation_type variable_definitions selection_set
| 10.220458
| 5.330503
| 1.917353
|
p[0] = self.operation_cls(p[1])(
selections=p[3],
directives=p[2],
)
|
def p_operation_definition7(self, p)
|
operation_definition : operation_type directives selection_set
| 10.980713
| 5.668335
| 1.937203
|
p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4],
selections=p[5])
|
def p_field_all(self, p)
|
field : alias name arguments directives selection_set
| 5.093054
| 2.889275
| 1.762744
|
p[0] = Field(name=p[1], arguments=p[2], directives=p[3],
selections=p[5])
|
def p_field_optional1_1(self, p)
|
field : name arguments directives selection_set
| 5.991436
| 3.06625
| 1.953995
|
p[0] = Field(name=p[2], alias=p[1], directives=p[3], selections=p[5])
|
def p_field_optional1_2(self, p)
|
field : alias name directives selection_set
| 5.488008
| 2.825881
| 1.942052
|
p[0] = Field(name=p[2], alias=p[1], arguments=p[3], selections=p[4])
|
def p_field_optional1_3(self, p)
|
field : alias name arguments selection_set
| 5.371235
| 2.684363
| 2.000935
|
p[0] = Field(name=p[2], alias=p[1], arguments=p[3], directives=p[4])
|
def p_field_optional1_4(self, p)
|
field : alias name arguments directives
| 5.091366
| 2.365402
| 2.152432
|
p[0] = Field(name=p[1], directives=p[2], selections=p[3])
|
def p_field_optional2_1(self, p)
|
field : name directives selection_set
| 6.141108
| 2.954524
| 2.078544
|
p[0] = Field(name=p[1], arguments=p[2], selections=p[3])
|
def p_field_optional2_2(self, p)
|
field : name arguments selection_set
| 6.13221
| 2.986051
| 2.053619
|
p[0] = Field(name=p[1], arguments=p[2], directives=p[3])
|
def p_field_optional2_3(self, p)
|
field : name arguments directives
| 5.581983
| 2.402966
| 2.322956
|
p[0] = Field(name=p[2], alias=p[1], selections=p[3])
|
def p_field_optional2_4(self, p)
|
field : alias name selection_set
| 6.147282
| 3.598474
| 1.708302
|
p[0] = Field(name=p[2], alias=p[1], directives=p[3])
|
def p_field_optional2_5(self, p)
|
field : alias name directives
| 5.856087
| 3.03358
| 1.930421
|
p[0] = Field(name=p[2], alias=p[1], arguments=p[3])
|
def p_field_optional2_6(self, p)
|
field : alias name arguments
| 5.437779
| 2.950944
| 1.842725
|
p[0] = FragmentDefinition(name=p[2], type_condition=p[4],
selections=p[6], directives=p[5])
|
def p_fragment_definition1(self, p)
|
fragment_definition : FRAGMENT fragment_name ON type_condition directives selection_set
| 6.179478
| 3.688479
| 1.675346
|
p[0] = FragmentDefinition(name=p[2], type_condition=p[4],
selections=p[5])
|
def p_fragment_definition2(self, p)
|
fragment_definition : FRAGMENT fragment_name ON type_condition selection_set
| 6.457779
| 4.237252
| 1.524049
|
p[0] = InlineFragment(type_condition=p[3], selections=p[5],
directives=p[4])
|
def p_inline_fragment1(self, p)
|
inline_fragment : SPREAD ON type_condition directives selection_set
| 4.786138
| 4.19785
| 1.14014
|
arguments = p[3] if len(p) == 4 else None
p[0] = Directive(name=p[2], arguments=arguments)
|
def p_directive(self, p)
|
directive : AT name arguments
| AT name
| 3.697705
| 2.885276
| 1.281578
|
p[0] = VariableDefinition(name=p[2], type=p[4], default_value=p[5])
|
def p_variable_definition1(self, p)
|
variable_definition : DOLLAR name COLON type default_value
| 3.712757
| 2.431244
| 1.527102
|
obj = p[1].copy()
obj.update(p[2])
p[0] = obj
|
def p_object_field_list(self, p)
|
object_field_list : object_field_list object_field
| 4.051494
| 3.495873
| 1.158936
|
obj = p[1].copy()
obj.update(p[2])
p[0] = obj
|
def p_const_object_field_list(self, p)
|
const_object_field_list : const_object_field_list const_object_field
| 4.247465
| 4.788525
| 0.887009
|
return sorted(
triples,
key=lambda t: [
int(t) if t.isdigit() else t
for t in re.split(r'([0-9]+)', t.relation or '')
]
)
|
def alphanum_order(triples)
|
Sort a list of triples by relation name.
Embedded integers are sorted numerically, but otherwise the sorting
is alphabetic.
| 3.580595
| 3.524677
| 1.015865
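A small sketch of the ordering; only the `relation` attribute of each triple matters here, and the namedtuple is an illustrative stand-in for the library's Triple type:

```python
from collections import namedtuple

T = namedtuple('T', 'source relation target')
triples = [T('b', 'ARG10', 'x'), T('b', 'ARG2', 'y'), T('b', 'ARG1', 'z')]

[t.relation for t in alphanum_order(triples)]
# -> ['ARG1', 'ARG2', 'ARG10']   (numeric, not lexicographic, order)
```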
|
codec = cls(**kwargs)
return codec.decode(s)
|
def decode(s, cls=PENMANCodec, **kwargs)
|
Deserialize PENMAN-serialized *s* into its Graph object
Args:
s: a string containing a single PENMAN-serialized graph
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the Graph object described by *s*
Example:
>>> decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
| 4.685117
| 17.946638
| 0.261058
|
codec = cls(**kwargs)
return codec.encode(g, top=top)
|
def encode(g, top=None, cls=PENMANCodec, **kwargs)
|
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized graph; if
unset, the original top of *g* is used
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
| 3.724328
| 12.543238
| 0.296919
|
decode = cls(**kwargs).iterdecode
if hasattr(source, 'read'):
return list(decode(source.read()))
else:
with open(source) as fh:
return list(decode(fh.read()))
|
def load(source, triples=False, cls=PENMANCodec, **kwargs)
|
Deserialize a list of PENMAN-encoded graphs from *source*.
Args:
source: a filename or file-like object to read from
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
| 3.651183
| 5.283999
| 0.690989
|
codec = cls(**kwargs)
return list(codec.iterdecode(string, triples=triples))
|
def loads(string, triples=False, cls=PENMANCodec, **kwargs)
|
Deserialize a list of PENMAN-encoded graphs from *string*.
Args:
string: a string containing graph data
triples: if True, read graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
Returns:
a list of Graph objects
| 4.58827
| 8.503858
| 0.539552
|
text = dumps(graphs, triples=triples, cls=cls, **kwargs)
if hasattr(file, 'write'):
print(text, file=file)
else:
with open(file, 'w') as fh:
print(text, file=fh)
|
def dump(graphs, file, triples=False, cls=PENMANCodec, **kwargs)
|
Serialize each graph in *graphs* to PENMAN and write to *file*.
Args:
graphs: an iterable of Graph objects
file: a filename or file-like object to write to
triples: if True, write graphs as triples instead of as PENMAN
cls: serialization codec class
kwargs: keyword arguments passed to the constructor of *cls*
| 2.073997
| 3.134442
| 0.66168
|
codec = cls(**kwargs)
strings = [codec.encode(g, triples=triples) for g in graphs]
return '\n\n'.join(strings)
|
def dumps(graphs, triples=False, cls=PENMANCodec, **kwargs)
|
Serialize each graph in *graphs* to the PENMAN format.
Args:
graphs: an iterable of Graph objects
triples: if True, write graphs as triples instead of as PENMAN
Returns:
the string of serialized graphs
| 2.817769
| 3.812632
| 0.739061
|
try:
if triples:
span, data = self._decode_triple_conjunction(s)
else:
span, data = self._decode_penman_node(s)
except IndexError:
raise DecodeError(
'Unexpected end of string.', string=s, pos=len(s)
)
top, nodes, edges = data
return self.triples_to_graph(nodes + edges, top=top)
|
def decode(self, s, triples=False)
|
Deserialize PENMAN-notation string *s* into its Graph object.
Args:
s: a string containing a single PENMAN-serialized graph
triples: if True, treat *s* as a conjunction of logical triples
Returns:
the Graph object described by *s*
Example:
>>> codec = PENMANCodec()
>>> codec.decode('(b / bark :ARG1 (d / dog))')
<Graph object (top=b) at ...>
>>> codec.decode(
... 'instance(b, bark) ^ instance(d, dog) ^ ARG1(b, d)',
... triples=True
... )
<Graph object (top=b) at ...>
| 5.46572
| 4.930243
| 1.108611
|
pos, strlen = 0, len(s)
while pos < strlen:
if s[pos] == '#':
while pos < strlen and s[pos] != '\n':
pos += 1
elif triples or s[pos] == '(':
try:
if triples:
span, data = self._decode_triple_conjunction(
s, pos=pos
)
else:
span, data = self._decode_penman_node(s, pos=pos)
except (IndexError, DecodeError):
# don't re-raise below for more robust parsing, but
# for now, raising helps with debugging bad input
raise
pos += 1
else:
top, nodes, edges = data
yield self.triples_to_graph(nodes + edges, top=top)
pos = span[1]
else:
pos += 1
|
def iterdecode(self, s, triples=False)
|
Deserialize PENMAN-notation string *s* into its Graph objects.
Args:
s: a string containing zero or more PENMAN-serialized graphs
triples: if True, treat *s* as a conjunction of logical triples
Yields:
valid Graph objects described by *s*
Example:
>>> codec = PENMANCodec()
>>> list(codec.iterdecode('(h / hello)(g / goodbye)'))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
>>> list(codec.iterdecode(
... 'instance(h, hello)\n'
... 'instance(g, goodbye)'
... ))
[<Graph object (top=h) at ...>, <Graph object (top=g) at ...>]
| 4.783756
| 4.541699
| 1.053297
|
if len(g.triples()) == 0:
raise EncodeError('Cannot encode empty graph.')
if triples:
return self._encode_triple_conjunction(g, top=top)
else:
return self._encode_penman(g, top=top)
|
def encode(self, g, top=None, triples=False)
|
Serialize the graph *g* from *top* to PENMAN notation.
Args:
g: the Graph object
top: the node identifier for the top of the serialized
graph; if unset, the original top of *g* is used
triples: if True, serialize as a conjunction of logical triples
Returns:
the PENMAN-serialized string of the Graph *g*
Example:
>>> codec = PENMANCodec()
>>> codec.encode(Graph([('h', 'instance', 'hi')]))
(h / hi)
>>> codec.encode(Graph([('h', 'instance', 'hi')]),
... triples=True)
instance(h, hi)
| 3.93571
| 4.166338
| 0.944645
|
relation = relation.replace(':', '', 1) # remove leading :
if self.is_relation_inverted(relation): # deinvert
source, target, inverted = rhs, lhs, True
relation = self.invert_relation(relation)
else:
source, target, inverted = lhs, rhs, False
source = _default_cast(source)
target = _default_cast(target)
if relation == '': # set empty relations to None
relation = None
return Triple(source, relation, target, inverted)
|
def handle_triple(self, lhs, relation, rhs)
|
Process triples before they are added to the graph.
Note that *lhs* and *rhs* are as they originally appeared, and
may be inverted. Inversions are detected by
is_relation_inverted() and de-inverted by invert_relation().
By default, this function:
* removes initial colons on relations
* de-inverts all inverted relations
* sets empty relations to `None`
* casts numeric string sources and targets to their numeric
types (e.g. float, int)
Args:
lhs: the left hand side of an observed triple
relation: the triple relation (possibly inverted)
rhs: the right hand side of an observed triple
Returns:
The processed (source, relation, target) triple. By default,
it is returned as a Triple object.
| 4.422327
| 2.929603
| 1.509531
|
inferred_top = triples[0][0] if triples else None
ts = []
for triple in triples:
if triple[0] == self.TOP_VAR and triple[1] == self.TOP_REL:
inferred_top = triple[2]
else:
ts.append(self.handle_triple(*triple))
top = self.handle_triple(self.TOP_VAR, self.TOP_REL, top).target
return Graph(ts, top=top or inferred_top)
|
def triples_to_graph(self, triples, top=None)
|
Create a Graph from *triples* considering codec configuration.
The Graph class does not know about information in the codec,
so if Graph instantiation depends on special `TYPE_REL` or
`TOP_VAR` values, use this function instead of instantiating
a Graph object directly. This is also where edge
normalization (de-inversion) and value type conversion occur
(via handle_triple()).
Args:
triples: an iterable of (lhs, relation, rhs) triples
top: node identifier of the top node
Returns:
a Graph object
| 3.56051
| 3.14936
| 1.13055
|
if top is None:
top = g.top
remaining = set(g.triples())
variables = g.variables()
store = defaultdict(lambda: ([], [])) # (preferred, dispreferred)
for t in g.triples():
if t.inverted:
store[t.target][0].append(t)
store[t.source][1].append(Triple(*t, inverted=False))
else:
store[t.source][0].append(t)
store[t.target][1].append(Triple(*t, inverted=True))
p = defaultdict(list)
topolist = [top]
def _update(t):
src, tgt = (t[2], t[0]) if t.inverted else (t[0], t[2])
p[src].append(t)
remaining.remove(t)
if tgt in variables and t.relation != self.TYPE_REL:
topolist.append(tgt)
return tgt
return None
def _explore_preferred(src):
ts = store.get(src, ([], []))[0]
for t in ts:
if t in remaining:
tgt = _update(t)
if tgt is not None:
_explore_preferred(tgt)
ts[:] = [] # clear explored list
_explore_preferred(top)
while remaining:
flip_candidates = [store.get(v, ([],[]))[1] for v in topolist]
for fc in flip_candidates:
fc[:] = [c for c in fc if c in remaining] # clear superfluous
if not any(len(fc) > 0 for fc in flip_candidates):
raise EncodeError('Invalid graph; possibly disconnected.')
c = next(c for fc in flip_candidates for c in fc)
tgt = _update(c)
if tgt is not None:
_explore_preferred(tgt)
return self._layout(p, top, 0, set())
|
def _encode_penman(self, g, top=None)
|
Walk graph g and find a spanning dag, then serialize the result.
First, depth-first traversal of preferred orientations (whether
true or inverted) to create graph p.
If any triples remain, select the first remaining triple whose
source in the dispreferred orientation exists in p, where
'first' is determined by the order of inserted nodes (i.e. a
topological sort). Add this triple, then repeat the depth-first
traversal of preferred orientations from its target. Repeat
until no triples remain, or raise an error if there are no
candidates in the dispreferred orientation (which likely means
the graph is disconnected).
| 3.619379
| 3.453126
| 1.048146
|
return (
relation in self._deinversions or
(relation.endswith('-of') and relation not in self._inversions)
)
|
def is_relation_inverted(self, relation)
|
Return True if *relation* is inverted.
| 9.596279
| 9.26283
| 1.035999
|
if self.is_relation_inverted(relation):
rel = self._deinversions.get(relation, relation[:-3])
else:
rel = self._inversions.get(relation, relation + '-of')
if rel is None:
raise PenmanError(
'Cannot (de)invert {}; not allowed'.format(relation)
)
return rel
|
def invert_relation(self, relation)
|
Invert or deinvert *relation*.
| 7.037499
| 6.572441
| 1.070759
|
triplematch = lambda t: (
(source is None or source == t.source) and
(relation is None or relation == t.relation) and
(target is None or target == t.target)
)
return list(filter(triplematch, self._triples))
|
def triples(self, source=None, relation=None, target=None)
|
Return triples filtered by their *source*, *relation*, or *target*.
| 2.373604
| 2.10216
| 1.129126
|
edgematch = lambda e: (
(source is None or source == e.source) and
(relation is None or relation == e.relation) and
(target is None or target == e.target)
)
variables = self.variables()
edges = [t for t in self._triples if t.target in variables]
return list(filter(edgematch, edges))
|
def edges(self, source=None, relation=None, target=None)
|
Return edges filtered by their *source*, *relation*, or *target*.
Edges don't include terminal triples (node types or attributes).
| 3.184326
| 2.969057
| 1.072504
|
attrmatch = lambda a: (
(source is None or source == a.source) and
(relation is None or relation == a.relation) and
(target is None or target == a.target)
)
variables = self.variables()
attrs = [t for t in self.triples() if t.target not in variables]
return list(filter(attrmatch, attrs))
|
def attributes(self, source=None, relation=None, target=None)
|
Return attributes filtered by their *source*, *relation*, or *target*.
Attributes don't include triples where the target is a nonterminal.
| 3.144437
| 2.822586
| 1.114027
|
entrancies = defaultdict(int)
entrancies[self.top] += 1 # implicit entrancy to top
for t in self.edges():
entrancies[t.target] += 1
return dict((v, cnt - 1) for v, cnt in entrancies.items() if cnt >= 2)
|
def reentrancies(self)
|
Return a mapping of variables to their re-entrancy count.
A re-entrancy is when more than one edge selects a node as its
target. These graphs are rooted, so the top node always has an
implicit entrancy. Only nodes with re-entrancies are reported,
and the count is only for the entrant edges beyond the first.
Also note that these counts are for the interpreted graph, not
for the linearized form, so inverted edges are always
re-entrant.
| 5.421016
| 3.98691
| 1.359703
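A hedged example using the penman Graph API shown in these rows: 'd' is the target of two edges, so it is re-entrant once beyond the first entrant edge:

```python
import penman

g = penman.Graph([
    ('b', 'instance', 'bark-01'),
    ('d', 'instance', 'dog'),
    ('b', 'ARG0', 'd'),
    ('b', 'ARG1', 'd'),
])
g.reentrancies()   # -> {'d': 1}
```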
|
if isinstance(inp, list):
return check_1d(np.array(inp))
if isinstance(inp, np.ndarray):
if inp.ndim == 1: # input is a vector
return inp
|
def check_1d(inp)
|
Check input to be a vector. Converts lists to np.ndarray.
Parameters
----------
inp : obj
Input vector
Returns
-------
numpy.ndarray or None
Input vector or None
Examples
--------
>>> check_1d([0, 1, 2, 3])
[0, 1, 2, 3]
>>> check_1d('test')
None
| 2.573624
| 3.21917
| 0.799468
|
if isinstance(inp, list):
return check_2d(np.array(inp))
if isinstance(inp, (np.ndarray, np.matrixlib.defmatrix.matrix)):
if inp.ndim == 2: # input is a dense matrix
return inp
if sps.issparse(inp):
if inp.ndim == 2: # input is a sparse matrix
return inp
|
def check_2d(inp)
|
Check input to be a matrix. Converts lists of lists to np.ndarray.
Also allows the input to be a scipy sparse matrix.
Parameters
----------
inp : obj
Input matrix
Returns
-------
numpy.ndarray, scipy.sparse or None
Input matrix or None
Examples
--------
>>> check_2d([[0, 1], [2, 3]])
[[0, 1], [2, 3]]
>>> check_2d('test')
None
| 2.660958
| 2.632364
| 1.010863
|
try:
import networkx as nx
if isinstance(G, nx.Graph):
if normalized:
return nx.normalized_laplacian_matrix(G)
else:
return nx.laplacian_matrix(G)
except ImportError:
pass
try:
import graph_tool.all as gt
if isinstance(G, gt.Graph):
if normalized:
return gt.laplacian_type(G, normalized=True)
else:
return gt.laplacian(G)
except ImportError:
pass
try:
import igraph as ig
if isinstance(G, ig.Graph):
if normalized:
return np.array(G.laplacian(normalized=True))
else:
return np.array(G.laplacian())
except ImportError:
pass
|
def graph_to_laplacian(G, normalized=True)
|
Converts a graph from popular Python packages to Laplacian representation.
Currently supports NetworkX, graph_tool and igraph.
Parameters
----------
G : obj
Input graph
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
scipy.sparse
Laplacian matrix of the input graph
Examples
--------
>>> graph_to_laplacian(nx.complete_graph(3), normalized=False).todense()
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
>>> graph_to_laplacian('test')
None
| 1.636398
| 1.636392
| 1.000004
|
if sps.issparse(mat):
if np.all(mat.diagonal()>=0): # Check diagonal
if np.all((mat-sps.diags(mat.diagonal())).data <= 0): # Check off-diagonal elements
return mat
else:
if np.all(np.diag(mat)>=0): # Check diagonal
if np.all(mat - np.diag(mat) <= 0): # Check off-diagonal elements
return mat
deg = np.squeeze(np.asarray(mat.sum(axis=1)))
if sps.issparse(mat):
L = sps.diags(deg) - mat
else:
L = np.diag(deg) - mat
if not normalized:
return L
with np.errstate(divide='ignore'):
sqrt_deg = 1.0 / np.sqrt(deg)
sqrt_deg[sqrt_deg==np.inf] = 0
if sps.issparse(mat):
sqrt_deg_mat = sps.diags(sqrt_deg)
else:
sqrt_deg_mat = np.diag(sqrt_deg)
return sqrt_deg_mat.dot(L).dot(sqrt_deg_mat)
|
def mat_to_laplacian(mat, normalized)
|
Converts a sparse or dense adjacency matrix to Laplacian.
Parameters
----------
mat : obj
Input adjacency matrix. If it is a Laplacian matrix already, return it.
normalized : bool
Whether to use normalized Laplacian.
Normalized and unnormalized Laplacians capture different properties of graphs, e.g. normalized Laplacian spectrum can determine whether a graph is bipartite, but not the number of its edges. We recommend using normalized Laplacian.
Returns
-------
obj
Laplacian of the input adjacency matrix
Examples
--------
>>> mat_to_laplacian(numpy.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]), False)
[[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]
| 2.113818
| 2.288032
| 0.923859
|
nal = len(eigvals_lower)
nau = len(eigvals_upper)
if nv < nal + nau:
raise ValueError('Number of supplied eigenvalues ({0} lower and {1} upper) is higher than number of nodes ({2})!'.format(nal, nau, nv))
ret = np.zeros(nv)
ret[:nal] = eigvals_lower
ret[-nau:] = eigvals_upper
ret[nal-1:-nau+1] = np.linspace(eigvals_lower[-1], eigvals_upper[0], nv-nal-nau+2)
return ret
|
def updown_linear_approx(eigvals_lower, eigvals_upper, nv)
|
Approximates Laplacian spectrum using upper and lower parts of the eigenspectrum.
Parameters
----------
eigvals_lower : numpy.ndarray
Lower part of the spectrum, sorted
eigvals_upper : numpy.ndarray
Upper part of the spectrum, sorted
nv : int
Total number of nodes (eigenvalues) in the graph.
Returns
-------
numpy.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> updown_linear_approx([1, 2, 3], [7, 8, 9], 9)
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
| 2.541917
| 2.577103
| 0.986347
|
do_full = True
n_lower = 150
n_upper = 150
nv = mat.shape[0]
if n_eivals == 'auto':
if mat.shape[0] > 1024:
do_full = False
if n_eivals == 'full':
do_full = True
if isinstance(n_eivals, int):
n_lower = n_upper = n_eivals
do_full = False
if isinstance(n_eivals, tuple):
n_lower, n_upper = n_eivals
do_full = False
if do_full and sps.issparse(mat):
mat = mat.todense()
if sps.issparse(mat):
if n_lower == n_upper:
tr_eivals = spsl.eigsh(mat, 2*n_lower, which='BE', return_eigenvectors=False)
return updown_linear_approx(tr_eivals[:n_upper], tr_eivals[n_upper:], nv)
else:
lo_eivals = spsl.eigsh(mat, n_lower, which='SM', return_eigenvectors=False)[::-1]
up_eivals = spsl.eigsh(mat, n_upper, which='LM', return_eigenvectors=False)
return updown_linear_approx(lo_eivals, up_eivals, nv)
else:
if do_full:
return spl.eigvalsh(mat)
else:
lo_eivals = spl.eigvalsh(mat, eigvals=(0, n_lower-1))
up_eivals = spl.eigvalsh(mat, eigvals=(nv-n_upper-1, nv-1))
return updown_linear_approx(lo_eivals, up_eivals, nv)
|
def eigenvalues_auto(mat, n_eivals='auto')
|
Automatically computes the spectrum of a given Laplacian matrix.
Parameters
----------
mat : numpy.ndarray or scipy.sparse
Laplacian matrix
n_eivals : string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the matrix size (full spectrum for up to 1024 nodes, a two-sided approximation above that). 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
Returns
-------
np.ndarray
Vector of approximated eigenvalues
Examples
--------
>>> eigenvalues_auto(numpy.array([[ 2, -1, -1], [-1, 2, -1], [-1, -1, 2]]), 'auto')
array([0, 3, 3])
| 2.024931
| 2.078526
| 0.974215
|
if kernel not in {'heat', 'wave'}:
raise AttributeError('Unrecognized kernel type: expected one of [\'heat\', \'wave\'], got {0}'.format(kernel))
if not isinstance(normalized_laplacian, bool):
raise AttributeError('Unknown Laplacian type: expected bool, got {0}'.format(normalized_laplacian))
if not isinstance(eigenvalues, (int, tuple, str)):
raise AttributeError('Unrecognized requested eigenvalue number: expected type of [\'str\', \'tuple\', or \'int\'], got {0}'.format(type(eigenvalues)))
if not isinstance(timescales, np.ndarray):
raise AttributeError('Unrecognized timescales data type: expected np.ndarray, got {0}'.format(type(timescales)))
if timescales.ndim != 1:
raise AttributeError('Unrecognized timescales dimensionality: expected a vector, got {0}-d array'.format(timescales.ndim))
if normalization not in {'complete', 'empty', 'none', True, False, None}:
if not isinstance(normalization, np.ndarray):
raise AttributeError('Unrecognized normalization type: expected one of [\'complete\', \'empty\', None or np.ndarray], got {0}'.format(normalization))
if normalization.ndim != 1:
raise AttributeError('Unrecognized normalization dimensionality: expected a vector, got {0}-d array'.format(normalization.ndim))
if timescales.shape[0] != normalization.shape[0]:
raise AttributeError('Unrecognized normalization dimensionality: expected {0}-length vector, got length {1}'.format(timescales.shape[0], normalization.shape[0]))
eivals = check_1d(inp)
if eivals is None:
mat = check_2d(inp)
if mat is None:
mat = graph_to_laplacian(inp, normalized_laplacian)
if mat is None:
raise ValueError('Unrecognized input type: expected one of [\'np.ndarray\', \'scipy.sparse\', \'networkx.Graph\',\' graph_tool.Graph,\' or \'igraph.Graph\'], got {0}'.format(type(inp)))
else:
mat = mat_to_laplacian(inp, normalized_laplacian)
eivals = eigenvalues_auto(mat, eigenvalues)
if kernel == 'heat':
return _hkt(eivals, timescales, normalization, normalized_laplacian)
else:
return _wkt(eivals, timescales, normalization, normalized_laplacian)
|
def netlsd(inp, timescales=np.logspace(-2, 2, 250), kernel='heat', eigenvalues='auto', normalization='empty', normalized_laplacian=True)
|
Computes NetLSD signature from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
kernel : str
Either 'heat' or 'wave'. Type of a kernel to use for computation.
eigenvalues : str
Either string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the input size (full spectrum for small graphs, a two-sided approximation for large ones). 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
NetLSD signature
| 2.360625
| 2.260608
| 1.044243
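A hedged usage sketch: compute heat-trace signatures for two small adjacency matrices with the `netlsd` function above and compare them by Euclidean distance; the graphs are illustrative:

```python
import numpy as np

a1 = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])   # triangle
a2 = np.array([[0, 1, 0], [1, 0, 1], [0, 1, 0]])   # path on 3 nodes

sig1 = netlsd(a1)          # default kernel='heat', 250 timescales
sig2 = netlsd(a2)
distance = np.linalg.norm(sig1 - sig2)
```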
|
return netlsd(inp, timescales, 'heat', eigenvalues, normalization, normalized_laplacian)
|
def heat(inp, timescales=np.logspace(-2, 2, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True)
|
Computes heat kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str
Either string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the input size (full spectrum for small graphs, a two-sided approximation for large ones). 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Heat kernel trace signature
| 4.461063
| 6.716398
| 0.664205
|
return netlsd(inp, timescales, 'wave', eigenvalues, normalization, normalized_laplacian)
|
def wave(inp, timescales=np.linspace(0, 2*np.pi, 250), eigenvalues='auto', normalization='empty', normalized_laplacian=True)
|
Computes wave kernel trace from some given input, timescales, and normalization.
Accepts matrices, common Python graph libraries' graphs, or vectors of eigenvalues.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
inp: obj
2D numpy/scipy matrix, common Python graph libraries' graph, or vector of eigenvalues
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
eigenvalues : str
Either string or int or tuple
Number of eigenvalues to compute / use for approximation.
If string, we expect either 'full' or 'auto', otherwise error will be raised. 'auto' lets the program decide based on the input size (full spectrum for small graphs, a two-sided approximation for large ones). 'full' computes all eigenvalues.
If int, compute n_eivals eigenvalues from each side and approximate using linear growth approximation.
If tuple, we expect two ints, first for lower part of approximation, and second for the upper part.
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
| 4.915521
| 7.260052
| 0.677064
|
nv = eivals.shape[0]
hkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
hkt[idx] = np.sum(np.exp(-t * eivals))
if isinstance(normalization, np.ndarray):
return hkt / normalization
if normalization == 'empty' or normalization == True:
return hkt / nv
if normalization == 'complete':
if normalized_laplacian:
return hkt / (1 + (nv - 1) * np.exp(-timescales))
else:
return hkt / (1 + nv * np.exp(-nv * timescales))
return hkt
|
def _hkt(eivals, timescales, normalization, normalized_laplacian)
|
Computes heat kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized heat kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Heat kernel trace signature
| 2.726385
| 2.478463
| 1.100031
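For reference, the loop above evaluates the heat kernel trace at each timescale; in terms of the eigenvalues $\lambda_j$ of the Laplacian $L$ with $n$ nodes:

$$h(t) = \operatorname{tr}\!\left(e^{-tL}\right) = \sum_{j=1}^{n} e^{-t\lambda_j},$$

which the 'empty' normalization divides by $n$ and, for the normalized Laplacian, the 'complete' normalization divides by $1 + (n-1)e^{-t}$, matching the code.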
|
nv = eivals.shape[0]
wkt = np.zeros(timescales.shape)
for idx, t in enumerate(timescales):
wkt[idx] = np.sum(np.exp(-1j * t * eivals))
if isinstance(normalization, np.ndarray):
return wkt / normalization
if normalization == 'empty' or normalization == True:
return wkt / nv
if normalization == 'complete':
if normalized_laplacian:
return wkt / (1 + (nv - 1) * np.cos(timescales))
else:
return wkt / (1 + (nv - 1) * np.cos(nv * timescales))
return wkt
|
def _wkt(eivals, timescales, normalization, normalized_laplacian)
|
Computes wave kernel trace from given eigenvalues, timescales, and normalization.
For precise definition, please refer to "NetLSD: Hearing the Shape of a Graph" by A. Tsitsulin, D. Mottin, P. Karras, A. Bronstein, E. Müller. Published at KDD'18.
Parameters
----------
eivals : numpy.ndarray
Eigenvalue vector
timescales : numpy.ndarray
Vector of discrete timesteps for the kernel computation
normalization : str or numpy.ndarray
Either 'empty', 'complete' or None.
If None or any other value, return unnormalized wave kernel trace.
For the details how 'empty' and 'complete' are computed, please refer to the paper.
If np.ndarray, they are treated as exact normalization constants
normalized_laplacian: bool
Defines whether the eigenvalues came from the normalized Laplacian. It only affects 'complete' normalization.
Returns
-------
numpy.ndarray
Wave kernel trace signature
| 3.17992
| 2.885017
| 1.102219
|
self._len = 0
del self._maxes[:]
del self._lists[:]
del self._keys[:]
del self._index[:]
|
def clear(self)
|
Remove all the elements from the list.
| 6.782635
| 5.629331
| 1.204874
|
_maxes = self._maxes
if not _maxes:
return
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
return
_keys = self._keys
_lists = self._lists
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
return
if _lists[pos][idx] == val:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
return
len_sublist = len(_keys[pos])
idx = 0
|
def discard(self, val)
|
Remove the first occurrence of *val*.
If *val* is not a member, does nothing.
| 2.585753
| 2.568012
| 1.006909
|
_maxes = self._maxes
if not _maxes:
raise ValueError('{0} not in list'.format(repr(val)))
key = self._key(val)
pos = bisect_left(_maxes, key)
if pos == len(_maxes):
raise ValueError('{0} not in list'.format(repr(val)))
_keys = self._keys
_lists = self._lists
idx = bisect_left(_keys[pos], key)
len_keys = len(_keys)
len_sublist = len(_keys[pos])
while True:
if _keys[pos][idx] != key:
raise ValueError('{0} not in list'.format(repr(val)))
if _lists[pos][idx] == val:
self._delete(pos, idx)
return
idx += 1
if idx == len_sublist:
pos += 1
if pos == len_keys:
raise ValueError('{0} not in list'.format(repr(val)))
len_sublist = len(_keys[pos])
idx = 0
|
def remove(self, val)
|
Remove first occurrence of *val*.
Raises ValueError if *val* is not present.
| 2.27189
| 2.197037
| 1.03407
|
_maxes, _lists, _keys, _index = self._maxes, self._lists, self._keys, self._index
keys_pos = _keys[pos]
lists_pos = _lists[pos]
del keys_pos[idx]
del lists_pos[idx]
self._len -= 1
len_keys_pos = len(keys_pos)
if len_keys_pos > self._half:
_maxes[pos] = keys_pos[-1]
if len(_index) > 0:
child = self._offset + pos
while child > 0:
_index[child] -= 1
child = (child - 1) >> 1
_index[0] -= 1
elif len(_keys) > 1:
if not pos:
pos += 1
prev = pos - 1
_keys[prev].extend(_keys[pos])
_lists[prev].extend(_lists[pos])
_maxes[prev] = _keys[prev][-1]
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:]
self._expand(prev)
elif len_keys_pos:
_maxes[pos] = keys_pos[-1]
else:
del _keys[pos]
del _lists[pos]
del _maxes[pos]
del _index[:]
|
def _delete(self, pos, idx)
|
Delete the item at the given (pos, idx).
Combines lists that are less than half the load level.
Updates the index when the sublist length is more than half the load
level. This requires decrementing the nodes in a traversal from the leaf
node to the root. For an example traversal see self._loc.
| 2.675055
| 2.615276
| 1.022858
|
if not pos:
return idx
_index = self._index
if not len(_index):
self._build_index()
total = 0
# Increment pos to point in the index to len(self._lists[pos]).
pos += self._offset
# Iterate until reaching the root of the index tree at pos = 0.
while pos:
# Right-child nodes are at even indices. At such indices
# account the total below the left child node.
if not (pos & 1):
total += _index[pos - 1]
# Advance pos to the parent node.
pos = (pos - 1) >> 1
return total + idx
|
def _loc(self, pos, idx)
|
Convert an index pair (alpha, beta) into a single index that corresponds to
the position of the value in the sorted list.
Most queries require the index be built. Details of the index are
described in self._build_index.
Indexing requires traversing the tree from a leaf node to the root. The
parent of each node is easily computable at (pos - 1) // 2.
Left-child nodes are always at odd indices and right-child nodes are
always at even indices.
When traversing up from a right-child node, increment the total by the
left-child node.
The final index is the sum from traversal and the index in the sublist.
For example, using the index from self._build_index:
_index = 14 5 9 3 2 4 5
_offset = 3
Tree:
14
5 9
3 2 4 5
Converting index pair (2, 3) into a single index involves iterating like
so:
1. Starting at the leaf node: offset + alpha = 3 + 2 = 5. We identify
the node as a left-child node. At such nodes, we simply traverse to
the parent.
2. At node 9, position 2, we recognize the node as a right-child node
and accumulate the left-child in our total. Total is now 5 and we
traverse to the parent at position 0.
3. Iteration ends at the root.
Computing the index is the sum of the total and beta: 5 + 3 = 8.
| 7.741471
| 6.515013
| 1.188251
|
_len = self._len
if not _len:
return iter(())
start, stop, step = self._slice(slice(start, stop))
if start >= stop:
return iter(())
_pos = self._pos
min_pos, min_idx = _pos(start)
if stop == _len:
max_pos = len(self._lists) - 1
max_idx = len(self._lists[-1])
else:
max_pos, max_idx = _pos(stop)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse)
|
def islice(self, start=None, stop=None, reverse=False)
|
Returns an iterator that slices `self` from `start` to `stop` index,
inclusive and exclusive respectively.
When `reverse` is `True`, values are yielded from the iterator in
reverse order.
Both `start` and `stop` default to `None` which is automatically
inclusive of the beginning and end.
| 3.087944
| 3.213107
| 0.961046
|
minimum = self._key(minimum) if minimum is not None else None
maximum = self._key(maximum) if maximum is not None else None
return self.irange_key(
min_key=minimum, max_key=maximum,
inclusive=inclusive, reverse=reverse,
)
|
def irange(self, minimum=None, maximum=None, inclusive=(True, True),
reverse=False)
|
Create an iterator of values between `minimum` and `maximum`.
`inclusive` is a pair of booleans that indicates whether the minimum
and maximum ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
minimum and maximum.
Both `minimum` and `maximum` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
| 2.621161
| 3.128609
| 0.837804
|
return self.__class__(self, key=self._key, load=self._load)
|
def copy(self)
|
Return a shallow copy of the sorted list.
| 10.431229
| 7.609678
| 1.370785
|
_maxes, _lists, _keys = self._maxes, self._lists, self._keys
key = self._key(val)
if not _maxes:
_maxes.append(key)
_keys.append([key])
_lists.append([val])
self._len = 1
return
pos = len(_keys) - 1
if key < _keys[pos][-1]:
msg = '{0} not in sort order at index {1}'.format(repr(val), self._len)
raise ValueError(msg)
_maxes[pos] = key
_keys[pos].append(key)
_lists[pos].append(val)
self._len += 1
self._expand(pos)
|
def append(self, val)
|
Append the element *val* to the list. Raises a ValueError if *val*
would violate the sort order.
| 3.040012
| 2.733178
| 1.112263
|
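A simplified standalone sketch of the same contract (not the library's internals): appending only succeeds when the new key does not fall below the last existing key.

```python
def checked_append(items, val, key=lambda x: x):
    """Append val to the already-sorted `items`, or raise ValueError."""
    if items and key(val) < key(items[-1]):
        raise ValueError(
            '{0!r} not in sort order at index {1}'.format(val, len(items)))
    items.append(val)

data = [1, 3, 5]
checked_append(data, 7)      # fine: 7 >= 5
# checked_append(data, 2)    # would raise ValueError
```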
_maxes, _keys, _lists, _load = self._maxes, self._keys, self._lists, self._load
if not isinstance(values, list):
values = list(values)
keys = list(map(self._key, values))
if any(keys[pos - 1] > keys[pos]
for pos in range(1, len(keys))):
raise ValueError('given sequence not in sort order')
offset = 0
if _maxes:
if keys[0] < _keys[-1][-1]:
msg = '{0} not in sort order at index {1}'.format(repr(values[0]), self._len)
raise ValueError(msg)
if len(_keys[-1]) < self._half:
_lists[-1].extend(values[:_load])
_keys[-1].extend(keys[:_load])
_maxes[-1] = _keys[-1][-1]
offset = _load
len_keys = len(_keys)
for idx in range(offset, len(keys), _load):
_lists.append(values[idx:(idx + _load)])
_keys.append(keys[idx:(idx + _load)])
_maxes.append(_keys[-1][-1])
_index = self._index
if len_keys == len(_keys):
len_index = len(_index)
if len_index > 0:
len_values = len(values)
child = len_index - 1
while child:
_index[child] += len_values
child = (child - 1) >> 1
_index[0] += len_values
else:
del _index[:]
self._len += len(values)
|
def extend(self, values)
|
Extend the list by appending all elements from *values*. Raises a
ValueError if the sort order would be violated.
| 3.169071
| 3.03143
| 1.045405
|
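A standalone sketch of the validation performed above, offered as an illustration rather than the library's code: the new values must be sorted among themselves and must not fall below the current last key.

```python
def checked_extend(items, values, key=lambda x: x):
    """Extend the already-sorted `items` with `values`, validating order."""
    values = list(values)
    keys = [key(v) for v in values]
    if any(keys[i - 1] > keys[i] for i in range(1, len(keys))):
        raise ValueError('given sequence not in sort order')
    if items and keys and keys[0] < key(items[-1]):
        raise ValueError(
            '{0!r} not in sort order at index {1}'.format(values[0], len(items)))
    items.extend(values)

data = [1, 2, 3]
checked_extend(data, [3, 4, 5])   # fine: order is preserved
# checked_extend(data, [5, 4])    # would raise: sequence itself unordered
```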
if (idx < 0 and -idx > self._len) or (idx >= self._len):
raise IndexError('pop index out of range')
pos, idx = self._pos(idx)
val = self._lists[pos][idx]
self._delete(pos, idx)
return val
|
def pop(self, idx=-1)
|
Remove and return item at *idx* (default last). Raises IndexError if
list is empty or index is out of range. Negative indices are supported,
as for slice indices.
| 3.787951
| 3.765188
| 1.006045
|
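A short usage sketch, assuming the sortedcontainers `SortedList` API:

```python
from sortedcontainers import SortedList  # assumed dependency

sl = SortedList([10, 20, 30])
assert sl.pop() == 30     # default idx=-1 removes the largest value
assert sl.pop(0) == 10    # explicit indices, including negative ones, work too
```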
@wraps(func)
def errfunc(*args, **kwargs):
raise NotImplementedError
if hexversion < 0x02070000:
return errfunc
else:
return func
|
def not26(func)
|
Function decorator for methods not implemented in Python 2.6.
| 4.790541
| 3.428124
| 1.397423
|
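A self-contained sketch of how the decorator is meant to be applied; the decorated function below is purely hypothetical:

```python
from functools import wraps
from sys import hexversion

def not26(func):
    """Return func unchanged on Python >= 2.7, otherwise a raising stub."""
    @wraps(func)
    def errfunc(*args, **kwargs):
        raise NotImplementedError
    return errfunc if hexversion < 0x02070000 else func

@not26
def ordered_counts(words):
    # collections.Counter only exists on Python >= 2.7
    from collections import Counter
    return Counter(words)
```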
return self.__class__(self._key, self._load, self._iteritems())
|
def copy(self)
|
Return a shallow copy of the sorted dictionary.
| 26.992952
| 15.250506
| 1.769971
|
if key in self:
self._list_remove(key)
return self._pop(key)
else:
if default is _NotGiven:
raise KeyError(key)
else:
return default
|
def pop(self, key, default=_NotGiven)
|
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
| 2.868318
| 2.805253
| 1.022481
|
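A usage sketch of the two branches described above, assuming the sortedcontainers `SortedDict` API:

```python
from sortedcontainers import SortedDict  # assumed dependency

sd = SortedDict({'a': 1, 'b': 2})
assert sd.pop('a') == 1        # key present: removed and returned
assert sd.pop('zzz', 0) == 0   # key absent with a default: no KeyError
# sd.pop('zzz')                # key absent, no default: raises KeyError
```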
if not len(self):
raise KeyError('popitem(): dictionary is empty')
key = self._list_pop(-1 if last else 0)
value = self._pop(key)
return (key, value)
|
def popitem(self, last=True)
|
Remove and return a ``(key, value)`` pair from the dictionary. If
last=True (default) then remove the *greatest* `key` from the
dictionary. Else, remove the *least* key from the dictionary.
If the dictionary is empty, calling `popitem` raises a
`KeyError`.
| 3.791755
| 3.946121
| 0.960882
|
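A usage sketch matching the `last=True` signature shown above; newer releases of the library switched to an index-based signature, so this example is tied to the version excerpted here:

```python
from sortedcontainers import SortedDict  # assumed dependency

sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
assert sd.popitem() == ('c', 3)            # default: remove the greatest key
assert sd.popitem(last=False) == ('a', 1)  # last=False: remove the least key
```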
if key in self:
return self[key]
else:
self._setitem(key, default)
self._list_add(key)
return default
|
def setdefault(self, key, default=None)
|
If *key* is in the dictionary, return its value. If not, insert *key*
with a value of *default* and return *default*. *default* defaults to
``None``.
| 3.722466
| 3.821306
| 0.974135
|
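A short usage sketch, again assuming the sortedcontainers `SortedDict` API:

```python
from sortedcontainers import SortedDict  # assumed dependency

sd = SortedDict({'a': 1})
assert sd.setdefault('a', 99) == 1   # existing key: the stored value wins
assert sd.setdefault('b', 2) == 2    # missing key: inserted and returned
assert list(sd) == ['a', 'b']        # keys stay in sorted order
```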
return self._list.index(value, start, stop)
|
def index(self, value, start=None, stop=None)
|
Return the smallest *k* such that `keysview[k] == value` and `start <= k
< stop`. Raises `KeyError` if *value* is not present. *stop* defaults
to the end of the set. *start* defaults to the beginning. Negative
indices are supported, as for slice indices.
| 6.090889
| 8.367262
| 0.727943
|
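A usage sketch of ranking a key through the keys view, assuming the sortedcontainers `SortedDict` API:

```python
from sortedcontainers import SortedDict  # assumed dependency

sd = SortedDict({'a': 1, 'b': 2, 'c': 3})
keys = sd.keys()
assert keys.index('b') == 1   # rank of the key within the sorted order
```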
if not self.ignore_self:
res = summary.summarize(muppy.get_objects())
else:
# If the user requested the data required to store summaries to be
# ignored in the summaries, we need to identify all objects which
# are related to each summary stored.
# Thus we build a list of all objects used for summary storage as
# well as a dictionary which tells us how often an object is
# referenced by the summaries.
# During this identification process, more objects are referenced,
# namely int objects identifying referenced objects as well as the
# corresponding count.
# For all these objects it will be checked whether they are
# referenced from outside the monitor's scope. If not, they will be
# subtracted from the snapshot summary, otherwise they are
# included (as this indicates that they are relevant to the
# application).
all_of_them = [] # every single object
ref_counter = {} # how often it is referenced; (id(o), o) pairs
def store_info(o):
all_of_them.append(o)
if id(o) in ref_counter:
ref_counter[id(o)] += 1
else:
ref_counter[id(o)] = 1
# store infos on every single object related to the summaries
store_info(self.summaries)
for k, v in self.summaries.items():
store_info(k)
summary._traverse(v, store_info)
# do the summary
res = summary.summarize(muppy.get_objects())
# remove ids stored in the ref_counter
for _id in ref_counter:
# referenced in frame, ref_counter, ref_counter.keys()
if len(gc.get_referrers(_id)) == (3):
summary._subtract(res, _id)
for o in all_of_them:
# referenced in frame, summary, all_of_them
if len(gc.get_referrers(o)) == (ref_counter[id(o)] + 2):
summary._subtract(res, o)
return res
|
def create_summary(self)
|
Return a summary.
See also the notes on ignore_self in the class as well as the
initializer documentation.
| 5.749729
| 5.560618
| 1.034009
|
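A usage sketch assuming this method belongs to pympler's `SummaryTracker`; the import path and class name are inferred from the code above and may differ by version:

```python
from pympler import tracker  # assumed dependency

tr = tracker.SummaryTracker()
baseline = tr.create_summary()                # snapshot of all live objects
payload = [dict(n=i) for i in range(10000)]   # allocate something measurable
snapshot = tr.create_summary()                # second snapshot to compare later
```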
res = None
if summary2 is None:
self.s1 = self.create_summary()
if summary1 is None:
res = summary.get_diff(self.s0, self.s1)
else:
res = summary.get_diff(summary1, self.s1)
self.s0 = self.s1
else:
if summary1 is not None:
res = summary.get_diff(summary1, summary2)
else:
raise ValueError("You cannot provide summary2 without summary1.")
return summary._sweep(res)
|
def diff(self, summary1=None, summary2=None)
|
Compute the diff between two summaries.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
| 2.946673
| 2.816021
| 1.046396
|
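The three call patterns described in the docstring, sketched under the same `SummaryTracker` assumption as above:

```python
from pympler import tracker  # assumed dependency

tr = tracker.SummaryTracker()
s1 = tr.create_summary()
data = list(range(50000))
s2 = tr.create_summary()

d1 = tr.diff()                          # previous snapshot -> fresh snapshot
d2 = tr.diff(summary1=s1)               # s1 -> fresh snapshot
d3 = tr.diff(summary1=s1, summary2=s2)  # s1 -> s2, no new snapshot taken
# tr.diff(summary2=s2)                  # invalid: raises ValueError
```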
summary.print_(self.diff(summary1=summary1, summary2=summary2))
|
def print_diff(self, summary1=None, summary2=None)
|
Compute the diff between two summaries and print it.
If no summary is provided, the diff from the last to the current
summary is used. If summary1 is provided the diff from summary1
to the current summary is used. If summary1 and summary2 are
provided, the diff between these two is used.
| 7.087651
| 8.308731
| 0.853037
|
def remove_ignore(objects, ignore=[]):
# remove all objects listed in the ignore list
res = []
for o in objects:
if not compat.object_in_list(o, ignore):
res.append(o)
return res
tmp = gc.get_objects()
ignore.append(inspect.currentframe()) #PYCHOK change ignore
ignore.append(self) #PYCHOK change ignore
if hasattr(self, 'o0'): ignore.append(self.o0) #PYCHOK change ignore
if hasattr(self, 'o1'): ignore.append(self.o1) #PYCHOK change ignore
ignore.append(ignore) #PYCHOK change ignore
ignore.append(remove_ignore) #PYCHOK change ignore
# this implies that referenced objects are also ignored
tmp = remove_ignore(tmp, ignore)
res = []
for o in tmp:
# gc.get_objects returns only container objects, but we also want
# the objects referenced by them
refs = muppy.get_referents(o)
for ref in refs:
if not muppy._is_containerobject(ref):
# we already got the container objects, now we only add
# non-container objects
res.append(ref)
res.extend(tmp)
res = muppy._remove_duplicates(res)
if ignore is not None:
# repeat to filter out objects which may have been referenced
res = remove_ignore(res, ignore)
# manual cleanup, see comment above
del ignore[:]
return res
|
def _get_objects(self, ignore=[])
|
Get all currently existing objects.
XXX - ToDo: This method is a copy&paste from muppy.get_objects, but
some modifications are applied. Specifically, it allows ignoring
objects (which includes the current frame).
keyword arguments
ignore -- list of objects to ignore
| 4.192653
| 4.068903
| 1.030414
|
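A rough standalone sketch of the gathering step above, using `gc.is_tracked` as a stand-in for the container-object test; this is an approximation, not the method's actual implementation:

```python
import gc

def all_objects_sketch():
    """Collect gc-tracked containers plus their non-container referents,
    de-duplicated by identity (mirroring the pattern above)."""
    containers = gc.get_objects()
    seen = {id(o) for o in containers}
    result = list(containers)
    for obj in containers:
        for ref in gc.get_referents(obj):
            if id(ref) not in seen and not gc.is_tracked(ref):
                seen.add(id(ref))
                result.append(ref)
    return result
```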