hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f04f7654b91c9acdf1a8a28e56bd96cb7d669b63 | 688 | py | Python | python-annotator/meta_converter.py | bubble-07/AnimeReal | b12193f10d231ee85a2a86ec2defeca0b5a4e240 | [
"BSD-2-Clause"
] | null | null | null | python-annotator/meta_converter.py | bubble-07/AnimeReal | b12193f10d231ee85a2a86ec2defeca0b5a4e240 | [
"BSD-2-Clause"
] | null | null | null | python-annotator/meta_converter.py | bubble-07/AnimeReal | b12193f10d231ee85a2a86ec2defeca0b5a4e240 | [
"BSD-2-Clause"
] | null | null | null | import sys
import os
import tensorflow as tf
in_dir = sys.argv[1]
out_dir = sys.argv[2]
meta_path = os.path.join(in_dir, "RGBToCoord.meta")
output_node_names = ['RGBToCoordOut'] # Output nodes
with tf.Session() as sess:
# Restore the graph
saver = tf.train.import_meta_graph(meta_path)
# Load weights
saver.restore(sess, tf.train.latest_checkpoint(in_dir))
# Freeze the graph
frozen_graph_def = tf.graph_util.convert_variables_to_constants(
sess,
sess.graph_def,
output_node_names)
# Save the frozen graph
with open(os.path.join(out_dir, 'output_graph.pb'), 'wb') as f:
f.write(frozen_graph_def.SerializeToString())
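# Minimal invocation sketch (the directory names below are assumptions, shown
# for illustration only): the input directory must contain RGBToCoord.meta and
# its checkpoint files; the frozen graph is written to <out_dir>/output_graph.pb.
#
#   python meta_converter.py ./checkpoints ./frozen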
| 23.724138 | 68 | 0.700581 |
c7861870e268764722a9d52c83629e314af90c51 | 2,695 | py | Python | plugins/action/enterprise_ssid.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/action/enterprise_ssid.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | plugins/action/enterprise_ssid.py | robertcsapo/dnacenter-ansible | 33f776f8c0bc7113da73191c301dd1807e6b4a43 | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.dnac.plugins.module_utils.dnac import (
ModuleDefinition,
DNACModule,
dnac_argument_spec,
)
from ansible_collections.cisco.dnac.plugins.module_utils.definitions.enterprise_ssid import (
module_definition,
)
IDEMPOTENT = False
# Instantiate the module definition for this module
moddef = ModuleDefinition(module_definition)
# Get the argument spec for this module and add the 'state' param,
# which is common to all modules
argument_spec = moddef.get_argument_spec_dict()
argument_spec.update(dict(dnac_argument_spec(idempotent=IDEMPOTENT)))
# Get the schema conditionals, if applicable
required_if = moddef.get_required_if_list()
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = False
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(required_if=required_if),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
dnac = DNACModule(
moddef=moddef,
params=self._task.args,
verbosity=self._play_context.verbosity,
)
state = self._task.args.get("state")
if state == "query":
dnac.exec("get")
elif state == "delete":
dnac.exec("delete")
elif state == "create":
dnac.disable_validation()
dnac.exec("post")
self._result.update(dnac.exit_json())
return self._result
| 33.271605 | 128 | 0.69128 |
aaf1883a3f4a98c5ad35b186fc6df2c29b3bca5f | 64,971 | py | Python | holoviews/operation/datashader.py | Jacob-Barhak/holoviews | 5df0269595ca7befca202f9d05522c68983dc974 | [
"BSD-3-Clause"
] | null | null | null | holoviews/operation/datashader.py | Jacob-Barhak/holoviews | 5df0269595ca7befca202f9d05522c68983dc974 | [
"BSD-3-Clause"
] | null | null | null | holoviews/operation/datashader.py | Jacob-Barhak/holoviews | 5df0269595ca7befca202f9d05522c68983dc974 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division
from collections import Callable, Iterable
import warnings
import param
import numpy as np
import pandas as pd
import xarray as xr
import datashader as ds
import datashader.reductions as rd
import datashader.transfer_functions as tf
import dask.dataframe as dd
from param.parameterized import bothmethod
try:
from datashader.bundling import (directly_connect_edges as connect_edges,
hammer_bundle)
except:
hammer_bundle, connect_edges = object, object
from ..core import (Operation, Element, Dimension, NdOverlay,
CompositeOverlay, Dataset, Overlay, OrderedDict)
from ..core.data import PandasInterface, XArrayInterface, DaskInterface
from ..core.util import (
LooseVersion, basestring, cftime_types, cftime_to_timestamp,
datetime_types, dt_to_int, get_param_values, max_range)
from ..element import (Image, Path, Curve, RGB, Graph, TriMesh,
QuadMesh, Contours, Spikes, Area, Spread,
Segments, Scatter, Points)
from ..streams import RangeXY, PlotSize
ds_version = LooseVersion(ds.__version__)
class LinkableOperation(Operation):
"""
Abstract baseclass for operations supporting linked inputs.
"""
link_inputs = param.Boolean(default=True, doc="""
By default, the link_inputs parameter is set to True so that
when applying an operation, backends that support linked
streams update RangeXY streams on the inputs of the operation.
Disable when you do not want the resulting plot to be
interactive, e.g. when trying to display an interactive plot a
second time.""")
_allow_extra_keywords=True
class ResamplingOperation(LinkableOperation):
"""
Abstract baseclass for resampling operations
"""
dynamic = param.Boolean(default=True, doc="""
Enables dynamic processing by default.""")
expand = param.Boolean(default=True, doc="""
Whether the x_range and y_range should be allowed to expand
beyond the extent of the data. Setting this value to True is
useful for the case where you want to ensure a certain size of
output grid, e.g. if you are doing masking or other arithmetic
on the grids. A value of False ensures that the grid is only
just as large as it needs to be to contain the data, which will
be faster and use less memory if the resulting aggregate is
being overlaid on a much larger background.""")
height = param.Integer(default=400, doc="""
The height of the output image in pixels.""")
width = param.Integer(default=400, doc="""
The width of the output image in pixels.""")
x_range = param.Tuple(default=None, length=2, doc="""
The x_range as a tuple of min and max x-value. Auto-ranges
if set to None.""")
y_range = param.Tuple(default=None, length=2, doc="""
The y-axis range as a tuple of min and max y value. Auto-ranges
if set to None.""")
x_sampling = param.Number(default=None, doc="""
Specifies the smallest allowed sampling interval along the x axis.""")
y_sampling = param.Number(default=None, doc="""
Specifies the smallest allowed sampling interval along the y axis.""")
target = param.ClassSelector(class_=Image, doc="""
A target Image which defines the desired x_range, y_range,
width and height.
""")
streams = param.List(default=[PlotSize, RangeXY], doc="""
List of streams that are applied if dynamic=True, allowing
for dynamic interaction with the plot.""")
element_type = param.ClassSelector(class_=(Dataset,), instantiate=False,
is_instance=False, default=Image,
doc="""
The type of the returned Elements, must be a 2D Dataset type.""")
precompute = param.Boolean(default=False, doc="""
Whether to apply precomputing operations. Precomputing can
speed up resampling operations by avoiding unnecessary
recomputation if the supplied element does not change between
calls. The cost of enabling this option is that the memory
used to represent this internal state is not freed between
calls.""")
@bothmethod
def instance(self_or_cls,**params):
filtered = {k:v for k,v in params.items() if k in self_or_cls.param}
inst = super(ResamplingOperation, self_or_cls).instance(**filtered)
inst._precomputed = {}
return inst
def _get_sampling(self, element, x, y, ndim=2, default=None):
target = self.p.target
if not isinstance(x, list) and x is not None:
x = [x]
if not isinstance(y, list) and y is not None:
y = [y]
if target:
x0, y0, x1, y1 = target.bounds.lbrt()
x_range, y_range = (x0, x1), (y0, y1)
height, width = target.dimension_values(2, flat=False).shape
else:
if x is None:
x_range = self.p.x_range or (-0.5, 0.5)
elif self.p.expand or not self.p.x_range:
x_range = self.p.x_range or max_range([element.range(xd) for xd in x])
else:
x0, x1 = self.p.x_range
ex0, ex1 = max_range([element.range(xd) for xd in x])
x_range = (np.min([np.max([x0, ex0]), ex1]),
np.max([np.min([x1, ex1]), ex0]))
if (y is None and ndim == 2):
y_range = self.p.y_range or default or (-0.5, 0.5)
elif self.p.expand or not self.p.y_range:
y_range = self.p.y_range or (max_range([element.range(yd) for yd in y])
if default is None else default)
else:
y0, y1 = self.p.y_range
if default is None:
ey0, ey1 = max_range([element.range(yd) for yd in y])
else:
ey0, ey1 = default
y_range = (np.min([np.max([y0, ey0]), ey1]),
np.max([np.min([y1, ey1]), ey0]))
width, height = self.p.width, self.p.height
(xstart, xend), (ystart, yend) = x_range, y_range
xtype = 'numeric'
if isinstance(xstart, datetime_types) or isinstance(xend, datetime_types):
xstart, xend = dt_to_int(xstart, 'ns'), dt_to_int(xend, 'ns')
xtype = 'datetime'
elif not np.isfinite(xstart) and not np.isfinite(xend):
xstart, xend = 0, 0
if x and element.get_dimension_type(x[0]) in datetime_types:
xtype = 'datetime'
ytype = 'numeric'
if isinstance(ystart, datetime_types) or isinstance(yend, datetime_types):
ystart, yend = dt_to_int(ystart, 'ns'), dt_to_int(yend, 'ns')
ytype = 'datetime'
elif not np.isfinite(ystart) and not np.isfinite(yend):
ystart, yend = 0, 0
if y and element.get_dimension_type(y[0]) in datetime_types:
ytype = 'datetime'
# Compute highest allowed sampling density
xspan = xend - xstart
yspan = yend - ystart
if self.p.x_sampling:
width = int(min([(xspan/self.p.x_sampling), width]))
if self.p.y_sampling:
height = int(min([(yspan/self.p.y_sampling), height]))
if xstart == xend or width == 0:
xunit, width = 0, 0
else:
xunit = float(xspan)/width
if ystart == yend or height == 0:
yunit, height = 0, 0
else:
yunit = float(yspan)/height
xs, ys = (np.linspace(xstart+xunit/2., xend-xunit/2., width),
np.linspace(ystart+yunit/2., yend-yunit/2., height))
return ((xstart, xend), (ystart, yend)), (xs, ys), (width, height), (xtype, ytype)
def _dt_transform(self, x_range, y_range, xs, ys, xtype, ytype):
(xstart, xend), (ystart, yend) = x_range, y_range
if xtype == 'datetime':
xstart, xend = (np.array([xstart, xend])/1e3).astype('datetime64[us]')
xs = (xs/1e3).astype('datetime64[us]')
if ytype == 'datetime':
ystart, yend = (np.array([ystart, yend])/1e3).astype('datetime64[us]')
ys = (ys/1e3).astype('datetime64[us]')
return ((xstart, xend), (ystart, yend)), (xs, ys)
class AggregationOperation(ResamplingOperation):
"""
AggregationOperation extends the ResamplingOperation defining an
aggregator parameter used to define a datashader Reduction.
"""
aggregator = param.ClassSelector(class_=(ds.reductions.Reduction, basestring),
default=ds.count(), doc="""
Datashader reduction function used for aggregating the data.
The aggregator may also define a column to aggregate; if
no column is defined the first value dimension of the element
will be used. May also be defined as a string.""")
_agg_methods = {
'any': rd.any,
'count': rd.count,
'first': rd.first,
'last': rd.last,
'mode': rd.mode,
'mean': rd.mean,
'sum': rd.sum,
'var': rd.var,
'std': rd.std,
'min': rd.min,
'max': rd.max
}
def _get_aggregator(self, element, add_field=True):
agg = self.p.aggregator
if isinstance(agg, basestring):
if agg not in self._agg_methods:
agg_methods = sorted(self._agg_methods)
raise ValueError("Aggregation method '%r' is not known; "
"aggregator must be one of: %r" %
(agg, agg_methods))
agg = self._agg_methods[agg]()
elements = element.traverse(lambda x: x, [Element])
if add_field and agg.column is None and not isinstance(agg, (rd.count, rd.any)):
if not elements:
raise ValueError('Could not find any elements to apply '
'%s operation to.' % type(self).__name__)
inner_element = elements[0]
if isinstance(inner_element, TriMesh) and inner_element.nodes.vdims:
field = inner_element.nodes.vdims[0].name
elif inner_element.vdims:
field = inner_element.vdims[0].name
elif isinstance(element, NdOverlay):
field = element.kdims[0].name
else:
raise ValueError("Could not determine dimension to apply "
"'%s' operation to. Declare the dimension "
"to aggregate as part of the datashader "
"aggregator." % type(self).__name__)
agg = type(agg)(field)
return agg
def _empty_agg(self, element, x, y, width, height, xs, ys, agg_fn, **params):
x = x.name if x else 'x'
y = y.name if y else 'y'
xarray = xr.DataArray(np.full((height, width), np.NaN),
dims=[y, x], coords={x: xs, y: ys})
if width == 0:
params['xdensity'] = 1
if height == 0:
params['ydensity'] = 1
el = self.p.element_type(xarray, **params)
if isinstance(agg_fn, ds.count_cat):
vals = element.dimension_values(agg_fn.column, expanded=False)
dim = element.get_dimension(agg_fn.column)
return NdOverlay({v: el for v in vals}, dim)
return el
def _get_agg_params(self, element, x, y, agg_fn, bounds):
params = dict(get_param_values(element), kdims=[x, y],
datatype=['xarray'], bounds=bounds)
column = agg_fn.column if agg_fn else None
if column:
dims = [d for d in element.dimensions('ranges') if d == column]
if not dims:
raise ValueError("Aggregation column '%s' not found on '%s' element. "
"Ensure the aggregator references an existing "
"dimension." % (column,element))
name = '%s Count' % column if isinstance(agg_fn, ds.count_cat) else column
vdims = [dims[0].clone(name)]
else:
vdims = Dimension('Count')
params['vdims'] = vdims
return params
class aggregate(AggregationOperation):
"""
aggregate implements 2D binning for any valid HoloViews Element
type using datashader. I.e., this operation turns a HoloViews
Element or overlay of Elements into an Image or an overlay of
Images by rasterizing it. This allows quickly aggregating large
datasets computing a fixed-sized representation independent
of the original dataset size.
By default it will simply count the number of values in each bin
but other aggregators can be supplied implementing mean, max, min
and other reduction operations.
The bins of the aggregate are defined by the width and height and
the x_range and y_range. If x_sampling or y_sampling are supplied
the operation will ensure that a bin is no smaller than the minimum
sampling distance by reducing the width and height when zoomed in
beyond the minimum sampling distance.
By default, the PlotSize stream is applied when this operation
is used dynamically, which means that the height and width
will automatically be set to match the inner dimensions of
the linked plot.
"""
@classmethod
def get_agg_data(cls, obj, category=None):
"""
Reduces any Overlay or NdOverlay of Elements into a single
xarray Dataset that can be aggregated.
"""
paths = []
if isinstance(obj, Graph):
obj = obj.edgepaths
kdims = list(obj.kdims)
vdims = list(obj.vdims)
dims = obj.dimensions()[:2]
if isinstance(obj, Path):
glyph = 'line'
for p in obj.split(datatype='dataframe'):
paths.append(p)
elif isinstance(obj, CompositeOverlay):
element = None
for key, el in obj.data.items():
x, y, element, glyph = cls.get_agg_data(el)
dims = (x, y)
df = PandasInterface.as_dframe(element)
if isinstance(obj, NdOverlay):
df = df.assign(**dict(zip(obj.dimensions('key', True), key)))
paths.append(df)
if element is None:
dims = None
else:
kdims += element.kdims
vdims = element.vdims
elif isinstance(obj, Element):
glyph = 'line' if isinstance(obj, Curve) else 'points'
paths.append(PandasInterface.as_dframe(obj))
if dims is None or len(dims) != 2:
return None, None, None, None
else:
x, y = dims
if len(paths) > 1:
if glyph == 'line':
path = paths[0][:1]
if isinstance(path, dd.DataFrame):
path = path.compute()
empty = path.copy()
empty.iloc[0, :] = (np.NaN,) * empty.shape[1]
paths = [elem for p in paths for elem in (p, empty)][:-1]
if all(isinstance(path, dd.DataFrame) for path in paths):
df = dd.concat(paths)
else:
paths = [p.compute() if isinstance(p, dd.DataFrame) else p for p in paths]
df = pd.concat(paths)
else:
df = paths[0] if paths else pd.DataFrame([], columns=[x.name, y.name])
if category and df[category].dtype.name != 'category':
df[category] = df[category].astype('category')
is_dask = isinstance(df, dd.DataFrame)
if any((not is_dask and len(df[d.name]) and isinstance(df[d.name].values[0], cftime_types)) or
df[d.name].dtype.kind == 'M' for d in (x, y)):
df = df.copy()
for d in (x, y):
vals = df[d.name]
if not is_dask and len(vals) and isinstance(vals.values[0], cftime_types):
vals = cftime_to_timestamp(vals, 'ns')
elif df[d.name].dtype.kind == 'M':
vals = vals.astype('datetime64[ns]')
else:
continue
df[d.name] = vals.astype('int64')
return x, y, Dataset(df, kdims=kdims, vdims=vdims), glyph
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
category = agg_fn.column if isinstance(agg_fn, ds.count_cat) else None
if overlay_aggregate.applies(element, agg_fn):
params = dict(
{p: v for p, v in self.get_param_values() if p != 'name'},
dynamic=False, **{p: v for p, v in self.p.items()
if p not in ('name', 'dynamic')})
return overlay_aggregate(element, **params)
if element._plot_id in self._precomputed:
x, y, data, glyph = self._precomputed[element._plot_id]
else:
x, y, data, glyph = self.get_agg_data(element, category)
if self.p.precompute:
self._precomputed[element._plot_id] = x, y, data, glyph
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = self._get_sampling(element, x, y)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
params = self._get_agg_params(element, x, y, agg_fn, (x0, y0, x1, y1))
if x is None or y is None or width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
elif getattr(data, 'interface', None) is not DaskInterface and not len(data):
empty_val = 0 if isinstance(agg_fn, ds.count) else np.NaN
xarray = xr.DataArray(np.full((height, width), empty_val),
dims=[y.name, x.name], coords={x.name: xs, y.name: ys})
return self.p.element_type(xarray, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
dfdata = PandasInterface.as_dframe(data)
agg = getattr(cvs, glyph)(dfdata, x.name, y.name, agg_fn)
if 'x_axis' in agg.coords and 'y_axis' in agg.coords:
agg = agg.rename({'x_axis': x, 'y_axis': y})
if xtype == 'datetime':
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
if ytype == 'datetime':
agg[y.name] = (agg[y.name]/1e3).astype('datetime64[us]')
if agg.ndim == 2:
# Replacing x and y coordinates to avoid numerical precision issues
eldata = agg if ds_version > '0.5.0' else (xs, ys, agg.data)
return self.p.element_type(eldata, **params)
else:
layers = {}
for c in agg.coords[agg_fn.column].data:
cagg = agg.sel(**{agg_fn.column: c})
eldata = cagg if ds_version > '0.5.0' else (xs, ys, cagg.data)
layers[c] = self.p.element_type(eldata, **params)
return NdOverlay(layers, kdims=[data.get_dimension(agg_fn.column)])
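# Minimal usage sketch of the aggregate operation (for illustration only;
# assumes `points` is a Points element carrying a 'value' value dimension):
def _example_aggregate_usage(points):
    # Bin the points into a fixed 400x400 grid, averaging 'value' per bin.
    return aggregate(points, aggregator=ds.mean('value'),
                     width=400, height=400, dynamic=False)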
class overlay_aggregate(aggregate):
"""
Optimized aggregation for NdOverlay objects by aggregating each
Element in an NdOverlay individually avoiding having to concatenate
items in the NdOverlay. Works by summing sum and count aggregates and
applying appropriate masking for NaN values. Mean aggregation
is also supported by dividing sum and count aggregates. count_cat
aggregates are grouped by the categorical dimension and a separate
aggregate for each category is generated.
"""
@classmethod
def applies(cls, element, agg_fn):
return (isinstance(element, NdOverlay) and
((isinstance(agg_fn, (ds.count, ds.sum, ds.mean)) and
(agg_fn.column is None or agg_fn.column not in element.kdims)) or
(isinstance(agg_fn, ds.count_cat) and agg_fn.column in element.kdims)))
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
if not self.applies(element, agg_fn):
raise ValueError('overlay_aggregate only handles aggregation '
'of NdOverlay types with count, sum or mean '
'reduction.')
# Compute overall bounds
dims = element.last.dimensions()[0:2]
ndims = len(dims)
if ndims == 1:
x, y = dims[0], None
else:
x, y = dims
info = self._get_sampling(element, x, y, ndims)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), _ = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
agg_params = dict({k: v for k, v in dict(self.get_param_values(), **self.p).items()
if k in aggregate.param},
x_range=(x0, x1), y_range=(y0, y1))
bbox = (x0, y0, x1, y1)
# Optimize categorical counts by aggregating them individually
if isinstance(agg_fn, ds.count_cat):
agg_params.update(dict(dynamic=False, aggregator=ds.count()))
agg_fn1 = aggregate.instance(**agg_params)
if element.ndims == 1:
grouped = element
else:
grouped = element.groupby([agg_fn.column], container_type=NdOverlay,
group_type=NdOverlay)
groups = []
for k, v in grouped.items():
agg = agg_fn1(v)
groups.append((k, agg.clone(agg.data, bounds=bbox)))
return grouped.clone(groups)
# Create aggregate instance for sum, count operations, breaking mean
# into two aggregates
column = agg_fn.column or 'Count'
if isinstance(agg_fn, ds.mean):
agg_fn1 = aggregate.instance(**dict(agg_params, aggregator=ds.sum(column)))
agg_fn2 = aggregate.instance(**dict(agg_params, aggregator=ds.count()))
else:
agg_fn1 = aggregate.instance(**agg_params)
agg_fn2 = None
is_sum = isinstance(agg_fn1.aggregator, ds.sum)
# Accumulate into two aggregates and mask
agg, agg2, mask = None, None, None
for v in element:
# Compute aggregates and mask
new_agg = agg_fn1.process_element(v, None)
if is_sum:
new_mask = np.isnan(new_agg.data[column].values)
new_agg.data = new_agg.data.fillna(0)
if agg_fn2:
new_agg2 = agg_fn2.process_element(v, None)
if agg is None:
agg = new_agg
if is_sum: mask = new_mask
if agg_fn2: agg2 = new_agg2
else:
agg.data += new_agg.data
if is_sum: mask &= new_mask
if agg_fn2: agg2.data += new_agg2.data
# Divide sum by count to compute mean
if agg2 is not None:
agg2.data.rename({'Count': agg_fn.column}, inplace=True)
with np.errstate(divide='ignore', invalid='ignore'):
agg.data /= agg2.data
# Fill masked values with NaNs
if is_sum:
agg.data[column].values[mask] = np.NaN
return agg.clone(bounds=bbox)
class area_aggregate(AggregationOperation):
"""
Aggregates Area elements by filling the area between zero and
the y-values if only one value dimension is defined and the area
between the curves if two are provided.
"""
def _process(self, element, key=None):
x, y = element.dimensions()[:2]
agg_fn = self._get_aggregator(element)
default = None
if not self.p.y_range:
y0, y1 = element.range(1)
if len(element.vdims) > 1:
y0, _ = element.range(2)
elif y0 >= 0:
y0 = 0
elif y1 <= 0:
y1 = 0
default = (y0, y1)
ystack = element.vdims[1].name if len(element.vdims) > 1 else None
info = self._get_sampling(element, x, y, ndim=2, default=default)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
df = PandasInterface.as_dframe(element)
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
params = dict(get_param_values(element), kdims=[x, y], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
agg = cvs.area(df, x.name, y.name, agg_fn, axis=0, y_stack=ystack)
if xtype == "datetime":
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
return self.p.element_type(agg, **params)
class spread_aggregate(area_aggregate):
"""
Aggregates Spread elements by filling the area between the lower
and upper error band.
"""
def _process(self, element, key=None):
x, y = element.dimensions()[:2]
df = PandasInterface.as_dframe(element)
if df is element.data:
df = df.copy()
pos, neg = element.vdims[1:3] if len(element.vdims) > 2 else element.vdims[1:2]*2
yvals = df[y.name]
df[y.name] = yvals+df[pos.name]
df['_lower'] = yvals-df[neg.name]
area = element.clone(df, vdims=[y, '_lower']+element.vdims[3:], new_type=Area)
return super(spread_aggregate, self)._process(area, key=None)
class spikes_aggregate(AggregationOperation):
"""
Aggregates Spikes elements by drawing individual line segments
over the entire y_range if no value dimension is defined and
between zero and the y-value if one is defined.
"""
spike_length = param.Number(default=None, allow_None=True, doc="""
If numeric, specifies the length of each spike, overriding the
vdims values (if present).""")
offset = param.Number(default=0., doc="""
The offset of the lower end of each spike.""")
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
x, y = element.kdims[0], None
spike_length = 0.5 if self.p.spike_length is None else self.p.spike_length
if element.vdims and self.p.spike_length is None:
x, y = element.dimensions()[:2]
rename_dict = {'x': x.name, 'y':y.name}
if not self.p.y_range:
y0, y1 = element.range(1)
if y0 >= 0:
default = (0, y1)
elif y1 <= 0:
default = (y0, 0)
else:
default = (y0, y1)
else:
default = None
else:
x, y = element.kdims[0], None
default = (float(self.p.offset),
float(self.p.offset + spike_length))
rename_dict = {'x': x.name}
info = self._get_sampling(element, x, y, ndim=1, default=default)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
value_cols = [] if agg_fn.column is None else [agg_fn.column]
if y is None:
df = element.dframe([x]+value_cols).copy()
y = Dimension('y')
df['y0'] = float(self.p.offset)
df['y1'] = float(self.p.offset + spike_length)
yagg = ['y0', 'y1']
if not self.p.expand: height = 1
else:
df = element.dframe([x, y]+value_cols).copy()
df['y0'] = np.array(0, df.dtypes[y.name])
yagg = ['y0', y.name]
if xtype == 'datetime':
df[x.name] = df[x.name].astype('datetime64[us]').astype('int64')
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
params = dict(get_param_values(element), kdims=[x, y], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
agg = cvs.line(df, x.name, yagg, agg_fn, axis=1).rename(rename_dict)
if xtype == "datetime":
agg[x.name] = (agg[x.name]/1e3).astype('datetime64[us]')
return self.p.element_type(agg, **params)
class segments_aggregate(AggregationOperation):
"""
Aggregates Segments elements.
"""
def _process(self, element, key=None):
agg_fn = self._get_aggregator(element)
x0d, y0d, x1d, y1d = element.kdims
info = self._get_sampling(element, [x0d, x1d], [y0d, y1d], ndim=1)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
df = element.interface.as_dframe(element)
if xtype == 'datetime':
df[x0d.name] = df[x0d.name].astype('datetime64[us]').astype('int64')
df[x1d.name] = df[x1d.name].astype('datetime64[us]').astype('int64')
if ytype == 'datetime':
df[y0d.name] = df[y0d.name].astype('datetime64[us]').astype('int64')
df[y1d.name] = df[y1d.name].astype('datetime64[us]').astype('int64')
if isinstance(agg_fn, (ds.count, ds.any)):
vdim = type(agg_fn).__name__
else:
vdim = element.get_dimension(agg_fn.column)
params = dict(get_param_values(element), kdims=[x0d, y0d], vdims=vdim,
datatype=['xarray'], bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x0d, y0d, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
agg = cvs.line(df, [x0d.name, x1d.name], [y0d.name, y1d.name], agg_fn, axis=1)
xdim, ydim = list(agg.dims)[:2][::-1]
if xtype == "datetime":
agg[xdim] = (agg[xdim]/1e3).astype('datetime64[us]')
if ytype == "datetime":
agg[ydim] = (agg[ydim]/1e3).astype('datetime64[us]')
params['kdims'] = [xdim, ydim]
return self.p.element_type(agg, **params)
class regrid(AggregationOperation):
"""
regrid allows resampling a HoloViews Image type using specified
up- and downsampling functions defined using the aggregator and
interpolation parameters respectively. By default upsampling is
disabled to avoid unnecessarily upscaling an image that has to be
sent to the browser. Also disables expanding the image beyond its
original bounds avoiding unnecessarily padding the output array
with NaN values.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
expand = param.Boolean(default=False, doc="""
Whether the x_range and y_range should be allowed to expand
beyond the extent of the data. Setting this value to True is
useful for the case where you want to ensure a certain size of
output grid, e.g. if you are doing masking or other arithmetic
on the grids. A value of False ensures that the grid is only
just as large as it needs to be to contain the data, which will
be faster and use less memory if the resulting aggregate is
being overlaid on a much larger background.""")
interpolation = param.ObjectSelector(default='nearest',
objects=['linear', 'nearest', 'bilinear', None, False], doc="""
Interpolation method used when upsampling.""")
upsample = param.Boolean(default=False, doc="""
Whether to allow upsampling if the source array is smaller
than the requested array. Setting this value to True will
enable upsampling using the interpolation method, when the
requested width and height are larger than what is available
on the source grid. If upsampling is disabled (the default)
the width and height are clipped to what is available on the
source array.""")
def _get_xarrays(self, element, coords, xtype, ytype):
x, y = element.kdims
dims = [y.name, x.name]
irregular = any(element.interface.irregular(element, d)
for d in dims)
if irregular:
coord_dict = {x.name: (('y', 'x'), coords[0]),
y.name: (('y', 'x'), coords[1])}
else:
coord_dict = {x.name: coords[0], y.name: coords[1]}
arrays = {}
for vd in element.vdims:
if element.interface is XArrayInterface:
xarr = element.data[vd.name]
if 'datetime' in (xtype, ytype):
xarr = xarr.copy()
if dims != xarr.dims and not irregular:
xarr = xarr.transpose(*dims)
elif irregular:
arr = element.dimension_values(vd, flat=False)
xarr = xr.DataArray(arr, coords=coord_dict, dims=['y', 'x'])
else:
arr = element.dimension_values(vd, flat=False)
xarr = xr.DataArray(arr, coords=coord_dict, dims=dims)
if xtype == "datetime":
xarr[x.name] = [dt_to_int(v, 'ns') for v in xarr[x.name].values]
if ytype == "datetime":
xarr[y.name] = [dt_to_int(v, 'ns') for v in xarr[y.name].values]
arrays[vd.name] = xarr
return arrays
def _process(self, element, key=None):
if ds_version <= '0.5.0':
raise RuntimeError('regrid operation requires datashader>=0.6.0')
# Compute coords, ranges and size
x, y = element.kdims
coords = tuple(element.dimension_values(d, expanded=False) for d in [x, y])
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
# Disable upsampling by clipping size and ranges
(xstart, xend), (ystart, yend) = (x_range, y_range)
xspan, yspan = (xend-xstart), (yend-ystart)
interp = self.p.interpolation or None
if interp == 'bilinear': interp = 'linear'
if not (self.p.upsample or interp is None) and self.p.target is None:
(x0, x1), (y0, y1) = element.range(0), element.range(1)
if isinstance(x0, datetime_types):
x0, x1 = dt_to_int(x0, 'ns'), dt_to_int(x1, 'ns')
if isinstance(y0, datetime_types):
y0, y1 = dt_to_int(y0, 'ns'), dt_to_int(y1, 'ns')
exspan, eyspan = (x1-x0), (y1-y0)
if np.isfinite(exspan) and exspan > 0 and xspan > 0:
width = max([min([int((xspan/exspan) * len(coords[0])), width]), 1])
else:
width = 0
if np.isfinite(eyspan) and eyspan > 0 and yspan > 0:
height = max([min([int((yspan/eyspan) * len(coords[1])), height]), 1])
else:
height = 0
xunit = float(xspan)/width if width else 0
yunit = float(yspan)/height if height else 0
xs, ys = (np.linspace(xstart+xunit/2., xend-xunit/2., width),
np.linspace(ystart+yunit/2., yend-yunit/2., height))
# Compute bounds (converting datetimes)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(x_range, y_range, xs, ys, xtype, ytype)
params = dict(bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
if width == 0:
params['xdensity'] = 1
if height == 0:
params['ydensity'] = 1
return element.clone((xs, ys, np.zeros((height, width))), **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
# Apply regridding to each value dimension
regridded = {}
arrays = self._get_xarrays(element, coords, xtype, ytype)
agg_fn = self._get_aggregator(element, add_field=False)
for vd, xarr in arrays.items():
rarray = cvs.raster(xarr, upsample_method=interp,
downsample_method=agg_fn)
# Convert datetime coordinates
if xtype == "datetime":
rarray[x.name] = (rarray[x.name]/1e3).astype('datetime64[us]')
if ytype == "datetime":
rarray[y.name] = (rarray[y.name]/1e3).astype('datetime64[us]')
regridded[vd] = rarray
regridded = xr.Dataset(regridded)
return element.clone(regridded, datatype=['xarray']+element.datatype, **params)
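# Minimal usage sketch of regrid (for illustration only; assumes `image` is a
# large Image element):
def _example_regrid_usage(image):
    # Downsample to a 200x200 mean-aggregated grid and allow bilinear
    # interpolation when the requested grid is finer than the source grid.
    return regrid(image, width=200, height=200, upsample=True,
                  interpolation='bilinear', dynamic=False)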
class contours_rasterize(aggregate):
"""
Rasterizes the Contours element by weighting the aggregation by
the iso-contour levels if a value dimension is defined, otherwise
default to any aggregator.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
def _get_aggregator(self, element, add_field=True):
agg = self.p.aggregator
if not element.vdims and agg.column is None and not isinstance(agg, (rd.count, rd.any)):
return ds.any()
return super(contours_rasterize, self)._get_aggregator(element, add_field)
class trimesh_rasterize(aggregate):
"""
Rasterize the TriMesh element using the supplied aggregator. If
the TriMesh nodes or edges define a value dimension, will plot
filled and shaded polygons; otherwise returns a wiremesh of the
data.
"""
aggregator = param.ClassSelector(default=ds.mean(),
class_=(ds.reductions.Reduction, basestring))
interpolation = param.ObjectSelector(default='bilinear',
objects=['bilinear', 'linear', None, False], doc="""
The interpolation method to apply during rasterization.""")
def _precompute(self, element, agg):
from datashader.utils import mesh
if element.vdims and getattr(agg, 'column', None) not in element.nodes.vdims:
simplices = element.dframe([0, 1, 2, 3])
verts = element.nodes.dframe([0, 1])
elif element.nodes.vdims:
simplices = element.dframe([0, 1, 2])
verts = element.nodes.dframe([0, 1, 3])
for c, dtype in zip(simplices.columns[:3], simplices.dtypes):
if dtype.kind != 'i':
simplices[c] = simplices[c].astype('int')
return {'mesh': mesh(verts, simplices), 'simplices': simplices,
'vertices': verts}
def _precompute_wireframe(self, element, agg):
if hasattr(element, '_wireframe'):
segments = element._wireframe.data
else:
simplexes = element.array([0, 1, 2, 0]).astype('int')
verts = element.nodes.array([0, 1])
segments = pd.DataFrame(verts[simplexes].reshape(len(simplexes), -1),
columns=['x0', 'y0', 'x1', 'y1', 'x2', 'y2', 'x3', 'y3'])
element._wireframe = Dataset(segments, datatype=['dataframe', 'dask'])
return {'segments': segments}
def _process(self, element, key=None):
if isinstance(element, TriMesh):
x, y = element.nodes.kdims[:2]
else:
x, y = element.kdims
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
agg = self.p.aggregator
interp = self.p.interpolation or None
precompute = self.p.precompute
if interp == 'linear': interp = 'bilinear'
wireframe = False
if (not (element.vdims or (isinstance(element, TriMesh) and element.nodes.vdims))) and ds_version <= '0.6.9':
self.p.aggregator = ds.any() if isinstance(agg, ds.any) or agg == 'any' else ds.count()
return aggregate._process(self, element, key)
elif ((not interp and (isinstance(agg, (ds.any, ds.count)) or
agg in ['any', 'count']))
or not (element.vdims or element.nodes.vdims)):
wireframe = True
precompute = False # TriMesh itself caches wireframe
agg = self._get_aggregator(element) if isinstance(agg, (ds.any, ds.count)) else ds.any()
vdim = 'Count' if isinstance(agg, ds.count) else 'Any'
elif getattr(agg, 'column', None):
if agg.column in element.vdims:
vdim = element.get_dimension(agg.column)
elif isinstance(element, TriMesh) and agg.column in element.nodes.vdims:
vdim = element.nodes.get_dimension(agg.column)
else:
raise ValueError("Aggregation column %s not found on TriMesh element."
% agg.column)
else:
if isinstance(element, TriMesh) and element.nodes.vdims:
vdim = element.nodes.vdims[0]
else:
vdim = element.vdims[0]
agg = self._get_aggregator(element)
if element._plot_id in self._precomputed:
precomputed = self._precomputed[element._plot_id]
elif wireframe:
precomputed = self._precompute_wireframe(element, agg)
else:
precomputed = self._precompute(element, agg)
params = dict(get_param_values(element), kdims=[x, y],
datatype=['xarray'], vdims=[vdim])
if width == 0 or height == 0:
if width == 0: params['xdensity'] = 1
if height == 0: params['ydensity'] = 1
bounds = (x_range[0], y_range[0], x_range[1], y_range[1])
return Image((xs, ys, np.zeros((height, width))), bounds=bounds, **params)
if wireframe:
segments = precomputed['segments']
else:
simplices = precomputed['simplices']
pts = precomputed['vertices']
mesh = precomputed['mesh']
if precompute:
self._precomputed = {element._plot_id: precomputed}
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
if wireframe:
agg = cvs.line(segments, x=['x0', 'x1', 'x2', 'x3'],
y=['y0', 'y1', 'y2', 'y3'], axis=1,
agg=agg)
else:
interpolate = bool(self.p.interpolation)
agg = cvs.trimesh(pts, simplices, agg=agg,
interp=interpolate, mesh=mesh)
return Image(agg, **params)
class quadmesh_rasterize(trimesh_rasterize):
"""
Rasterize the QuadMesh element using the supplied aggregator.
Simply converts to a TriMesh and lets trimesh_rasterize
handle the actual rasterization.
"""
def _precompute(self, element, agg):
if ds_version <= '0.7.0':
return super(quadmesh_rasterize, self)._precompute(element.trimesh(), agg)
def _process(self, element, key=None):
if ds_version <= '0.7.0':
return super(quadmesh_rasterize, self)._process(element, key)
if element.interface.datatype != 'xarray':
element = element.clone(datatype=['xarray'])
data = element.data
x, y = element.kdims
agg_fn = self._get_aggregator(element)
info = self._get_sampling(element, x, y)
(x_range, y_range), (xs, ys), (width, height), (xtype, ytype) = info
if xtype == 'datetime':
data[x.name] = data[x.name].astype('datetime64[us]').astype('int64')
if ytype == 'datetime':
data[y.name] = data[y.name].astype('datetime64[us]').astype('int64')
# Compute bounds (converting datetimes)
((x0, x1), (y0, y1)), (xs, ys) = self._dt_transform(
x_range, y_range, xs, ys, xtype, ytype
)
params = dict(get_param_values(element), datatype=['xarray'],
bounds=(x0, y0, x1, y1))
if width == 0 or height == 0:
return self._empty_agg(element, x, y, width, height, xs, ys, agg_fn, **params)
cvs = ds.Canvas(plot_width=width, plot_height=height,
x_range=x_range, y_range=y_range)
vdim = getattr(agg_fn, 'column', element.vdims[0].name)
agg = cvs.quadmesh(data[vdim], x.name, y.name, agg_fn)
xdim, ydim = list(agg.dims)[:2][::-1]
if xtype == "datetime":
agg[xdim] = (agg[xdim]/1e3).astype('datetime64[us]')
if ytype == "datetime":
agg[ydim] = (agg[ydim]/1e3).astype('datetime64[us]')
return Image(agg, **params)
class shade(LinkableOperation):
"""
shade applies a normalization function followed by colormapping to
an Image or NdOverlay of Images, returning an RGB Element.
The data must be in the form of a 2D or 3D DataArray, but NdOverlays
of 2D Images will be automatically converted to a 3D array.
In the 2D case data is normalized and colormapped, while a 3D
array representing categorical aggregates will be supplied a color
key for each category. The colormap (cmap) for the 2D case may be
supplied as an Iterable or a Callable.
"""
alpha = param.Integer(default=255, bounds=(0, 255), doc="""
Value between 0 - 255 representing the alpha value to use for
colormapped pixels that contain data (i.e. non-NaN values).
Regardless of this value, ``NaN`` values are set to be fully
transparent when doing colormapping.""")
cmap = param.ClassSelector(class_=(Iterable, Callable, dict), doc="""
Iterable or callable which returns colors as hex colors
or web color names (as defined by datashader), to be used
for the colormap of single-layer datashader output.
Callable type must allow mapping colors between 0 and 1.
The default value of None reverts to Datashader's default
colormap.""")
color_key = param.ClassSelector(class_=(Iterable, Callable, dict), doc="""
Iterable or callable that returns colors as hex colors, to
be used for the color key of categorical datashader output.
Callable type must allow mapping colors for supplied values
between 0 and 1.""")
normalization = param.ClassSelector(default='eq_hist',
class_=(basestring, Callable),
doc="""
The normalization operation applied before colormapping.
Valid options include 'linear', 'log', 'eq_hist', 'cbrt',
and any valid transfer function that accepts data, mask, nbins
arguments.""")
clims = param.NumericTuple(default=None, length=2, doc="""
Min and max data values to use for colormap interpolation, when
wishing to override autoranging.
""")
min_alpha = param.Number(default=40, bounds=(0, 255), doc="""
The minimum alpha value to use for non-empty pixels when doing
colormapping, in [0, 255]. Use a higher value to avoid
undersaturation, i.e. poorly visible low-value datapoints, at
the expense of the overall dynamic range.""")
@classmethod
def concatenate(cls, overlay):
"""
Concatenates an NdOverlay of Image types into a single 3D
xarray Dataset.
"""
if not isinstance(overlay, NdOverlay):
raise ValueError('Only NdOverlays can be concatenated')
xarr = xr.concat([v.data.transpose() for v in overlay.values()],
pd.Index(overlay.keys(), name=overlay.kdims[0].name))
params = dict(get_param_values(overlay.last),
vdims=overlay.last.vdims,
kdims=overlay.kdims+overlay.last.kdims)
return Dataset(xarr.transpose(), datatype=['xarray'], **params)
@classmethod
def uint32_to_uint8(cls, img):
"""
Cast uint32 RGB image to 4 uint8 channels.
"""
return np.flipud(img.view(dtype=np.uint8).reshape(img.shape + (4,)))
@classmethod
def uint32_to_uint8_xr(cls, img):
"""
Cast uint32 xarray DataArray to 4 uint8 channels.
"""
new_array = img.values.view(dtype=np.uint8).reshape(img.shape + (4,))
coords = OrderedDict(list(img.coords.items())+[('band', [0, 1, 2, 3])])
return xr.DataArray(new_array, coords=coords, dims=img.dims+('band',))
@classmethod
def rgb2hex(cls, rgb):
"""
Convert RGB(A) tuple to hex.
"""
if len(rgb) > 3:
rgb = rgb[:-1]
return "#{0:02x}{1:02x}{2:02x}".format(*(int(v*255) for v in rgb))
@classmethod
def to_xarray(cls, element):
if issubclass(element.interface, XArrayInterface):
return element
data = tuple(element.dimension_values(kd, expanded=False)
for kd in element.kdims)
data += tuple(element.dimension_values(vd, flat=False)
for vd in element.vdims)
dtypes = [dt for dt in element.datatype if dt != 'xarray']
return element.clone(data, datatype=['xarray']+dtypes,
bounds=element.bounds,
xdensity=element.xdensity,
ydensity=element.ydensity)
def _process(self, element, key=None):
element = element.map(self.to_xarray, Image)
if isinstance(element, NdOverlay):
bounds = element.last.bounds
xdensity = element.last.xdensity
ydensity = element.last.ydensity
element = self.concatenate(element)
elif isinstance(element, Overlay):
return element.map(self._process, [Element])
else:
xdensity = element.xdensity
ydensity = element.ydensity
bounds = element.bounds
vdim = element.vdims[0].name
array = element.data[vdim]
kdims = element.kdims
# Compute shading options depending on whether
# it is a categorical or regular aggregate
shade_opts = dict(how=self.p.normalization,
min_alpha=self.p.min_alpha,
alpha=self.p.alpha)
if element.ndims > 2:
kdims = element.kdims[1:]
categories = array.shape[-1]
if not self.p.color_key:
pass
elif isinstance(self.p.color_key, dict):
shade_opts['color_key'] = self.p.color_key
elif isinstance(self.p.color_key, Iterable):
shade_opts['color_key'] = [c for i, c in
zip(range(categories), self.p.color_key)]
else:
colors = [self.p.color_key(s) for s in np.linspace(0, 1, categories)]
shade_opts['color_key'] = map(self.rgb2hex, colors)
elif not self.p.cmap:
pass
elif isinstance(self.p.cmap, Callable):
colors = [self.p.cmap(s) for s in np.linspace(0, 1, 256)]
shade_opts['cmap'] = map(self.rgb2hex, colors)
else:
shade_opts['cmap'] = self.p.cmap
if self.p.clims:
shade_opts['span'] = self.p.clims
elif ds_version > '0.5.0' and self.p.normalization != 'eq_hist':
shade_opts['span'] = element.range(vdim)
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'invalid value encountered in true_divide')
if np.isnan(array.data).all():
arr = np.zeros(array.data.shape, dtype=np.uint32)
img = array.copy()
img.data = arr
else:
img = tf.shade(array, **shade_opts)
params = dict(get_param_values(element), kdims=kdims,
bounds=bounds, vdims=RGB.vdims[:],
xdensity=xdensity, ydensity=ydensity)
return RGB(self.uint32_to_uint8_xr(img), **params)
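# Minimal usage sketch of shade (for illustration only; assumes `agg` is a 2D
# aggregate Image and `cat_agg` an NdOverlay of categorical aggregates whose
# keys include 'a' and 'b'):
def _example_shade_usage(agg, cat_agg):
    # Single-layer aggregate: colormap with a two-color ramp and log scaling.
    rgb = shade(agg, cmap=['lightblue', 'darkblue'], normalization='log')
    # Categorical aggregate: supply an explicit color per category.
    cat_rgb = shade(cat_agg, color_key={'a': '#1f77b4', 'b': '#ff7f0e'})
    return rgb, cat_rgb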
class rasterize(AggregationOperation):
"""
Rasterize is a high-level operation that will rasterize any
Element or combination of Elements, aggregating them with the supplied
aggregator and interpolation method.
The default aggregation method depends on the type of Element but
usually defaults to the count of samples in each bin. Other
aggregators can be supplied implementing mean, max, min and other
reduction operations.
The bins of the aggregate are defined by the width and height and
the x_range and y_range. If x_sampling or y_sampling are supplied
the operation will ensure that a bin is no smaller than the minimum
sampling distance by reducing the width and height when zoomed in
beyond the minimum sampling distance.
By default, the PlotSize and RangeXY streams are applied when this
operation is used dynamically, which means that the width, height,
x_range and y_range will automatically be set to match the inner
dimensions of the linked plot and the ranges of the axes.
"""
aggregator = param.ClassSelector(class_=(ds.reductions.Reduction, basestring),
default=None)
interpolation = param.ObjectSelector(
default='bilinear', objects=['linear', 'nearest', 'bilinear', None, False], doc="""
The interpolation method to apply during rasterization.
Defaults to linear interpolation; None and False are aliases
of each other.""")
_transforms = [(Image, regrid),
(TriMesh, trimesh_rasterize),
(QuadMesh, quadmesh_rasterize),
(lambda x: (isinstance(x, NdOverlay) and
issubclass(x.type, (Scatter, Points, Curve, Path))),
aggregate),
(Spikes, spikes_aggregate),
(Area, area_aggregate),
(Spread, spread_aggregate),
(Segments, segments_aggregate),
(Contours, contours_rasterize),
(Graph, aggregate),
(Scatter, aggregate),
(Points, aggregate),
(Curve, aggregate),
(Path, aggregate),
(type(None), shade) # To handle parameters of datashade
]
def _process(self, element, key=None):
# Potentially needs traverse to find element types first?
all_allowed_kws = set()
all_supplied_kws = set()
for predicate, transform in self._transforms:
op_params = dict({k: v for k, v in self.p.items()
if not (v is None and k == 'aggregator')},
dynamic=False)
extended_kws = dict(op_params, **self.p.extra_keywords())
all_supplied_kws |= set(extended_kws)
all_allowed_kws |= set(transform.param)
# Collect the union of supplied keywords versus the union of allowed ones.
op = transform.instance(**{k:v for k,v in extended_kws.items()
if k in transform.param})
op._precomputed = self._precomputed
element = element.map(op, predicate)
self._precomputed = op._precomputed
unused_params = list(all_supplied_kws - all_allowed_kws)
if unused_params:
self.warning('Parameters %s not consumed by any element rasterizer.'
% ', '.join(unused_params))
return element
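# Minimal usage sketch of rasterize (for illustration only; assumes `points`
# is a Points element with at least one value dimension):
def _example_rasterize_usage(points):
    # Dispatches to the appropriate aggregation operation for the element type
    # and enforces a minimum bin size via x_sampling/y_sampling.
    return rasterize(points, aggregator='mean',
                     x_sampling=0.1, y_sampling=0.1, dynamic=False)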
class datashade(rasterize, shade):
"""
Applies the aggregate and shade operations, aggregating all
elements in the supplied object and then applying normalization
and colormapping the aggregated data returning RGB elements.
See aggregate and shade operations for more details.
"""
def _process(self, element, key=None):
agg = rasterize._process(self, element, key)
shaded = shade._process(self, agg, key)
return shaded
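# Minimal usage sketch of datashade (for illustration only; the column names
# 'x', 'y' and 'cat' and the category labels are assumptions):
def _example_datashade_usage(df):
    points = Points(df, ['x', 'y'], ['cat'])
    # Count per category and shade with an explicit color key.
    return datashade(points, aggregator=ds.count_cat('cat'),
                     color_key={'a': 'red', 'b': 'blue'}, dynamic=False)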
class stack(Operation):
"""
The stack operation allows compositing multiple RGB Elements using
the defined compositing operator.
"""
compositor = param.ObjectSelector(objects=['add', 'over', 'saturate', 'source'],
default='over', doc="""
Defines how the compositing operation combines the images""")
def uint8_to_uint32(self, element):
img = np.dstack([element.dimension_values(d, flat=False)
for d in element.vdims])
if img.shape[2] == 3: # alpha channel not included
alpha = np.ones(img.shape[:2])
if img.dtype.name == 'uint8':
alpha = (alpha*255).astype('uint8')
img = np.dstack([img, alpha])
if img.dtype.name != 'uint8':
img = (img*255).astype(np.uint8)
N, M, _ = img.shape
return img.view(dtype=np.uint32).reshape((N, M))
def _process(self, overlay, key=None):
if not isinstance(overlay, CompositeOverlay):
return overlay
elif len(overlay) == 1:
return overlay.last if isinstance(overlay, NdOverlay) else overlay.get(0)
imgs = []
for rgb in overlay:
if not isinstance(rgb, RGB):
raise TypeError("The stack operation expects elements of type RGB, "
"not '%s'." % type(rgb).__name__)
rgb = rgb.rgb
dims = [kd.name for kd in rgb.kdims][::-1]
coords = {kd.name: rgb.dimension_values(kd, False)
for kd in rgb.kdims}
imgs.append(tf.Image(self.uint8_to_uint32(rgb), coords=coords, dims=dims))
try:
imgs = xr.align(*imgs, join='exact')
except ValueError:
raise ValueError('RGB inputs to the stack operation could not be aligned; '
'ensure they share the same grid sampling.')
stacked = tf.stack(*imgs, how=self.p.compositor)
arr = shade.uint32_to_uint8(stacked.data)[::-1]
data = (coords[dims[1]], coords[dims[0]], arr[:, :, 0],
arr[:, :, 1], arr[:, :, 2])
if arr.shape[-1] == 4:
data = data + (arr[:, :, 3],)
return rgb.clone(data, datatype=[rgb.interface.datatype]+rgb.datatype)
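# Minimal usage sketch of stack (for illustration only; assumes `rgb1` and
# `rgb2` are RGB elements on the same grid, e.g. two datashade outputs):
def _example_stack_usage(rgb1, rgb2):
    # Composite the two images by adding their pixel values.
    return stack(rgb1 * rgb2, compositor='add')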
class SpreadingOperation(LinkableOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible.
"""
how = param.ObjectSelector(default='source',
objects=['source', 'over', 'saturate', 'add'], doc="""
The name of the compositing operator to use when combining
pixels.""")
shape = param.ObjectSelector(default='circle', objects=['circle', 'square'],
doc="""
The shape to spread by. Options are 'circle' [default] or 'square'.""")
@classmethod
def uint8_to_uint32(cls, img):
shape = img.shape
flat_shape = np.multiply.reduce(shape[:2])
rgb = img.reshape((flat_shape, 4)).view('uint32').reshape(shape[:2])
return rgb
def _apply_spreading(self, array):
"""Apply the spread function using the indicated parameters."""
raise NotImplementedError
def _process(self, element, key=None):
if not isinstance(element, RGB):
raise ValueError('spreading can only be applied to RGB Elements.')
rgb = element.rgb
new_data = {kd.name: rgb.dimension_values(kd, expanded=False)
for kd in rgb.kdims}
rgbarray = np.dstack([element.dimension_values(vd, flat=False)
for vd in element.vdims])
data = self.uint8_to_uint32(rgbarray)
array = self._apply_spreading(data)
img = datashade.uint32_to_uint8(array)
for i, vd in enumerate(element.vdims):
if i < img.shape[-1]:
new_data[vd.name] = np.flipud(img[..., i])
return element.clone(new_data)
class spread(SpreadingOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible.
See the datashader documentation for more detail:
http://datashader.org/api.html#datashader.transfer_functions.spread
"""
px = param.Integer(default=1, doc="""
Number of pixels to spread on all sides.""")
def _apply_spreading(self, array):
img = tf.Image(array)
return tf.spread(img, px=self.p.px,
how=self.p.how, shape=self.p.shape).data
class dynspread(SpreadingOperation):
"""
Spreading expands each pixel in an Image based Element a certain
number of pixels on all sides according to a given shape, merging
pixels using a specified compositing operator. This can be useful
to make sparse plots more visible. Dynamic spreading determines
how many pixels to spread based on a density heuristic.
See the datashader documentation for more detail:
http://datashader.org/api.html#datashader.transfer_functions.dynspread
"""
max_px = param.Integer(default=3, doc="""
Maximum number of pixels to spread on all sides.""")
threshold = param.Number(default=0.5, bounds=(0,1), doc="""
When spreading, determines how far to spread.
Spreading starts at 1 pixel, and stops when the fraction
of adjacent non-empty pixels reaches this threshold.
Higher values give more spreading, up to the max_px
allowed.""")
def _apply_spreading(self, array):
img = tf.Image(array)
return tf.dynspread(img, max_px=self.p.max_px,
threshold=self.p.threshold,
how=self.p.how, shape=self.p.shape).data
def split_dataframe(path_df):
"""
Splits a dataframe of paths separated by NaNs into individual
dataframes.
"""
splits = np.where(path_df.iloc[:, 0].isnull())[0]+1
return [df for df in np.split(path_df, splits) if len(df) > 1]
class _connect_edges(Operation):
split = param.Boolean(default=False, doc="""
Determines whether bundled edges will be split into individual edges
or concatenated with NaN separators.""")
def _bundle(self, position_df, edges_df):
raise NotImplementedError('_connect_edges is an abstract baseclass '
'and does not implement any actual bundling.')
def _process(self, element, key=None):
index = element.nodes.kdims[2].name
rename_edges = {d.name: v for d, v in zip(element.kdims[:2], ['source', 'target'])}
rename_nodes = {d.name: v for d, v in zip(element.nodes.kdims[:2], ['x', 'y'])}
position_df = element.nodes.redim(**rename_nodes).dframe([0, 1, 2]).set_index(index)
edges_df = element.redim(**rename_edges).dframe([0, 1])
paths = self._bundle(position_df, edges_df)
paths = paths.rename(columns={v: k for k, v in rename_nodes.items()})
paths = split_dataframe(paths) if self.p.split else [paths]
return element.clone((element.data, element.nodes, paths))
class bundle_graph(_connect_edges, hammer_bundle):
"""
Iteratively group edges and return as paths suitable for datashading.
Breaks each edge into a path with multiple line segments, and
iteratively curves this path to bundle edges into groups.
"""
def _bundle(self, position_df, edges_df):
from datashader.bundling import hammer_bundle
return hammer_bundle.__call__(self, position_df, edges_df, **self.p)
class directly_connect_edges(_connect_edges, connect_edges):
"""
Given a Graph object will directly connect all nodes.
"""
def _bundle(self, position_df, edges_df):
return connect_edges.__call__(self, position_df, edges_df)
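# Usage sketch (assuming a HoloViews Graph element named `graph`):
#   bundle_graph(graph)            # edges become curved, bundled paths via hammer_bundle
#   directly_connect_edges(graph)  # edges become straight node-to-node segments
# Both return a clone of the graph carrying the computed edge paths.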
| 41.781994 | 117 | 0.587801 |
565a9635535ed7a4f195e5a435dd8b1eae076cc3 | 2,146 | py | Python | rdkit/Dbase/DbModule.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 1,609 | 2015-01-05T02:41:13.000Z | 2022-03-30T21:57:24.000Z | rdkit/Dbase/DbModule.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 3,412 | 2015-01-06T12:13:33.000Z | 2022-03-31T17:25:41.000Z | rdkit/Dbase/DbModule.py | bp-kelley/rdkit | e0de7c9622ce73894b1e7d9568532f6d5638058a | [
"BSD-3-Clause"
] | 811 | 2015-01-11T03:33:48.000Z | 2022-03-28T11:57:49.000Z | # $Id$
#
# Copyright (C) 2003-2006 Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit import RDConfig
if hasattr(RDConfig, "usePgSQL") and RDConfig.usePgSQL:
from pyPgSQL import PgSQL
# as of this writing (March 2004), this results in a speedup in
# getting results back from the wrapper:
PgSQL.fetchReturnsList = 1
from pyPgSQL.PgSQL import *
sqlTextTypes = [PG_CHAR, PG_BPCHAR, PG_TEXT, PG_VARCHAR, PG_NAME]
sqlIntTypes = [PG_INT8, PG_INT2, PG_INT4]
sqlFloatTypes = [PG_FLOAT4, PG_FLOAT8]
sqlBinTypes = [PG_OID, PG_BLOB, PG_BYTEA]
getTablesSql = """select tablename from pg_tables where schemaname='public'"""
getTablesAndViewsSql = """SELECT c.relname as "Name"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_user u ON u.usesysid = c.relowner
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','v','S','')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
"""
getDbSql = """ select datname from pg_database where datallowconn """
fileWildcard = None
placeHolder = '%s'
binaryTypeName = "bytea"
binaryHolder = PgBytea
RDTestDatabase = "::RDTests"
elif hasattr(RDConfig, "useSqlLite") and RDConfig.useSqlLite:
try:
import sqlite3 as sqlite
except ImportError:
from pysqlite2 import dbapi2 as sqlite
sqlTextTypes = []
sqlIntTypes = []
sqlFloatTypes = []
sqlBinTypes = []
getTablesSql = """select name from SQLite_Master where type='table'"""
getTablesAndViewsSql = """select name from SQLite_Master where type in ('table','view')"""
getDbSql = None
dbFileWildcard = '*.sqlt'
fileWildcard = dbFileWildcard
placeHolder = '?'
binaryTypeName = "blob"
binaryHolder = memoryview
def connect(x, *args):
return sqlite.connect(x)
else:
raise ImportError("Neither sqlite nor PgSQL support found.")
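# Usage sketch (assuming the sqlite branch above was taken; the table name is made up):
#   conn = connect('/tmp/example.sqlt')
#   cursor = conn.cursor()
#   cursor.execute('select * from mols where id = %s' % placeHolder, (1,))
# Writing queries against `placeHolder` keeps them portable between the '?' style
# used by sqlite and the '%s' style used by PgSQL.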
| 34.612903 | 94 | 0.693383 |
e6bf4ccdc8ca755e4e309e102d25e5c3dd7f72f4 | 1,069 | py | Python | backend/src/migrations/__init__.py | wooshifu/Blog | a8ad6a7e1e1bb312b64f54414e5d945b73df5e1d | [
"MIT"
] | null | null | null | backend/src/migrations/__init__.py | wooshifu/Blog | a8ad6a7e1e1bb312b64f54414e5d945b73df5e1d | [
"MIT"
] | 3 | 2021-05-07T11:38:25.000Z | 2022-02-26T10:26:47.000Z | backend/src/migrations/__init__.py | wooshifu/Blog | a8ad6a7e1e1bb312b64f54414e5d945b73df5e1d | [
"MIT"
] | null | null | null | """
This directory is used for upgrading database schemas. If you change the database DDL,
you should add a file named 'mxxxxxxxx.py' here to define the upgrade process.
"""
import re
from pathlib import Path
from models.migration import Migration as MigrationModel
from .migration import Migration
__current_dir = Path(__file__).parent
__files = [f.stem for f in __current_dir.iterdir()]
_migration_file_pattern = re.compile(r'm\d{8}')
_migrations = sorted(filter(lambda file_name: re.match(_migration_file_pattern, file_name), __files))
_latest_migration = MigrationModel.query.order_by(MigrationModel.id.desc()).first()
if _latest_migration is None:
for migration in _migrations:
exec(f'from . import {migration}')
exec(f'{migration}.{migration.upper()}.{Migration.run_upgrade.__name__}("{migration}")')
else:
for migration in _migrations:
if migration > _latest_migration.migration:
exec(f'from . import {migration}')
exec(f'{migration}.{migration.upper()}.{Migration.run_upgrade.__name__}("{migration}")')
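# Example layout (hypothetical file names): adding m20190101.py and m20190215.py to this
# package, each defining a class M20190101 / M20190215 built on Migration, makes them run
# in lexical (i.e. chronological) order; files at or below the last recorded Migration
# entry are skipped on subsequent startups.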
| 36.862069 | 101 | 0.738073 |
99baad001f9527c324aad5d74a710fcf9f75769a | 221 | py | Python | sqlite_dissect/file/__init__.py | Defense-Cyber-Crime-Center/sqlite-dissect | e1a6c19928bc092bf7aeaff71072634f77a452ea | [
"MIT"
] | 12 | 2021-10-21T21:23:51.000Z | 2022-03-13T03:01:53.000Z | sqlite_dissect/file/__init__.py | hartescout/sqlite-dissect | c4cd1047837a8bc6bbfa69f448149d046a7cc703 | [
"MIT"
] | 21 | 2021-09-13T17:00:33.000Z | 2022-03-31T12:56:56.000Z | sqlite_dissect/file/__init__.py | hartescout/sqlite-dissect | c4cd1047837a8bc6bbfa69f448149d046a7cc703 | [
"MIT"
] | 1 | 2021-10-21T22:00:07.000Z | 2021-10-21T22:00:07.000Z |
"""
__init__.py
This init script will initialize any needed logic for this package.
This package will control parsing and access to all (supported) sqlite files including the
database, rollback journal, and wal.
"""
| 18.416667 | 90 | 0.769231 |
f07d5d42b4b42b1b3eeaeb2f852b60ac52c6fadb | 688 | py | Python | RoboticsLanguage/Inputs/FaultToleranceSystem/Tests/test_FaultToleranceSystem.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 64 | 2018-05-15T14:36:44.000Z | 2022-03-09T05:00:31.000Z | RoboticsLanguage/Inputs/FaultToleranceSystem/Tests/test_FaultToleranceSystem.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 9 | 2018-04-17T21:12:27.000Z | 2019-11-08T20:53:32.000Z | RoboticsLanguage/Inputs/FaultToleranceSystem/Tests/test_FaultToleranceSystem.py | robotcaresystems/roboticslanguage | 3bb7a2bf64ab8e9068889713fbeb18a45cd5a3ed | [
"Apache-2.0"
] | 10 | 2018-03-27T12:09:12.000Z | 2021-02-16T08:07:26.000Z | #
# This is the Robotics Language compiler
#
# test_fault_tolerance_system.py: Unit testing file
#
# Created on: 08 March, 2019
# Author: Gabriel Lopes
# Licence: license
# Copyright: copyright
#
import unittest
from RoboticsLanguage.Inputs.FaultToleranceSystem import Parse
class TestFaultToleranceSystem(unittest.TestCase):
# -------------------------------------------------------------------------------------------------
# Fault Tolerance system tests
# -------------------------------------------------------------------------------------------------
def test_template(self):
self.assertEqual(1,1)
if __name__ == '__main__':
unittest.main()
| 25.481481 | 101 | 0.515988 |
7b79d1c27d728ef74a847c49e1332eeb7d316505 | 804 | py | Python | examples/settings.py | seankmartin/brainrender | c845fc33b9936aa193feb1d750464114d3bdcdb7 | [
"BSD-3-Clause"
] | null | null | null | examples/settings.py | seankmartin/brainrender | c845fc33b9936aa193feb1d750464114d3bdcdb7 | [
"BSD-3-Clause"
] | null | null | null | examples/settings.py | seankmartin/brainrender | c845fc33b9936aa193feb1d750464114d3bdcdb7 | [
"BSD-3-Clause"
] | null | null | null | """
Brainrender provides several default settings (e.g. for shader style)
which can be changed to personalize your rendering.
This example shows you how.
"""
import brainrender
from brainrender import Scene
from rich import print
from myterial import orange
from pathlib import Path
print(f"[{orange}]Running example: {Path(__file__).name}")
brainrender.settings.BACKGROUND_COLOR = [
0.22,
0.22,
0.22,
] # change rendering background color
brainrender.settings.WHOLE_SCREEN = (
False # make the rendering window be smaller
)
brainrender.settings.SHOW_AXES = (
False # turn off the axes display
)
# make scenes with different shader styles
for shader in ("plastic", "cartoon"):
brainrender.settings.SHADER_STYLE = shader
scene = Scene(title=shader)
scene.render()
| 22.971429 | 68 | 0.738806 |
e523c7d017a09d14d197b814c5b8818a019d97fd | 5,434 | py | Python | exercise2/exercise2.py | ricardolopes86/nvm-exercises | e913d53d7d1298a29c856c94495d54a0d0c0cdea | [
"Apache-2.0"
] | null | null | null | exercise2/exercise2.py | ricardolopes86/nvm-exercises | e913d53d7d1298a29c856c94495d54a0d0c0cdea | [
"Apache-2.0"
] | null | null | null | exercise2/exercise2.py | ricardolopes86/nvm-exercises | e913d53d7d1298a29c856c94495d54a0d0c0cdea | [
"Apache-2.0"
] | null | null | null | """
Playing with numbers.
This awesome program will perform some operations on a list of numbers.
You should input *exactly* 6 numbers right after the command.
"""
import sys
import json
import datetime
import random
from functools import reduce
import operator
def format_filename():
"""
Just a simple method to format the filename for the JSON file that will be generated. It's based on the current date and time.
"""
current_time = datetime.datetime.now()
return ""+str(current_time.year)+"-"+str(current_time.month)+"-"+str(current_time.day)+"_"+str(current_time.hour)+"-"+str(current_time.minute)+"-"+str(current_time.second)
def write_to_json(data):
"""
The method to write the JSON generated in the multiplication method.
"""
file_name = "{}.json".format(format_filename())
with open(file_name, "w") as json_file:
json.dump(data, json_file)
print("The content was saved to a file called: {}\n".format(file_name))
def multiplication(numbers_list):
"""
Multiply the numbers in the array/list. Print them as JSON and write it to a file.
"""
multi = 1
multiplication_data = {
"InputNumber1": numbers[0],
"InputNumber2": numbers[1],
"InputNumber3": numbers[2],
"InputNumber4": numbers[3],
"InputNumber5": numbers[4],
"InputNumber6": numbers[5],
"Multiplication": multi
}
for number in numbers_list:
multi = int(number) * multi
multiplication_data["Multiplication"] = multi
print("Printing your JSON below:")
print(json.dumps(multiplication_data, indent=4, sort_keys=True))
write_to_json(multiplication_data)
def subtract(num):
"""
Will return the result of subtracting all the numbers in the array/list.
"""
numbers_diff = reduce(operator.sub, num)
return numbers_diff
def random_number(numbers_list):
"""
Pick a random number using the random module and the length of the array.
"""
index = random.randint(0, (len(numbers_list)-1))
print("Picking a random number from the list of your numbers. And the chosen one is: {}".format(numbers[index]))
def merge_sort(numbers_list):
"""
Classic merge sort algorithm to sort an array/list.
"""
if len(numbers_list) > 1:
middle = len(numbers_list) // 2 # divide array length in half and use the "//" operator to *floor* the result
left_numbers = numbers_list[:middle] # fill in left array
right_numbers = numbers_list[middle:] # fill in right array
merge_sort(left_numbers) # Sorting the first half
merge_sort(right_numbers) # Sorting the second half
left_index = 0
right_index = 0
current_index = 0
        # compare each index of the sublists, adding the lowest value at current_index
        while left_index < len(left_numbers) and right_index < len(right_numbers):
            if left_numbers[left_index] < right_numbers[right_index]:
                numbers_list[current_index] = left_numbers[left_index]
                left_index += 1
            else:
                numbers_list[current_index] = right_numbers[right_index]
                right_index += 1
            current_index += 1
        # copy remaining elements of left_numbers[] if any
        while left_index < len(left_numbers):
            numbers_list[current_index] = left_numbers[left_index]
            left_index += 1
            current_index += 1
        # copy remaining elements of right_numbers[] if any
        while right_index < len(right_numbers):
            numbers_list[current_index] = right_numbers[right_index]
            right_index += 1
            current_index += 1
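# Example (illustrative input): merge_sort([4, 2, 6, 1, 3, 5]) sorts the list in
# place, leaving it as [1, 2, 3, 4, 5, 6].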
def sorted_low_to_high(numbers_list):
"""
Call a merge sort function to sort the list/array
"""
merge_sort(numbers_list)
print(numbers_list)
def sorted_reverse(numbers_list):
"""
Using embedded sorted() function to reverse the array.
"""
print(sorted(numbers_list, reverse=True))
def menu(numbers_list):
"""
Shows menu after successfully validate the inputs
"""
numbers_selected = "The numbers you've selected are: {}".format(numbers_list)
options = """
1 - Multiply, show result and write to JSON file
2 - Subtract
3 - Show random number
4 - Show sorted list (from lower to highest)
5 - Show reverse sorted list (from highest to lower)\n"""
while True:
print(numbers_selected, options)
chosen = input("Please, select one:")
if chosen == '1':
multiplication(numbers_list)
elif chosen == '2':
result = subtract(numbers_list)
print("The subtraction value is: {}".format(result))
elif chosen == '3':
            random_number(numbers_list)
elif chosen == '4':
sorted_low_to_high(numbers_list)
elif chosen == '5':
sorted_reverse(numbers_list)
else:
print("Unknown Option Selected!")
if __name__ == "__main__":
try:
if len(sys.argv) < 7:
raise Exception("Arguments must have at least 6 numbers")
if len(sys.argv) > 7:
raise Exception("Arguments can't be longer than 6 numbers")
except Exception as error:
print(error)
else:
        numbers = [int(arg) for arg in sys.argv[1:7]]
menu(numbers)
| 33.54321 | 175 | 0.640228 |
ef24d5296ce009c73f9f1266ec0dda0671283ed6 | 1,352 | py | Python | python/AnalysisLaunchers/delete_analysis.py | FabricGenomics/omicia_api_examples | b761d40744032720bdf1a4f59877e16b8b1dfcf0 | [
"MIT"
] | 2 | 2017-06-13T13:59:17.000Z | 2021-12-17T18:52:08.000Z | python/AnalysisLaunchers/delete_analysis.py | FabricGenomics/omicia_api_examples | b761d40744032720bdf1a4f59877e16b8b1dfcf0 | [
"MIT"
] | 1 | 2017-11-22T00:20:19.000Z | 2017-11-22T00:41:05.000Z | python/AnalysisLaunchers/delete_analysis.py | FabricGenomics/omicia_api_examples | b761d40744032720bdf1a4f59877e16b8b1dfcf0 | [
"MIT"
] | 3 | 2020-03-05T18:41:36.000Z | 2021-01-14T08:31:30.000Z | """
Delete analysis by id
Example usages:
python delete_analysis.py --id 1802
"""
import os
import requests
from requests.auth import HTTPBasicAuth
import sys
import simplejson as json
import argparse
# Load environment variables for request authentication parameters
if "FABRIC_API_PASSWORD" not in os.environ:
sys.exit("FABRIC_API_PASSWORD environment variable missing")
if "FABRIC_API_LOGIN" not in os.environ:
sys.exit("FABRIC_API_LOGIN environment variable missing")
FABRIC_API_LOGIN = os.environ['FABRIC_API_LOGIN']
FABRIC_API_PASSWORD = os.environ['FABRIC_API_PASSWORD']
FABRIC_API_URL = os.environ.get('FABRIC_API_URL', 'https://api.fabricgenomics.com')
auth = HTTPBasicAuth(FABRIC_API_LOGIN, FABRIC_API_PASSWORD)
def delete_analysis(args):
"""Use the Fabric API to get variants
"""
url = '{}/analysis/{}'.format(FABRIC_API_URL, args.id)
result = requests.delete(url, auth=auth)
return result.text
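# Example request (hypothetical ID taken from the usage string above): --id 1802
# issues DELETE {FABRIC_API_URL}/analysis/1802 with HTTP basic auth credentials
# read from the environment.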
def main():
"""Main function. Delete analysis by ID.
"""
parser = argparse.ArgumentParser(
description='Delete a Variant, VAAST or Phevor Analysis')
parser.add_argument('--id', required=True, metavar='Analysis ID', type=int)
args = parser.parse_args()
sys.stdout.write(delete_analysis(args))
sys.stdout.write('\n')
if __name__ == "__main__":
main()
| 26.509804 | 83 | 0.727811 |
c23c5f4d2676303d1d583c76a15b6d9a78ec89da | 4,111 | py | Python | loca_navi_demo.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | null | null | null | loca_navi_demo.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | null | null | null | loca_navi_demo.py | XiaoLiSean/Cognitive-Map | 6b2019e5b3a46902b06c8d5d1e86b39425042de9 | [
"MIT"
] | 1 | 2021-11-04T06:25:31.000Z | 2021-11-04T06:25:31.000Z | import argparse, copy
import multiprocessing, time
from lib.navigation import Navigation
from Map.map_plotter import Plotter
from distutils.util import strtobool
parser = argparse.ArgumentParser()
parser.add_argument("--scene_type", type=int, default=1, help="Choose scene type for simulation, 1 for Kitchens, 2 for Living rooms, 3 for Bedrooms, 4 for Bathrooms")
parser.add_argument("--scene_num", type=int, default=30, help="Choose scene num for simulation, from 1 - 30")
parser.add_argument("--grid_size", type=float, default=0.25, help="Grid size of AI2THOR simulation")
parser.add_argument("--rotation_step", type=float, default=10, help="Rotation step of AI2THOR simulation")
parser.add_argument("--sleep_time", type=float, default=0.005, help="Sleep time between two actions")
parser.add_argument("--save_directory", type=str, default='./data', help="Data saving directory")
parser.add_argument("--overwrite_data", type=lambda x: bool(strtobool(x)), default=False, help="overwrite the existing data or not")
parser.add_argument("--log_level", type=int, default=2, help="Level of showing log 1-5 where 5 is most detailed")
parser.add_argument("--debug", type=lambda x: bool(strtobool(x)), default=False, help="Output debug info if True")
parser.add_argument("--test_data", type=lambda x: bool(strtobool(x)), default=False, help="True for collecting test dataset")
parser.add_argument("--special", type=lambda x: bool(strtobool(x)), default=False, help="True for collecting special long range dataset")
parser.add_argument("--AI2THOR", type=lambda x: bool(strtobool(x)), default=False, help="True for RobotTHOR false for ITHOR")
args = parser.parse_args()
def navigation_fcn(server, comfirmed, initialized):
navigation = Navigation(netName='rnet', scene_type=args.scene_type, scene_num=args.scene_num, save_directory=args.save_directory, AI2THOR=args.AI2THOR, server=server, comfirmed=comfirmed)
navigation.Update_node_generator()
navigation.Update_topo_map_env()
navigation.Update_planner_env()
# Send information to initialize plot map
scene_info = navigation.Robot._AI2THOR_controller.get_scene_info()
server.send(scene_info)
# Navigation task
navigation.node_generator.Shuffle_scene()
navigation.Closed_loop_nav(current_node_index=10, current_orientation=0, goal_node_index=5, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=10, current_orientation=180, goal_node_index=9, goal_orientation=180)
# navigation.Closed_loop_nav(current_node_index=9, current_orientation=180, goal_node_index=3, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=1, current_orientation=0, goal_node_index=16, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=16, current_orientation=0, goal_node_index=3, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=3, current_orientation=0, goal_node_index=4, goal_orientation=0)
# navigation.Closed_loop_nav(current_node_index=4, current_orientation=0, goal_node_index=4, goal_orientation=90)
# navigation.nav_test_simplified()
# while True:
# if initialized.value:
# navigation.Closed_loop_nav(goal_node_index=3, goal_orientation=270)
# navigation.Closed_loop_nav(goal_node_index=2, goal_orientation=270)
# break
def visualization_fcn(client, comfirmed, initialized):
scene_info = client.recv()
visualization_panel = Plotter(*scene_info, client=client, comfirmed=comfirmed)
initialized.value = 1
while True:
visualization_panel.show_map()
if __name__ == '__main__':
comfirmed = multiprocessing.Value('i') # Int value: 1 for confirm complete task and other process can go on while 0 otherwise
comfirmed.value = 0
initialized = multiprocessing.Value('i') # Int value
initialized.value = 0
server, client = multiprocessing.Pipe() # server send date and client receive data
navi_node = multiprocessing.Process(target=navigation_fcn, args=(server, comfirmed, initialized))
visual_node = multiprocessing.Process(target=visualization_fcn, args=(client, comfirmed, initialized))
navi_node.start()
visual_node.start()
navi_node.join()
visual_node.join()
| 60.455882 | 188 | 0.793238 |
8cd738a48d07eba7892449bedcd882980b561d53 | 42,424 | py | Python | src/robotide/ui/treeplugin.py | cbeytas/RIDE | 183747c66b80fcc8e5b79139278f928858944c5b | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-30T21:24:19.000Z | 2021-11-30T21:24:19.000Z | src/robotide/ui/treeplugin.py | cbeytas/RIDE | 183747c66b80fcc8e5b79139278f928858944c5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robotide/ui/treeplugin.py | cbeytas/RIDE | 183747c66b80fcc8e5b79139278f928858944c5b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from wx.lib.agw import customtreectrl
from wx.lib.mixins import treemixin
from wx import Colour
from wx.lib.agw.aui import GetManager
TREETEXTCOLOUR = Colour(0xA9, 0xA9, 0xA9)
from robotide.lib.robot.utils.compat import with_metaclass
from robotide.controller.ui.treecontroller import TreeController, \
TestSelectionController
from robotide.context import IS_WINDOWS
from robotide.action.actioninfo import ActionInfo
from robotide.controller.filecontrollers import ResourceFileController
from robotide.publish.messages import RideTestRunning, RideTestPaused, \
RideTestPassed, RideTestFailed, RideTestExecutionStarted, \
RideImportSetting, RideExcludesChanged, RideIncludesChanged, \
RideOpenSuite, RideNewProject
from robotide.ui.images import RUNNING_IMAGE_INDEX, PASSED_IMAGE_INDEX, \
FAILED_IMAGE_INDEX, PAUSED_IMAGE_INDEX, ROBOT_IMAGE_INDEX
from robotide.ui.treenodehandlers import TestCaseHandler
from robotide.publish import PUBLISHER, RideTreeSelection, RideFileNameChanged,\
RideItem, RideUserKeywordAdded, RideTestCaseAdded, RideUserKeywordRemoved,\
RideTestCaseRemoved, RideDataFileRemoved, RideDataChangedToDirty,\
RideDataDirtyCleared, RideVariableRemoved, RideVariableAdded,\
RideVariableMovedUp, RideVariableMovedDown, RideVariableUpdated,\
RideOpenResource, RideSuiteAdded, RideSelectResource, RideDataFileSet
from robotide.controller.ctrlcommands import MoveTo
from robotide.pluginapi import Plugin, ActionInfo
from robotide.widgets import PopupCreator
from robotide import utils
from .treenodehandlers import ResourceRootHandler, action_handler_class, ResourceFileHandler
from .images import TreeImageList
# Metaclass fix from http://code.activestate.com/recipes/204197-solving-the-metaclass-conflict/
from robotide.utils.noconflict import classmaker
_TREE_ARGS = {'style': wx.TR_DEFAULT_STYLE}
_TREE_ARGS['agwStyle'] = customtreectrl.TR_DEFAULT_STYLE | customtreectrl.TR_HIDE_ROOT | \
customtreectrl.TR_EDIT_LABELS
_TREE_ARGS['agwStyle'] |= customtreectrl.TR_TOOLTIP_ON_LONG_ITEMS
if IS_WINDOWS:
_TREE_ARGS['style'] |= wx.TR_EDIT_LABELS
class TreePlugin(Plugin):
"""Provides a tree view for Test Suites """
datafile = property(lambda self: self.get_selected_datafile())
defaults = {"opened": True,
"docked": True
}
def __init__(self, application):
Plugin.__init__(self, application, default_settings=self.defaults)
self.settings = application.settings._config_obj['Plugins']['Tree']
self._parent = None
self._tree = self.tree
self._mgr = GetManager(self._tree)
"""
self._action_registerer = action_registerer
self.tree = parent.tree
"""
# parent, action_registerer, , default_settings={'collapsed':True}
def register_frame(self, parent=None):
if parent:
self._parent = parent
self._mgr.InsertPane(self._tree,
wx.lib.agw.aui.AuiPaneInfo().Name("tree_content").
Caption("Test Suites").LeftDockable(True).
CloseButton(True))
self._mgr.Update()
# print(f"DEBUG: TreePlugin frame {self._parent.GetTitle()} tree {self._tree.GetName()}")
def enable(self):
self.register_action(ActionInfo('View','View Test Suites Explorer', self.OnShowTree,
shortcut='F12',
doc='Show Test Suites tree panel',
position=1))
self.subscribe(self.OnTreeSelection, RideTreeSelection)
# self.save_setting('opened', True)
if self.opened:
self.OnShowTree(None)
# print(f"DEBUG: TreePlugin end enable tree focused? {self.is_focused()}")
# self.subscribe(self.OnTabChanged, RideNotebookTabChanged)
# self.subscribe(self._update_preview, RideTestCaseAdded)
# self.subscribe(self._update_preview, RideUserKeywordAdded)
# self.add_self_as_tree_aware_plugin()
def close_tree(self):
self._mgr.DetachPane(self._tree)
self._tree.Hide()
self._mgr.Update()
self.save_setting('opened', False)
# print(f"DEBUG: TreePlugin Called close")
def disable(self):
self.close_tree()
# self.save_setting('opened', False)
self.unsubscribe_all()
self.unregister_actions()
def is_focused(self):
return self._tree.HasFocus()
def populate(self, model):
self._tree.populate(model)
def set_editor(self, editor):
self._tree.set_editor(editor)
def OnShowTree(self, event):
if not self._parent:
self._parent = self.frame
if not self._tree: # This is not needed because tree is always created
self._tree = Tree(self, self._parent.actions, self._parent._application.settings)
print(f"DEBUG: TreePlugin Show created tree {self._tree.GetName()}")
self._pane = self._mgr.GetPane(self._tree)
self._tree.Show(True)
self._mgr.DetachPane(self._tree)
# self._mgr.Update()
self._mgr.AddPane(self._tree,
wx.lib.agw.aui.AuiPaneInfo().Name("tree_content").
Caption("Test Suites").LeftDockable(True).
CloseButton(True))
self._tree.Raise()
self._mgr.Update()
self.save_setting('opened', True)
self._update_tree()
def OnTreeSelection(self, event):
if self.is_focused():
self._tree.tree_node_selected(event.item)
def OnTabChanged(self, event):
self._update_tree()
def _update_tree(self, event=None):
# if self._tree.is_focused():
self._tree._refresh_view()
class Tree(with_metaclass(classmaker(), treemixin.DragAndDrop,
customtreectrl.CustomTreeCtrl,
utils.RideEventHandler)):
_RESOURCES_NODE_LABEL = 'External Resources'
def __init__(self, parent, action_registerer, settings=None):
self._checkboxes_for_tests = False
self._test_selection_controller = \
self._create_test_selection_controller()
self._controller = TreeController(
self, action_registerer, settings=settings,
test_selection=self._test_selection_controller)
treemixin.DragAndDrop.__init__(self, parent, **_TREE_ARGS)
self._controller.register_tree_actions()
self._bind_tree_events()
self._images = TreeImageList()
self._animctrl = None
self._silent_mode = False
self.SetImageList(self._images)
self.label_editor = TreeLabelEditListener(self, action_registerer)
self._controller.bind_keys()
self._subscribe_to_messages()
self._popup_creator = PopupCreator()
self._dragging = False
self._clear_tree_data()
self._editor = None
self._execution_results = None
self._resources = []
self.SetBackgroundColour('white') # TODO get background color from def
if not hasattr(self, 'OnCancelEdit'):
self.OnCancelEdit = self._on_cancel_edit
def _create_test_selection_controller(self):
tsc = TestSelectionController()
PUBLISHER.subscribe(tsc.clear_all, RideOpenSuite)
PUBLISHER.subscribe(tsc.clear_all, RideNewProject)
return tsc
def _on_cancel_edit(self, item):
le = customtreectrl.TreeEvent(
customtreectrl.wxEVT_TREE_END_LABEL_EDIT, self.GetId())
le._item = item
le.SetEventObject(self)
le._label = ""
le._editCancelled = True
self.GetEventHandler().ProcessEvent(le)
def _bind_tree_events(self):
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.Bind(wx.EVT_TREE_SEL_CHANGED, self.OnSelChanged)
self.Bind(wx.EVT_TREE_ITEM_EXPANDING, self.OnTreeItemExpanding)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnRightClick)
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.OnItemActivated)
self.Bind(customtreectrl.EVT_TREE_ITEM_CHECKED, self.OnTreeItemChecked)
self.Bind(wx.EVT_TREE_ITEM_COLLAPSING, self.OnTreeItemCollapsing)
self.Bind(wx.EVT_CLOSE, self.OnClose)
def OnDoubleClick(self, event):
item, pos = self.HitTest(self.ScreenToClient(wx.GetMousePosition()))
if item:
handler = self._controller.get_handler(item)
handler.double_clicked()
event.Skip()
def set_editor(self, editor):
self._editor = editor
def StartDragging(self):
self._dragging = True
treemixin.DragAndDrop.StartDragging(self)
def OnEndDrag(self, event):
self._dragging = False
treemixin.DragAndDrop.OnEndDrag(self, event)
def register_context_menu_hook(self, callable):
self._popup_creator.add_hook(callable)
def unregister_context_menu_hook(self, callable):
self._popup_creator.remove_hook(callable)
def _subscribe_to_messages(self):
subscriptions = [
(self._item_changed, RideItem),
(self._resource_added, RideOpenResource),
(self._select_resource, RideSelectResource),
(self._suite_added, RideSuiteAdded),
(self._keyword_added, RideUserKeywordAdded),
(self._test_added, RideTestCaseAdded),
(self._variable_added, RideVariableAdded),
(self._leaf_item_removed, RideUserKeywordRemoved),
(self._leaf_item_removed, RideTestCaseRemoved),
(self._leaf_item_removed, RideVariableRemoved),
(self._datafile_removed, RideDataFileRemoved),
(self._datafile_set, RideDataFileSet),
(self._data_dirty, RideDataChangedToDirty),
(self._data_undirty, RideDataDirtyCleared),
(self._variable_moved_up, RideVariableMovedUp),
(self._variable_moved_down, RideVariableMovedDown),
(self._variable_updated, RideVariableUpdated),
(self._filename_changed, RideFileNameChanged),
(self._testing_started, RideTestExecutionStarted),
(self._test_result, RideTestRunning),
(self._test_result, RideTestPaused),
(self._test_result, RideTestPassed),
(self._test_result, RideTestFailed),
(self._handle_import_setting_message, RideImportSetting),
(self._mark_excludes, RideExcludesChanged),
(self._mark_excludes, RideIncludesChanged),
]
for listener, topic in subscriptions:
PUBLISHER.subscribe(listener, topic)
def _mark_excludes(self, message):
tree = self._controller.find_node_by_controller(message.old_controller)
self._render_datafile(self.GetItemParent(tree), message.new_controller)
self._remove_datafile_node(tree)
def _set_item_excluded(self, node):
self.SetItemTextColour(node, wx.TheColourDatabase.Find("GRAY"))
self.SetItemItalic(node, True)
self.SetItemText(node, "%s (excluded)" % self.GetItemText(node))
def _handle_import_setting_message(self, message):
if message.is_resource():
self._set_resource_color(
message.import_controller.get_imported_controller())
self._set_resource_color(
message.import_controller.get_previous_imported_controller())
def _set_resource_color(self, resource_controller):
if not resource_controller:
return
node = self._controller.find_node_by_controller(resource_controller)
if node:
self.SetItemTextColour(
node, self._get_resource_text_color(resource_controller))
def _get_resource_text_color(self, resource_controller):
if resource_controller.is_used():
return self.GetDefaultAttributes().colFg
else:
return wx.LIGHT_GREY
def _testing_started(self, message):
self._for_all_drawn_tests(
self._root, lambda t: self.SetItemImage(t, ROBOT_IMAGE_INDEX))
self._execution_results = message.results
self._images.set_execution_results(message.results)
def _test_result(self, message):
wx.CallAfter(self._set_icon_from_execution_results, message.item)
def _set_icon_from_execution_results(self, controller):
node = self._controller.find_node_by_controller(controller)
if not node:
return
img_index = self._get_icon_index_for(controller)
# Always set the static icon
self.SetItemImage(node, img_index)
if self._animctrl:
self._animctrl.Stop()
self._animctrl.Animation.Destroy()
self._animctrl.Destroy()
self._animctrl = None
self.DeleteItemWindow(node)
if img_index in (RUNNING_IMAGE_INDEX, PAUSED_IMAGE_INDEX):
from wx.adv import Animation, AnimationCtrl
import os
_BASE = os.path.join(os.path.dirname(__file__), '..', 'widgets')
if img_index == RUNNING_IMAGE_INDEX:
img = os.path.join(_BASE, 'robot-running.gif')
else:
img = os.path.join(_BASE, 'robot-pause.gif')
ani = Animation(img)
obj = self
rect = (node.GetX()+20, node.GetY()) # Overlaps robot icon
self._animctrl = AnimationCtrl(obj, -1, ani, rect)
self._animctrl.SetBackgroundColour(obj.GetBackgroundColour())
self.SetItemWindow(node, self._animctrl, False)
self._animctrl.Play()
# Make visible the running or paused test
self.EnsureVisible(node)
def _get_icon_index_for(self, controller):
if not self._execution_results:
return ROBOT_IMAGE_INDEX
if self._execution_results.is_paused(controller):
return PAUSED_IMAGE_INDEX
if self._execution_results.is_running(controller):
return RUNNING_IMAGE_INDEX
if self._execution_results.has_passed(controller):
return PASSED_IMAGE_INDEX
if self._execution_results.has_failed(controller):
return FAILED_IMAGE_INDEX
return ROBOT_IMAGE_INDEX
def populate(self, model):
self._clear_tree_data()
self._populate_model(model)
self._refresh_view()
self.SetFocus() # Needed for keyboard shortcuts
def _clear_tree_data(self):
self.DeleteAllItems()
self._root = self.AddRoot('')
self._resource_root = self._create_resource_root()
self._datafile_nodes = []
self._resources = []
def _create_resource_root(self):
return self._create_node(self._root, self._RESOURCES_NODE_LABEL,
self._images.directory)
def _populate_model(self, model):
handler = ResourceRootHandler(model, self, self._resource_root,
self._controller.settings)
self.SetPyData(self._resource_root, handler)
if model.data:
self._render_datafile(self._root, model.data, 0)
for res in model.external_resources:
if not res.parent:
self._render_datafile(self._resource_root, res)
def _resource_added(self, message):
ctrl = message.datafile
if self._controller.find_node_by_controller(ctrl):
return
parent = None
if ctrl.parent:
parent = self._get_dir_node(ctrl.parent)
else:
parent = self._resource_root
self._render_datafile(parent, ctrl)
def _get_dir_node(self, ctrl):
if ctrl is None:
return self._root
dir_node = self._get_datafile_node(ctrl.data)
if dir_node is None:
parent = self._get_dir_node(ctrl.parent)
self._render_datafile(parent, ctrl)
dir_node = self._get_datafile_node(ctrl.data)
return dir_node
def _select_resource(self, message):
self.select_controller_node(message.item)
def select_controller_node(self, controller):
self.SelectItem(self._controller.find_node_by_controller(controller))
def _suite_added(self, message):
self.add_datafile(message.parent, message.suite)
def _refresh_view(self):
self.Refresh()
if self._resource_root:
self.Expand(self._resource_root)
if self._datafile_nodes:
self.SelectItem(self._datafile_nodes[0])
self._expand_and_render_children(self._datafile_nodes[0])
def _render_datafile(self, parent_node, controller, index=None):
node = self._create_node_with_handler(parent_node, controller, index)
if not node:
return None
if controller.dirty:
self._controller.mark_node_dirty(node)
self._datafile_nodes.append(node)
self.SetItemHasChildren(node, True)
for child in controller.children:
self._render_datafile(node, child)
return node
def _normalize(self, path):
return os.path.normcase(os.path.normpath(os.path.abspath(path)))
def _create_node_with_handler(self, parent_node, controller, index=None):
        if IS_WINDOWS and isinstance(controller, ResourceFileController):
            resourcefile = self._normalize(controller.filename)
            pname = parent_node.GetText()
            self._resources.append((pname, resourcefile))
            count = 0
            for (p, r) in self._resources:
                if (p, r) == (pname, resourcefile):
                    count += 1
            if count > 3:
                return None
handler_class = action_handler_class(controller)
with_checkbox = (handler_class == TestCaseHandler and self._checkboxes_for_tests)
node = self._create_node(parent_node, controller.display_name, self._images[controller],
index, with_checkbox=with_checkbox)
if isinstance(controller, ResourceFileController) and not controller.is_used():
self.SetItemTextColour(node, TREETEXTCOLOUR) # wxPython3 hack
action_handler = handler_class(controller, self, node, self._controller.settings)
self.SetPyData(node, action_handler)
# if we have a TestCase node we have to make sure that
# we retain the checked state
if (handler_class == TestCaseHandler and self._checkboxes_for_tests) \
and self._test_selection_controller.is_test_selected(controller):
self.CheckItem(node, True)
if controller.is_excluded():
self._set_item_excluded(node)
return node
def set_checkboxes_for_tests(self):
self._checkboxes_for_tests = True
def _expand_and_render_children(self, node):
assert node is not None
self._render_children(node)
self.Expand(node)
def _render_children(self, node):
handler = self._controller.get_handler(node)
if not handler or not handler.can_be_rendered:
return
self._create_child_nodes(
node, handler, lambda item: item.is_test_suite)
handler.set_rendered()
def _create_child_nodes(self, node, handler, predicate):
for childitem in self._children_of(handler):
index = self._get_insertion_index(node, predicate)
self._create_node_with_handler(node, childitem, index)
def _children_of(self, handler):
return [v for v in handler.variables if v.has_data()] + \
list(handler.tests) + list(handler.keywords)
def _create_node(self, parent_node, label, img, index=None, with_checkbox=False):
node = self._wx_node(parent_node, index, label, with_checkbox)
self.SetItemImage(node, img.normal, wx.TreeItemIcon_Normal)
self.SetItemImage(node, img.expanded, wx.TreeItemIcon_Expanded)
return node
def _wx_node(self, parent_node, index, label, with_checkbox):
ct_type = 1 if with_checkbox else 0
if index is not None:
# blame wxPython for this ugliness
if isinstance(index, int):
return self.InsertItemByIndex(
parent_node, index, label, ct_type=ct_type)
else:
return self.InsertItem(
parent_node, index, label, ct_type=ct_type)
return self.AppendItem(parent_node, label, ct_type=ct_type)
def add_datafile(self, parent, suite):
snode = self._render_datafile(
self._get_datafile_node(parent.data), suite)
self.SelectItem(snode)
def add_test(self, parent_node, test):
self._add_dataitem(
parent_node, test, lambda item: item.is_user_keyword)
def add_keyword(self, parent_node, kw):
self._add_dataitem(parent_node, kw, lambda item: item.is_test_suite)
def _add_dataitem(self, parent_node, dataitem, predicate):
node = self._get_or_create_node(parent_node, dataitem, predicate)
self._select(node)
self._controller.mark_node_dirty(parent_node)
def _get_or_create_node(self, parent_node, dataitem, predicate):
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
return self._controller.find_node_with_label(
parent_node, dataitem.display_name)
index = self._get_insertion_index(parent_node, predicate)
return self._create_node_with_handler(parent_node, dataitem, index)
def _select(self, node):
if node:
wx.CallAfter(self.SelectItem, node)
def _get_insertion_index(self, parent_node, predicate):
if not predicate:
return None
item, cookie = self.GetFirstChild(parent_node)
while item:
if predicate(self._controller.get_handler(item)):
index = self.GetPrevSibling(item)
if not index:
index = 0
return index
item, cookie = self.GetNextChild(parent_node, cookie)
return None
def _keyword_added(self, message):
self.add_keyword(self._get_datafile_node(self.get_selected_datafile()),
message.item)
def _variable_added(self, message):
self._get_or_create_node(
self._get_datafile_node(self.get_selected_datafile()),
message.item,
lambda item: not item.is_variable or item.index > message.index)
def _leaf_item_removed(self, message):
node = self._controller.find_node_by_controller(message.item)
parent_node = self._get_datafile_node(message.datafile)
# DEBUG The below call causes not calling delete_node
# self._test_selection_controller.select(message.item, False)
self._controller.mark_node_dirty(parent_node)
self.delete_node(node)
def _test_added(self, message):
self.add_test(self._get_datafile_node(self.get_selected_datafile()),
message.item)
def _datafile_removed(self, message):
dfnode = self._get_datafile_node(message.datafile.data)
self._datafile_nodes.remove(dfnode)
self.DeleteChildren(dfnode)
self.Delete(dfnode)
def _datafile_set(self, message):
wx.CallAfter(self._refresh_datafile_when_file_set, message.item)
def _filename_changed(self, message):
df = message.datafile
node = self._controller.find_node_by_controller(df)
if not node:
raise AssertionError('No node found with controller "%s"' % df)
wx.CallAfter(self.SetItemText, node, df.display_name)
def add_keyword_controller(self, controller):
parent = self._get_datafile_node(self.get_selected_datafile())
self.add_keyword(parent, controller)
def delete_node(self, node):
if node is None:
return
parent = self.GetItemParent(node)
self._controller.mark_node_dirty(parent)
if self.IsSelected(node):
wx.CallAfter(self.SelectItem, parent)
wx.CallAfter(self.Delete, node)
def _data_dirty(self, message):
self._controller.mark_controller_dirty(message.datafile)
def _data_undirty(self, message):
self.unset_dirty()
def unset_dirty(self):
for node in self._datafile_nodes:
text = self.GetItemText(node)
handler = self._controller.get_handler(node)
if text.startswith('*') and not handler.controller.dirty:
self.SetItemText(node, text[1:])
def select_node_by_data(self, controller):
"""Find and select the tree item associated with the given controller.
Controller can be any of the controllers that are represented in the
tree."""
parent_node = self._get_datafile_node(controller.datafile)
if not parent_node:
return None
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
node = self._controller.find_node_by_controller(controller)
if node != self.GetSelection():
self.SelectItem(node)
return node
def select_user_keyword_node(self, uk):
parent_node = self._get_datafile_node(uk.parent.parent)
if not parent_node:
return
if not self.IsExpanded(parent_node):
self._expand_and_render_children(parent_node)
node = self._controller.find_node_with_label(
parent_node, utils.normalize(uk.name))
if node != self.GetSelection():
self.SelectItem(node)
def _get_datafile_node(self, datafile):
for node in self._datafile_nodes:
if self._controller.get_handler(node).item == datafile:
return node
return None
def get_selected_datafile(self):
"""Returns currently selected data file.
If a test or user keyword node is selected, returns parent of that
item."""
datafile = self._get_selected_datafile_node()
if not datafile:
return None
return self._controller.get_handler(datafile).item
def get_selected_datafile_controller(self):
"""Returns controller associated with currently active data file.
If a test or user keyword node is selected, returns parent of that
item."""
dfnode = self._get_selected_datafile_node()
if dfnode:
return self._controller.get_handler(dfnode).controller
else:
return None
def _get_selected_datafile_node(self):
node = self.GetSelection()
if not node or node in (self._resource_root, self._root):
return None
while node not in self._datafile_nodes:
node = self.GetItemParent(node)
return node
def get_selected_item(self):
"""Returns model object associated with currently selected tree node.
"""
selection = self.GetSelection()
if not selection:
return None
handler = self._controller.get_handler(selection)
return handler and handler.controller or None
def move_up(self, node):
prev = self.GetPrevSibling(node)
if prev.IsOk():
self._switch_items(prev, node, node)
def move_down(self, node):
next = self.GetNextSibling(node)
if next.IsOk():
self._switch_items(node, next, node)
def _switch_items(self, first, second, currently_selected):
"""Changes the order of given items, first is expected to be directly
above the second"""
selection = self.GetItemPyData(currently_selected).controller
controller = self._controller.get_handler(first).controller
self.Delete(first)
self._create_node_with_handler(self.GetItemParent(second), controller, second)
self.select_node_by_data(selection)
def _refresh_datafile_when_file_set(self, controller):
# Prevent tab selections based on tree item selected events
self._start_silent_mode()
current = self.get_selected_datafile_controller()
if not current: # If tree is not yet in use - do not expand anything.
self._end_silent_mode()
return
item = self.GetSelection()
current_txt = self.GetItemText(item) if item.IsOk() else ''
# after refresh current and current_txt might have been changed
node = self._refresh_datafile(controller)
if node is None:
# TODO: Find out why this sometimes happens
return
self._expand_and_render_children(node)
if current == controller:
select_item = self._controller.find_node_with_label(
node, current_txt)
if select_item is None:
select_item = node
wx.CallAfter(self.SelectItem, select_item)
wx.CallAfter(self._end_silent_mode)
else:
self._end_silent_mode()
def _uncheck_tests(self, controller):
self._test_selection_controller.unselect_all(controller.tests)
def _start_silent_mode(self):
self._silent_mode = True
def _end_silent_mode(self):
self._silent_mode = False
def refresh_datafile(self, controller, event):
to_be_selected = self._get_pending_selection(event)
new_node = self._refresh_datafile(controller)
self._handle_pending_selection(to_be_selected, new_node)
def _refresh_datafile(self, controller):
orig_node = self._get_data_controller_node(controller)
if orig_node is not None:
insertion_index = self._get_datafile_index(orig_node)
parent = self.GetItemParent(orig_node)
self._remove_datafile_node(orig_node)
return self._render_datafile(parent, controller, insertion_index)
def _get_pending_selection(self, event):
if hasattr(event, 'Item'):
item = event.GetItem()
event.Veto()
elif hasattr(event, 'Position'):
item, flags = self.HitTest(event.Position)
if not self._click_on_item(item, flags):
return
else:
return
return self.GetItemText(item)
def _get_data_controller_node(self, controller):
for node in self._datafile_nodes:
if self.GetItemPyData(node).controller == controller:
return node
return None
def _click_on_item(self, item, flags):
return item is not None and item.IsOk() and \
flags & wx.TREE_HITTEST_ONITEM
def _get_datafile_index(self, node):
insertion_index = self.GetPrevSibling(node)
if not insertion_index:
insertion_index = 0
return insertion_index
def _remove_datafile_node(self, node):
for child in self.GetItemChildren(node):
if child in self._datafile_nodes:
self._remove_datafile_node(child)
self._datafile_nodes.remove(node)
self.Delete(node)
def _handle_pending_selection(self, to_be_selected, parent_node):
if to_be_selected:
self._expand_and_render_children(parent_node)
select_item = self._controller.find_node_with_label(
parent_node, to_be_selected)
wx.CallAfter(self.SelectItem, select_item)
def OnSelChanged(self, event):
node = event.GetItem()
if not node.IsOk() or self._dragging:
event.Skip()
return
self._controller.add_to_history(node)
handler = self._controller.get_handler(node)
if handler and handler.item:
RideTreeSelection(
node=node,
item=handler.controller,
silent=self._silent_mode).publish()
self.SetFocus()
def OnTreeItemExpanding(self, event):
node = event.GetItem()
if node.IsOk():
self._render_children(node)
# This exists because CustomTreeItem does not remove animations
def OnTreeItemCollapsing(self, event):
item = event.GetItem()
self._hide_item(item)
event.Skip()
def _hide_item(self, item):
for item in item.GetChildren():
itemwindow = item.GetWindow()
if itemwindow:
itemwindow.Hide()
if self.ItemHasChildren(item):
self._hide_item(item)
def SelectAllTests(self, item):
self._for_all_tests(item, lambda t: self.CheckItem(t))
def SelectTests(self, tests):
def foo(t):
if self.GetPyData(t).controller in tests:
self.CheckItem(t)
self._for_all_tests(self._root, foo)
def ExpandAllSubNodes(self, item):
self._expand_or_collapse_nodes(item, self.Expand)
def CollapseAllSubNodes(self, item):
self._expand_or_collapse_nodes(item, self.Collapse)
def _expand_or_collapse_nodes(self, item, callback):
if not self.HasAGWFlag(customtreectrl.TR_HIDE_ROOT) or \
item != self.GetRootItem():
callback(item)
for child in item.GetChildren():
self._expand_or_collapse_nodes(child, callback)
def _for_all_tests(self, item, func):
item_was_expanded = self.IsExpanded(item)
if not self.HasAGWFlag(customtreectrl.TR_HIDE_ROOT) or \
item != self.GetRootItem():
            if isinstance(item.GetData(),
                          (ResourceRootHandler, ResourceFileHandler)):
                return
is_item_expanded = self.IsExpanded(item)
if not is_item_expanded:
self.Expand(item)
if self._is_test_node(item):
func(item)
if not self.IsExpanded(item):
return
for child in item.GetChildren():
self._for_all_tests(child, func)
if not item_was_expanded:
self.Collapse(item)
def _for_all_drawn_tests(self, item, func):
if self._is_test_node(item):
func(item)
for child in item.GetChildren():
self._for_all_drawn_tests(child, func)
def _is_test_node(self, node):
return node.GetType() == 1
def DeselectAllTests(self, item):
self._for_all_tests(item, lambda t: self.CheckItem(t, checked=False))
def DeselectTests(self, tests):
def foo(t):
if self.GetPyData(t).controller in tests:
self.CheckItem(t, checked=False)
self._for_all_tests(self._root, foo)
def SelectFailedTests(self, item):
def func(t):
# FIXME: This information should be in domain model!
is_checked = self.GetItemImage(t) == FAILED_IMAGE_INDEX
self.CheckItem(t, checked=is_checked)
self._for_all_tests(item, func)
def SelectPassedTests(self, item):
def func(t):
is_checked = self.GetItemImage(t) == PASSED_IMAGE_INDEX
self.CheckItem(t, checked=is_checked)
self._for_all_tests(item, func)
def OnClose(self, event):
print("DEBUG: Tree OnClose hidding")
self.Hide()
def OnTreeItemChecked(self, event):
node = event.GetItem()
handler = self._controller.get_handler(node=node)
self._test_selection_controller.select(
handler.controller, node.IsChecked())
def OnItemActivated(self, event):
node = event.GetItem()
if self.IsExpanded(node):
self.Collapse(node)
elif self.ItemHasChildren(node):
self._expand_and_render_children(node)
def OnLeftArrow(self, event):
node = self.GetSelection()
if self.IsExpanded(node):
self.Collapse(node)
else:
event.Skip()
def OnRightClick(self, event):
handler = None
if hasattr(event, 'GetItem'):
handler = self._controller.get_handler(event.GetItem())
if handler:
if not self.IsExpanded(handler.node):
self.Expand(handler.node)
handler.show_popup()
def OnNewTestCase(self, event):
handler = self._controller.get_handler()
if handler:
handler.OnNewTestCase(event)
def OnDrop(self, target, dragged):
dragged = self._controller.get_handler(dragged)
target = self._controller.get_handler(target)
if target and target.accepts_drag(dragged):
dragged.controller.execute(MoveTo(target.controller))
self.Refresh() # DEBUG Always refresh
def IsValidDragItem(self, item):
return self._controller.get_handler(item).is_draggable
def OnMoveUp(self, event):
handler = self._controller.get_handler()
if handler.is_draggable:
handler.OnMoveUp(event)
def OnMoveDown(self, event):
handler = self._controller.get_handler()
if handler.is_draggable:
handler.OnMoveDown(event)
def _item_changed(self, data):
controller = data.item
node = self._controller.find_node_by_controller(controller)
if node:
self.SetItemText(node, data.item.name)
self._test_selection_controller.send_selection_changed_message()
if controller.dirty:
self._controller.mark_node_dirty(
self._get_datafile_node(controller.datafile))
def _variable_moved_up(self, data):
if self._should_update_variable_positions(data):
self._do_action_if_datafile_node_is_expanded(self.move_up, data)
def _variable_moved_down(self, data):
if self._should_update_variable_positions(data):
self._do_action_if_datafile_node_is_expanded(self.move_down, data)
def _should_update_variable_positions(self, message):
return message.item != message.other and message.item.has_data() and \
message.other.has_data()
def _do_action_if_datafile_node_is_expanded(self, action, data):
if self.IsExpanded(self._get_datafile_node(data.item.datafile)):
node = self._controller.find_node_by_controller(data.item)
action(node)
def _variable_updated(self, data):
self._item_changed(data)
def highlight(self, data, text):
self.select_node_by_data(data)
self._editor.highlight(text)
def node_is_resource_file(self, node):
return self._controller.get_handler(node).__class__ == \
ResourceFileHandler
class TreeLabelEditListener(object):
def __init__(self, tree, action_registerer):
self._tree = tree
tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.OnBeginLabelEdit)
tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.OnLabelEdited)
tree.Bind(wx.EVT_LEFT_DOWN, self.OnLeftDown)
if IS_WINDOWS:
# Delete key does not work in windows without registration
delete_key_action = ActionInfo(
None, None, action=self.OnDelete, shortcut='Del')
action_registerer.register_shortcut(delete_key_action)
self._editing_label = False
self._on_label_edit_called = False
def OnBeginLabelEdit(self, event):
# See http://code.google.com/p/robotframework-ride/issues/detail?id=756
self._editing_label = True
if not self._on_label_edit_called:
self.OnLabelEdit()
event.Veto()
# On windows CustomTreeCtrl will create Editor component
# And we want this to be done by the handler -- as it knows if
# there should be one or not. And because this will make it work
# the same way as when pressing F2 .. so in other words there is
# a bug if we don't Veto this event
def OnLabelEdit(self, event=None):
if not self._on_label_edit_called:
self._on_label_edit_called = True
handler = self._tree._controller.get_handler()
if handler and not handler.begin_label_edit():
self._on_label_edit_called = False
self._editing_label = False
def OnLabelEdited(self, event):
self._editing_label = False
self._on_label_edit_called = False
self._tree._controller.get_handler(event.GetItem()) \
.end_label_edit(event)
# Reset edit control as it doesn't seem to reset it in case the focus
# goes directly away from the tree control
# Use CallAfter to prevent messing up the current end label edit
# .. and the another CallAfter because of
# customtreectrl.TreeTextCtrl#OnChar will call CallAfter(self.Finish)
# when Enter is pressed --> Results in PyDeadObject if called after
# ResetEditControl..
wx.CallAfter(wx.CallAfter, self._stop_editing)
def _stop_editing(self):
control = self._tree.GetEditControl()
if control and wx.Window.FindFocus():
control.StopEditing()
def OnDelete(self, event):
editor = self._tree.GetEditControl()
if editor and wx.Window.FindFocus() == editor:
start, end = editor.GetSelection()
editor.Remove(start, max(end, start + 1))
def OnLeftDown(self, event):
# See http://code.google.com/p/robotframework-ride/issues/detail?id=756
if IS_WINDOWS and self._editing_label:
# This method works only on Windows, luckily the issue 756 exists
# only on Windows
self._tree.OnCancelEdit(self._tree.GetSelection())
event.Skip()
def _get_handler(self, item=None):
return self._tree._get_handler(item)
| 39.390901 | 101 | 0.656138 |
d9e348fcb464f510f3cae88ca287ea89386c7f17 | 11,870 | py | Python | test/mutable_edge_test.py | zli117/Evolution | b5be1552338fa57b9a3e4743c8e917e30d2caada | [
"MIT"
] | 4 | 2019-07-02T21:52:30.000Z | 2021-10-31T19:39:21.000Z | test/mutable_edge_test.py | zli117/Evolution | b5be1552338fa57b9a3e4743c8e917e30d2caada | [
"MIT"
] | null | null | null | test/mutable_edge_test.py | zli117/Evolution | b5be1552338fa57b9a3e4743c8e917e30d2caada | [
"MIT"
] | 1 | 2020-01-25T22:29:49.000Z | 2020-01-25T22:29:49.000Z | from typing import cast
import tensorflow as tf
from evolution.encoding.base import Edge
from evolution.encoding.base import IdentityOperation
from evolution.encoding.base import MaxPool2D
from evolution.encoding.base import PointConv2D
from evolution.encoding.base import Vertex
from evolution.encoding.mutable_edge import MutableEdge
def test_complex_op_creation():
complex_operation = MutableEdge((PointConv2D((1, 4)),))
assert len(complex_operation.available_operations) == 1
assert len(complex_operation.vertices_topo_order) == 2
assert (complex_operation.vertices_topo_order[0]
is complex_operation.output_vertex)
assert (complex_operation.vertices_topo_order[1]
is complex_operation.input_vertex)
def test_sort_vertices(basic_graph):
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph
complex_operation.sort_vertices()
assert len(complex_operation.vertices_topo_order) == 4
assert (complex_operation.vertices_topo_order[0]
is complex_operation.output_vertex)
assert (complex_operation.vertices_topo_order[1] is vertex2)
assert (complex_operation.vertices_topo_order[2] is vertex1)
assert (complex_operation.vertices_topo_order[3]
is complex_operation.input_vertex)
assert len(complex_operation.input_vertex.out_bound_edges) == 2
edge8 = IdentityOperation()
vertex2.out_bound_edges.append(edge8)
edge8.end_vertex = vertex3
complex_operation.sort_vertices()
assert vertex3 in complex_operation.vertices_topo_order
assert complex_operation.output_vertex.order == 0
assert vertex3.order == 1
assert vertex2.order == 2
assert vertex1.order == 3
assert complex_operation.input_vertex.order == 4
assert len(complex_operation.output_vertex.out_bound_edges) == 0
assert len(complex_operation.input_vertex.out_bound_edges) == 2
assert len(vertex1.out_bound_edges) == 2
assert len(vertex2.out_bound_edges) == 2
assert len(vertex3.out_bound_edges) == 1
def test_add_edge1(basic_graph, mocker):
# Make sure there's no cycle
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph
def mock(*args, **kwargs):
if kwargs['size'] == 2:
return [complex_operation.output_vertex,
complex_operation.input_vertex]
if kwargs['size'] == 1:
assert isinstance(args[0][0], PointConv2D)
return [MaxPool2D()]
mocker.patch('numpy.random.choice', side_effect=mock)
complex_operation.mutation_add_edge()
assert len(complex_operation.vertices_topo_order) == 4
for vertex in complex_operation.vertices_topo_order:
order = vertex.order
for edge in vertex.out_bound_edges:
if order < edge.end_vertex.order:
                # Edge in back direction => cycle
assert False
for edge in complex_operation.input_vertex.out_bound_edges:
if edge.end_vertex is complex_operation.output_vertex:
print(type(edge), type(edge.end_vertex))
assert isinstance(edge, MaxPool2D)
break
else:
# The new edge is not there
assert False
def test_add_edge2(basic_graph, mocker):
# Make sure it won't break if there are multiple edges between two vertices
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph
complex_operation.sort_vertices()
def mock(*args, **kwargs):
if kwargs['size'] == 2:
return [vertex1, vertex2]
if kwargs['size'] == 1:
assert isinstance(args[0][0], PointConv2D)
return [MaxPool2D()]
mocker.patch('numpy.random.choice', side_effect=mock)
complex_operation.mutation_add_edge()
assert len(vertex1.out_bound_edges) == 3
to_vertex2_count = 0
for edge in vertex1.out_bound_edges:
if edge.end_vertex is vertex2:
to_vertex2_count += 1
assert to_vertex2_count == 2
class MockEdge(Edge):
def __init__(self, mutated: bool = True):
super().__init__()
self.mutated = mutated
self.deep_copy_count = 0
def mutate(self) -> bool:
return self.mutated
def build(self, x: tf.Tensor) -> tf.Tensor:
return x
def invalidate_layer_count(self) -> None:
pass
@property
def level(self) -> int:
return 1
def deep_copy(self) -> Edge:
self.deep_copy_count += 1
return self
def test_mutate_edge(basic_graph, mocker):
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph
edge_to_replace = MaxPool2D()
complex_operation.input_vertex.out_bound_edges.append(edge_to_replace)
edge_to_replace.end_vertex = complex_operation.output_vertex
new_edge = MockEdge()
complex_operation.sort_vertices()
def mock(*args, **_):
if isinstance(args[0][0], Vertex):
return [complex_operation.input_vertex]
if edge_to_replace in args[0]:
return [edge_to_replace]
else:
return [new_edge]
mocker.patch('numpy.random.choice', side_effect=mock)
before_out_edges = list(complex_operation.input_vertex.out_bound_edges)
before_out_edges.remove(edge_to_replace)
complex_operation.mutation_mutate_edge()
assert edge_to_replace.end_vertex is None
assert new_edge in complex_operation.input_vertex.out_bound_edges
assert new_edge.deep_copy_count == 1
assert (len(complex_operation.input_vertex.out_bound_edges)
== len(before_out_edges) + 1)
# Everything before not mutated is still there
for edge in before_out_edges:
assert edge in complex_operation.input_vertex.out_bound_edges
def test_remove_edge_fail1():
complex_operation = MutableEdge((PointConv2D((1, 4)), MaxPool2D()))
assert not complex_operation.mutation_remove_edge()
def test_remove_edge_fail2():
complex_operation = MutableEdge((PointConv2D((1, 4)), MaxPool2D()))
edge1 = IdentityOperation()
edge2 = IdentityOperation()
complex_operation.input_vertex.out_bound_edges.clear()
complex_operation.input_vertex.out_bound_edges.append(edge1)
middle_vertex = Vertex()
complex_operation.vertices_topo_order.append(middle_vertex)
edge1.end_vertex = middle_vertex
middle_vertex.out_bound_edges.append(edge2)
edge2.end_vertex = complex_operation.output_vertex
assert not complex_operation.mutation_remove_edge()
def test_remove_edge_success():
complex_operation = MutableEdge((PointConv2D((1, 4)), MaxPool2D()))
edge1 = IdentityOperation()
edge2 = IdentityOperation()
complex_operation.input_vertex.out_bound_edges.clear()
complex_operation.input_vertex.out_bound_edges.append(edge1)
middle_vertex = Vertex()
complex_operation.vertices_topo_order.append(middle_vertex)
edge1.end_vertex = middle_vertex
middle_vertex.out_bound_edges.append(edge2)
edge2.end_vertex = complex_operation.output_vertex
# Edge from input to output. So now we can remove one edge
edge3 = IdentityOperation()
complex_operation.input_vertex.out_bound_edges.append(edge3)
edge3.end_vertex = complex_operation.output_vertex
assert complex_operation.mutation_remove_edge()
assert len(complex_operation.input_vertex.out_bound_edges) == 1
def test_mutation_add_node(basic_graph_no_v12, mocker):
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph_no_v12
complex_operation.sort_vertices()
v1_order = vertex1.order
v2_order = vertex2.order
# Artificially make v2 lower order than v1 since they are parallel. The
# order could be arbitrary
vertex2.order = min(v1_order, v2_order)
vertex1.order = max(v1_order, v2_order)
edge1 = MaxPool2D()
edge2 = MockEdge()
def mock(*args, **_):
if isinstance(args[0][0], Vertex):
return [vertex1, vertex2]
if isinstance(args[0][0], Edge):
return [edge1, edge2]
mocker.patch('numpy.random.choice', side_effect=mock)
complex_operation.mutation_add_vertex()
assert edge2.end_vertex is vertex2
assert edge2.deep_copy_count == 1
assert vertex2.order < vertex1.order
def test_mutation_add_node_max_vertex():
complex_operation = MutableEdge((PointConv2D((1, 4)),), max_vertices=2)
assert not complex_operation.mutation_add_vertex()
complex_operation = MutableEdge((PointConv2D((1, 4)),), max_vertices=3)
assert complex_operation.mutation_add_vertex()
assert len(complex_operation.vertices_topo_order) == 3
assert not complex_operation.mutation_add_vertex()
def test_remove_node_success(basic_graph_no_v12, mocker):
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph_no_v12
vertex = Vertex()
edge1 = IdentityOperation()
edge2 = IdentityOperation()
vertex1.out_bound_edges.append(edge1)
edge1.end_vertex = vertex
vertex.out_bound_edges.append(edge2)
edge2.end_vertex = vertex2
complex_operation.sort_vertices()
mocker.patch('numpy.random.permutation', return_value=[vertex, vertex2])
assert vertex in complex_operation.vertices_topo_order
assert complex_operation.mutation_remove_vertex()
assert vertex2 in complex_operation.vertices_topo_order
assert vertex not in complex_operation.vertices_topo_order
assert len(vertex1.out_bound_edges) == 1
def test_remove_node_fail():
complex_operation = MutableEdge((PointConv2D((1, 4)),))
assert not complex_operation.mutation_remove_vertex()
complex_operation.input_vertex.out_bound_edges.clear()
vertex1 = Vertex()
vertex2 = Vertex()
edge1 = IdentityOperation()
edge2 = IdentityOperation()
edge3 = IdentityOperation()
complex_operation.input_vertex.out_bound_edges.append(edge1)
edge1.end_vertex = vertex1
vertex1.out_bound_edges.append(edge2)
edge2.end_vertex = vertex2
vertex2.out_bound_edges.append(edge3)
edge3.end_vertex = complex_operation.output_vertex
complex_operation.sort_vertices()
assert len(complex_operation.vertices_topo_order) == 4
assert not complex_operation.mutation_remove_vertex()
def test_max_vertices():
try:
MutableEdge((PointConv2D((1, 4)),), max_vertices=1)
assert False
except RuntimeError:
pass
try:
MutableEdge((PointConv2D((1, 4)),), max_vertices=2)
except RuntimeError:
assert False
def test_deep_copy(basic_graph):
complex_operation, vertex1, vertex2, vertex3, vertex4 = basic_graph
higher_level = MutableEdge((complex_operation,),
initialize_with_identity=False,
max_vertices=10)
higher_level_copy = cast(MutableEdge, higher_level.deep_copy())
assert higher_level_copy.max_vertices == 10
complex_edge = cast(MutableEdge,
higher_level.input_vertex.out_bound_edges[0])
complex_edge_copy = cast(MutableEdge,
higher_level_copy.input_vertex.out_bound_edges[0])
assert (len(complex_edge.vertices_topo_order)
== len(complex_edge_copy.vertices_topo_order))
for i, vertex in enumerate(complex_edge.vertices_topo_order):
for j, edge in enumerate(vertex.out_bound_edges):
copy_vertex = complex_edge_copy.vertices_topo_order[i]
copy_edge = copy_vertex.out_bound_edges[j]
assert copy_edge.end_vertex.order == edge.end_vertex.order
assert copy_vertex is not vertex
assert copy_edge is not edge
assert (len(higher_level_copy.available_operations)
== len(higher_level.available_operations))
for i in range(len(higher_level_copy.available_operations)):
assert (higher_level_copy.available_operations[i]
is not higher_level.available_operations[i])
| 34.306358 | 79 | 0.71904 |
545a5fee7760a84ab2c40761aa0655d0bbcc471b | 1,356 | py | Python | AnalizeBonusViagem/Mundo3/aula17listas.py | D-Wolter/Pycharm-Projects | aeb3a2464cf6c6578e5b47b13b54ff3ebd6e52dd | [
"MIT"
] | null | null | null | AnalizeBonusViagem/Mundo3/aula17listas.py | D-Wolter/Pycharm-Projects | aeb3a2464cf6c6578e5b47b13b54ff3ebd6e52dd | [
"MIT"
] | null | null | null | AnalizeBonusViagem/Mundo3/aula17listas.py | D-Wolter/Pycharm-Projects | aeb3a2464cf6c6578e5b47b13b54ff3ebd6e52dd | [
"MIT"
] | null | null | null |
#adding items
lanche = ['batata','coca','hamburger']
lanche.append('nugets')#appends to the end
print(lanche)#['batata', 'coca', 'hamburger', 'nugets']
lanche.insert(0,'bigMAc')#inserts at the desired index
print(lanche)#['bigMAc', 'batata', 'coca', 'hamburger', 'nugets']
#deleting / removing
del lanche[3]#by index number
print(lanche)#['bigMAc', 'batata', 'coca', 'nugets']
lanche.pop(3)#by index number (empty pops the last one)
print(lanche)#['bigMAc', 'batata', 'coca']
lanche.remove('coca')#by literal value
print(lanche)#['bigMAc', 'batata']
if 'batata' in lanche:
lanche.remove('batata')
print(lanche)#['bigMAc']
valores = list(range(4, 11))#create a list from 4 up to 10
print(valores)#[4, 5, 6, 7, 8, 9, 10]
valores1 = [8,4,6,3,7,1,5]
print(valores1)#[8, 4, 6, 3, 7, 1, 5]
valores1.sort()
print(valores1)#[1, 3, 4, 5, 6, 7, 8]
valores1.sort(reverse=True)
print(valores1)#[8, 7, 6, 5, 4, 3, 1]
print(len(valores1))#7
num = [2,5,9,1]
copia = num[:]
num[2] = 3
num.append(7)
#num.pop(2) would delete index 2
num.insert(2, 2)
print(copia)
if 5 in num:
num.remove(5)
else:
    print('could not find the number 5')
print(num)
print(f'This list has {len(num)} elements.')
numeros = []
numeros.append(5)
numeros.append(9)
numeros.append(4)
for cont, vlr in enumerate(numeros):
    print(f'At position {cont+1} I found the value {vlr}!') | 23.789474 | 65 | 0.667404 |
e6555fb3c7703dba771283562f37c0d6e2a391ae | 1,610 | py | Python | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | mplwidget.py | ryanjphelan/ENGG4801_RyanPhelan | 473c5e834cfee8c921722c4d78292b34c76c6514 | [
"MIT"
] | null | null | null | # ------------------------------------------------- -----
# -------------------- mplwidget.py --------------------
# -------------------------------------------------- ----
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtGui, QtWidgets, uic
from PyQt5.QtWidgets import QInputDialog, QLineEdit, QFileDialog, QGridLayout
from PyQt5.QtGui import QIcon
from matplotlib.backends.backend_qt5agg import FigureCanvas
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import ( NavigationToolbar2QT as NavigationToolbar )
class MplWidget(QWidget):
def __init__(self, parent = None):
QWidget.__init__(self, parent)
self.canvas = FigureCanvas(Figure(constrained_layout=True))
self.canvas.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.vertical_layout = QVBoxLayout()
self.vertical_layout.addStretch(1)
#self.canvas.axes = self.canvas.figure.add_subplot(111)
self.vertical_layout.addWidget(self.canvas, QtCore.Qt.AlignTop)
self.k = None
self.toolbar = NavigationToolbar(self.canvas, self)
self.vertical_layout.addWidget(self.toolbar, stretch=0)
self.setLayout(self.vertical_layout)
#self.addToolBar(NavigationToolbar(self.MplWidget.canvas, self))
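    # A minimal usage sketch (hypothetical caller code, e.g. for a widget promoted
    # in Qt Designer); names such as `window` and `signal` are placeholders only:
    #   widget = MplWidget(window)
    #   ax = widget.setAxes(2)     # reserve 2 stacked subplots, returns subplot 1
    #   ax.plot(signal)
    #   widget.canvas.draw()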
def setAxes(self, k):
self.k = k
self.canvas.axes = self.canvas.figure.add_subplot(k, 1, 1)
return self.canvas.axes
def getAxes(self, pos):
self.canvas.axes = self.canvas.figure.add_subplot(self.k, 1, pos)
return self.canvas.axes
def getOnlyAxes(self):
return self.canvas.axes
def clearAxes(self):
self.canvas.axes = None | 35 | 97 | 0.684472 |
86fd11b7a034e819057deb3cd0d3d0f1e1bd538e | 4,278 | py | Python | test/functional/interface_wescoin_cli.py | ahamium/WESCOIN | a15d64caa24dec050f997fe2031d518ee1d76836 | [
"MIT"
] | null | null | null | test/functional/interface_wescoin_cli.py | ahamium/WESCOIN | a15d64caa24dec050f997fe2031d518ee1d76836 | [
"MIT"
] | null | null | null | test/functional/interface_wescoin_cli.py | ahamium/WESCOIN | a15d64caa24dec050f997fe2031d518ee1d76836 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wescoin-cli"""
from test_framework.test_framework import WescoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestWescoinCli(WescoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Wescoin Core RPC client version" in cli_response)
self.log.info("Compare responses from gewalletinfo RPC and `wescoin-cli getwalletinfo`")
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `wescoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `wescoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestWescoinCli().main()
| 54.846154 | 160 | 0.705704 |
1d6f9a823384d2ce692cf5cbd6a5ecd95a429ecd | 34,955 | py | Python | haproxy/datadog_checks/haproxy/haproxy.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | haproxy/datadog_checks/haproxy/haproxy.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | haproxy/datadog_checks/haproxy/haproxy.py | remicalixte/integrations-core | b115e18c52820fe1a92495f538fdc14ddf83cfe1 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2012-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from __future__ import division
import copy
import re
import socket
import time
from collections import defaultdict, namedtuple
from six import PY2, iteritems
from six.moves.urllib.parse import urlparse
from datadog_checks.base import AgentCheck, is_affirmative, to_string
from datadog_checks.base.errors import CheckException
STATS_URL = "/;csv;norefresh"
EVENT_TYPE = SOURCE_TYPE_NAME = 'haproxy'
BUFSIZE = 8192
class Services(object):
BACKEND = 'BACKEND'
FRONTEND = 'FRONTEND'
ALL = (BACKEND, FRONTEND)
# Statuses that we normalize to and that are reported by
# `haproxy.count_per_status` by default (unless `collate_status_tags_per_host` is enabled)
ALL_STATUSES = ('up', 'open', 'down', 'maint', 'nolb')
AVAILABLE = 'available'
UNAVAILABLE = 'unavailable'
COLLATED_STATUSES = (AVAILABLE, UNAVAILABLE)
BACKEND_STATUS_TO_COLLATED = {'up': AVAILABLE, 'down': UNAVAILABLE, 'maint': UNAVAILABLE, 'nolb': UNAVAILABLE}
STATUS_TO_COLLATED = {
'up': AVAILABLE,
'open': AVAILABLE,
'down': UNAVAILABLE,
'maint': UNAVAILABLE,
'nolb': UNAVAILABLE,
}
STATUS_TO_SERVICE_CHECK = {
'up': AgentCheck.OK,
'down': AgentCheck.CRITICAL,
'no_check': AgentCheck.UNKNOWN,
'maint': AgentCheck.OK,
}
class StickTable(namedtuple("StickTable", ["name", "type", "size", "used"])):
SHOWTABLE_RE = re.compile(
r"# table: (?P<name>[^ ,]+), type: (?P<type>[^ ,]+), size:(?P<size>[0-9]+), used:(?P<used>[0-9]+)$"
)
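    # Illustrative only: a `show table` line such as
    #   "# table: http_req_rates, type: ip, size:204800, used:23"
    # (table name made up here) parses to
    #   StickTable(name='http_req_rates', type='ip', size=204800, used=23)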
@classmethod
def parse(cls, line):
items = cls.SHOWTABLE_RE.match(line)
if not items:
return None
return StickTable(
name=items.group('name'),
type=items.group('type'),
size=int(items.group('size')),
used=int(items.group('used')),
)
class HAProxy(AgentCheck):
def __init__(self, name, init_config, instances):
super(HAProxy, self).__init__(name, init_config, instances)
# Host status needs to persist across all checks.
# We'll create keys when they are referenced. See:
# https://en.wikipedia.org/wiki/Autovivification
# https://gist.github.com/hrldcpr/2012250
self.host_status = defaultdict(lambda: defaultdict(lambda: None))
METRICS = {
"qcur": ("gauge", "queue.current"),
"scur": ("gauge", "session.current"),
"slim": ("gauge", "session.limit"),
"spct": ("gauge", "session.pct"), # Calculated as: (scur/slim)*100
"stot": ("rate", "session.rate"),
"bin": ("rate", "bytes.in_rate"),
"bout": ("rate", "bytes.out_rate"),
"dreq": ("rate", "denied.req_rate"),
"dresp": ("rate", "denied.resp_rate"),
"ereq": ("rate", "errors.req_rate"),
"econ": ("rate", "errors.con_rate"),
"eresp": ("rate", "errors.resp_rate"),
"wretr": ("rate", "warnings.retr_rate"),
"wredis": ("rate", "warnings.redis_rate"),
"lastchg": ("gauge", "uptime"),
"req_rate": ("gauge", "requests.rate"), # HA Proxy 1.4 and higher
"req_tot": ("rate", "requests.tot_rate"), # HA Proxy 1.4 and higher
"hrsp_1xx": ("rate", "response.1xx"), # HA Proxy 1.4 and higher
"hrsp_2xx": ("rate", "response.2xx"), # HA Proxy 1.4 and higher
"hrsp_3xx": ("rate", "response.3xx"), # HA Proxy 1.4 and higher
"hrsp_4xx": ("rate", "response.4xx"), # HA Proxy 1.4 and higher
"hrsp_5xx": ("rate", "response.5xx"), # HA Proxy 1.4 and higher
"hrsp_other": ("rate", "response.other"), # HA Proxy 1.4 and higher
"qtime": ("gauge", "queue.time"), # HA Proxy 1.5 and higher
"ctime": ("gauge", "connect.time"), # HA Proxy 1.5 and higher
"rtime": ("gauge", "response.time"), # HA Proxy 1.5 and higher
"ttime": ("gauge", "session.time"), # HA Proxy 1.5 and higher
"conn_rate": ("gauge", "connections.rate"), # HA Proxy 1.7 and higher
"conn_tot": ("rate", "connections.tot_rate"), # HA Proxy 1.7 and higher
"intercepted": ("rate", "requests.intercepted"), # HA Proxy 1.7 and higher
}
SERVICE_CHECK_NAME = 'haproxy.backend_up'
HTTP_CONFIG_REMAPPER = {'disable_ssl_validation': {'name': 'tls_verify', 'invert': True, 'default': False}}
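    # A minimal sketch of an instance entry this check reads (keys taken from the
    # `instance.get(...)` calls in `check` below; the URL and tag values are
    # illustrative, not defaults):
    #   - url: http://localhost:8080/stats      # or unix:///var/run/haproxy.sock
    #     collect_aggregates_only: true
    #     collect_status_metrics: false
    #     tags:
    #       - env:test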
def check(self, instance):
url = instance.get('url')
self.log.debug('Processing HAProxy data for %s', url)
parsed_url = urlparse(url)
tables = None
if parsed_url.scheme == 'unix' or parsed_url.scheme == 'tcp':
info, data, tables = self._fetch_socket_data(parsed_url)
self._set_version_metadata(self._collect_version_from_socket(info))
uptime = self._collect_uptime_from_socket(info)
else:
try:
uptime = self._collect_info_from_http(url)
except Exception as e:
self.log.warning("Couldn't collect version or uptime information: %s", e)
uptime = None
data = self._fetch_url_data(url)
collect_aggregates_only = instance.get('collect_aggregates_only', True)
collect_status_metrics = is_affirmative(instance.get('collect_status_metrics', False))
collect_status_metrics_by_host = is_affirmative(instance.get('collect_status_metrics_by_host', False))
collate_status_tags_per_host = is_affirmative(instance.get('collate_status_tags_per_host', False))
count_status_by_service = is_affirmative(instance.get('count_status_by_service', True))
tag_service_check_by_host = is_affirmative(instance.get('tag_service_check_by_host', False))
enable_service_check = is_affirmative(instance.get('enable_service_check', False))
startup_grace_period = float(instance.get('startup_grace_seconds', 0))
services_incl_filter = instance.get('services_include', [])
services_excl_filter = instance.get('services_exclude', [])
tags_regex = instance.get('tags_regex', None)
custom_tags = instance.get('tags', [])
active_tag_bool = instance.get('active_tag', False)
active_tag = []
if active_tag_bool:
active_tag.append("active:%s" % ('true' if 'act' in data else 'false'))
process_events = instance.get('status_check', self.init_config.get('status_check', False))
if uptime is not None and uptime < startup_grace_period:
return
if tables:
self._process_stick_table_metrics(
tables,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=custom_tags,
)
self._process_data(
data,
collect_aggregates_only,
process_events,
url=url,
collect_status_metrics=collect_status_metrics,
collect_status_metrics_by_host=collect_status_metrics_by_host,
tag_service_check_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
collate_status_tags_per_host=collate_status_tags_per_host,
count_status_by_service=count_status_by_service,
custom_tags=custom_tags,
tags_regex=tags_regex,
active_tag=active_tag,
enable_service_check=enable_service_check,
)
def _fetch_url_data(self, url):
''' Hit a given http url and return the stats lines '''
# Try to fetch data from the stats URL
url = "%s%s" % (url, STATS_URL)
self.log.debug("Fetching haproxy stats from url: %s", url)
response = self.http.get(url)
response.raise_for_status()
return self._decode_response(response)
def _decode_response(self, response):
# it only needs additional decoding in py3, so skip it if it's py2
if PY2:
return response.content.splitlines()
else:
content = response.content
# If the content is a string, it can't be decoded again
# But if it's bytes, it can be decoded.
# So, check if it has the decode method
decode_fn = getattr(content, "decode", None)
if callable(decode_fn):
content = content.decode('utf-8')
return content.splitlines()
UPTIME_PARSER = re.compile(r"(?P<days>\d+)d (?P<hours>\d+)h(?P<minutes>\d+)m(?P<seconds>\d+)s")
@classmethod
def _parse_uptime(cls, uptime):
matched_uptime = re.search(cls.UPTIME_PARSER, uptime)
return (
int(matched_uptime.group('days')) * 86400
+ int(matched_uptime.group('hours')) * 3600
+ int(matched_uptime.group('minutes')) * 60
+ int(matched_uptime.group('seconds'))
)
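    # Illustrative only: an uptime string like "1d 2h03m04s" would yield
    # 1*86400 + 2*3600 + 3*60 + 4 = 93784 seconds.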
def _collect_info_from_http(self, url):
# the csv format does not offer version info, therefore we need to get the HTML page
self.log.debug("collecting version info for HAProxy from %s", url)
r = self.http.get(url)
r.raise_for_status()
raw_version = ""
raw_uptime = ""
uptime = None
for line in self._decode_response(r):
if "HAProxy version" in line:
raw_version = line
if "uptime = " in line:
raw_uptime = line
if raw_uptime and raw_version:
break
if raw_version == "":
self.log.debug("unable to find HAProxy version info")
else:
version = re.search(r"HAProxy version ([^,]+)", raw_version).group(1)
self.log.debug("HAProxy version is %s", version)
self.set_metadata('version', version)
if raw_uptime == "":
self.log.debug("unable to find HAProxy uptime")
else:
# It is not documented whether this output format is under any
# compatibility guarantee, but it hasn't yet changed since it was
# introduced
uptime = self._parse_uptime(raw_uptime)
return uptime
def _run_socket_commands(self, parsed_url, commands):
if parsed_url.scheme == 'tcp':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
splitted_loc = parsed_url.netloc.split(':')
host = splitted_loc[0]
port = int(splitted_loc[1])
sock.connect((host, port))
else:
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(parsed_url.path)
sock.send(b';'.join(commands) + b"\r\n")
response = ""
output = sock.recv(BUFSIZE)
while output:
response += output.decode("ASCII")
output = sock.recv(BUFSIZE)
sock.close()
responses = response.split('\n\n')
if len(responses) != len(commands) + 1 or responses[len(responses) - 1] != '':
raise CheckException("Got a different number of responses than expected")
return tuple(r.splitlines() for r in responses[: len(commands)])
def _fetch_socket_data(self, parsed_url):
''' Hit a given stats socket and return the stats lines '''
self.log.debug("Fetching haproxy stats from socket: %s", parsed_url.geturl())
info, stat = self._run_socket_commands(parsed_url, (b"show info", b"show stat"))
# the "show table" command was introduced in 1.5. Sending "show table"
# to a haproxy <1.5 results in no output at all even when multiple
# commands were sent, so we have to check the version and only send the
# command when supported
tables = []
try:
raw_version = self._collect_version_from_socket(info)
haproxy_major_version = tuple(int(vernum) for vernum in raw_version.split('.')[:2])
if len(haproxy_major_version) == 2 and haproxy_major_version >= (1, 5):
(tables,) = self._run_socket_commands(parsed_url, (b"show table",))
except (IndexError, ValueError) as e:
self.log.error("Could not parse version number '%s': %s", raw_version, e)
pass
return info, stat, tables
def _collect_version_from_socket(self, info):
for line in info:
key, value = line.split(':')
if key == 'Version':
return value
return ''
def _set_version_metadata(self, version):
if not version:
self.log.debug("unable to collect version info from socket")
else:
self.log.debug("HAProxy version is %s", version)
self.set_metadata('version', version)
def _collect_uptime_from_socket(self, info):
for line in info:
key, value = line.split(':')
if key == 'Uptime_sec':
return int(value)
def _process_data(
self,
data,
collect_aggregates_only,
process_events,
url=None,
collect_status_metrics=False,
collect_status_metrics_by_host=False,
tag_service_check_by_host=False,
services_incl_filter=None,
services_excl_filter=None,
collate_status_tags_per_host=False,
count_status_by_service=True,
custom_tags=None,
tags_regex=None,
active_tag=None,
enable_service_check=False,
):
''' Main data-processing loop. For each piece of useful data, we'll
either save a metric, save an event or both. '''
# Split the first line into an index of fields
# The line looks like (broken up onto multiple lines)
# "# pxname,svname,qcur,qmax,scur,smax,slim,
# stot,bin,bout,dreq,dresp,ereq,econ,eresp,wretr,
# wredis,status,weight,act,bck,chkfail,chkdown,lastchg,
# downtime,qlimit,pid,iid,sid,throttle,lbtot,tracked,
# type,rate,rate_lim,rate_max,"
fields = []
for f in data[0].split(','):
if f:
f = f.replace('# ', '')
fields.append(f.strip())
self.hosts_statuses = defaultdict(int)
back_or_front = None
# Sanitize CSV, handle line breaks
data = self._sanitize_lines(data)
custom_tags = [] if custom_tags is None else custom_tags
active_tag = [] if active_tag is None else active_tag
# First initialize here so that it is defined whether or not we enter the for loop
line_tags = list(custom_tags)
# Skip the first line, go backwards to set back_or_front
for line in data[:0:-1]:
if not line.strip():
continue
# Store each line's values in a dictionary
data_dict = self._line_to_dict(fields, line)
if self._is_aggregate(data_dict):
back_or_front = data_dict['svname']
self._update_data_dict(data_dict, back_or_front)
self._update_hosts_statuses_if_needed(
collect_status_metrics, collect_status_metrics_by_host, data_dict, self.hosts_statuses
)
# Clone the list to avoid extending the original
# which would carry over previous iteration tags
line_tags = list(custom_tags)
regex_tags = self._tag_from_regex(tags_regex, data_dict['pxname'])
if regex_tags:
line_tags.extend(regex_tags)
if self._should_process(data_dict, collect_aggregates_only):
# update status
# Send the list of data to the metric and event callbacks
self._process_metrics(
data_dict,
url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=line_tags,
active_tag=active_tag,
)
if process_events:
self._process_event(
data_dict,
url,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=line_tags,
)
if enable_service_check:
self._process_service_check(
data_dict,
url,
tag_by_host=tag_service_check_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=line_tags,
)
if collect_status_metrics:
self._process_status_metric(
self.hosts_statuses,
collect_status_metrics_by_host,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
collate_status_tags_per_host=collate_status_tags_per_host,
count_status_by_service=count_status_by_service,
custom_tags=line_tags,
active_tag=active_tag,
)
self._process_backend_hosts_metric(
self.hosts_statuses,
services_incl_filter=services_incl_filter,
services_excl_filter=services_excl_filter,
custom_tags=line_tags,
active_tag=active_tag,
)
return data
def _sanitize_lines(self, data):
sanitized = []
def char_count(line, char):
count = 0
for c in line:
if c is char:
count += 1
return count
clean = ''
double_quotes = 0
for line in data:
double_quotes += char_count(line, '"')
clean += line
if double_quotes % 2 == 0:
sanitized.append(clean.replace('\n', '').replace('\r', ''))
double_quotes = 0
clean = ''
return sanitized
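    # Illustrative only: if a quoted CSV field was broken across two raw lines,
    # e.g. ['be_app,"srv', '1",0'], the loop above keeps accumulating until the
    # double quotes balance out and emits the single line 'be_app,"srv1",0'.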
def _line_to_dict(self, fields, line):
data_dict = {}
values = line.split(',')
if len(values) > len(fields):
values = self._gather_quoted_values(values)
for i, val in enumerate(values):
if val:
try:
                    # Try converting to a float; if that fails, just leave it as a string
val = float(val)
except Exception:
pass
data_dict[fields[i]] = val
if 'status' in data_dict:
data_dict['status'] = self._normalize_status(data_dict['status'])
return data_dict
def _gather_quoted_values(self, values):
gathered_values = []
previous = ''
for val in values:
if val.startswith('"') and not val.endswith('"'):
previous = val
elif previous:
if val.endswith('"'):
gathered_values.append(previous + val)
previous = ''
else:
previous += val
else:
gathered_values.append(val)
return gathered_values
def _update_data_dict(self, data_dict, back_or_front):
"""
Adds spct if relevant, adds service
"""
data_dict['back_or_front'] = back_or_front
# The percentage of used sessions based on 'scur' and 'slim'
if 'slim' in data_dict and 'scur' in data_dict:
try:
data_dict['spct'] = (data_dict['scur'] / data_dict['slim']) * 100
except (TypeError, ZeroDivisionError):
pass
def _is_aggregate(self, data_dict):
return data_dict['svname'] in Services.ALL
def _update_hosts_statuses_if_needed(
self, collect_status_metrics, collect_status_metrics_by_host, data_dict, hosts_statuses
):
if data_dict['svname'] == Services.BACKEND:
return
if collect_status_metrics and 'status' in data_dict and 'pxname' in data_dict:
if collect_status_metrics_by_host and 'svname' in data_dict:
key = (data_dict['pxname'], data_dict['back_or_front'], data_dict['svname'], data_dict['status'])
else:
key = (data_dict['pxname'], data_dict['back_or_front'], data_dict['status'])
hosts_statuses[key] += 1
def _should_process(self, data_dict, collect_aggregates_only):
"""if collect_aggregates_only, we process only the aggregates
"""
if is_affirmative(collect_aggregates_only):
return self._is_aggregate(data_dict)
elif str(collect_aggregates_only).lower() == 'both':
return True
return data_dict['svname'] != Services.BACKEND
def _is_service_excl_filtered(self, service_name, services_incl_filter, services_excl_filter):
if self._tag_match_patterns(service_name, services_excl_filter):
if self._tag_match_patterns(service_name, services_incl_filter):
return False
return True
return False
def _tag_match_patterns(self, tag, filters):
if not filters:
return False
for rule in filters:
if re.search(rule, tag):
return True
return False
def _tag_from_regex(self, tags_regex, service_name):
"""
Use a named regexp on the current service_name to create extra tags
Example HAProxy service name: be_edge_http_sre-prod_elk
Example named regexp: be_edge_http_(?P<team>[a-z]+)\\-(?P<env>[a-z]+)_(?P<app>.*)
Resulting tags: ['team:sre','env:prod','app:elk']
"""
if not tags_regex or not service_name:
return []
match = re.compile(tags_regex).match(service_name)
if not match:
return []
# match.groupdict() returns tags dictionary in the form of {'name': 'value'}
# convert it to Datadog tag LIST: ['name:value']
return ["%s:%s" % (name, value) for name, value in iteritems(match.groupdict())]
@staticmethod
def _normalize_status(status):
"""
Try to normalize the HAProxy status as one of the statuses defined in `ALL_STATUSES`,
if it can't be matched return the status as-is in a tag-friendly format
ex: 'UP 1/2' -> 'up'
'no check' -> 'no_check'
"""
formatted_status = status.lower().replace(" ", "_")
for normalized_status in Services.ALL_STATUSES:
if formatted_status.startswith(normalized_status):
return normalized_status
return formatted_status
def _process_backend_hosts_metric(
self, hosts_statuses, services_incl_filter=None, services_excl_filter=None, custom_tags=None, active_tag=None
):
agg_statuses = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
custom_tags = [] if custom_tags is None else custom_tags
active_tag = [] if active_tag is None else active_tag
for host_status, count in iteritems(hosts_statuses):
try:
service, back_or_front, hostname, status = host_status
except Exception:
service, back_or_front, status = host_status
if back_or_front == 'FRONTEND':
continue
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
collated_status = Services.BACKEND_STATUS_TO_COLLATED.get(status)
if collated_status:
agg_statuses[service][collated_status] += count
else:
# create the entries for this service anyway
agg_statuses[service]
for service in agg_statuses:
tags = ['haproxy_service:%s' % service]
tags.extend(custom_tags)
tags.extend(active_tag)
self._handle_legacy_service_tag(tags, service)
self.gauge(
'haproxy.backend_hosts', agg_statuses[service][Services.AVAILABLE], tags=tags + ['available:true']
)
self.gauge(
'haproxy.backend_hosts', agg_statuses[service][Services.UNAVAILABLE], tags=tags + ['available:false']
)
return agg_statuses
def _process_status_metric(
self,
hosts_statuses,
collect_status_metrics_by_host,
services_incl_filter=None,
services_excl_filter=None,
collate_status_tags_per_host=False,
count_status_by_service=True,
custom_tags=None,
active_tag=None,
):
agg_statuses_counter = defaultdict(lambda: {status: 0 for status in Services.COLLATED_STATUSES})
custom_tags = [] if custom_tags is None else custom_tags
active_tag = [] if active_tag is None else active_tag
# Initialize `statuses_counter`: every value is a defaultdict initialized with the correct
# keys, which depends on the `collate_status_tags_per_host` option
reported_statuses = Services.ALL_STATUSES
if collate_status_tags_per_host:
reported_statuses = Services.COLLATED_STATUSES
reported_statuses_dict = defaultdict(int)
for reported_status in reported_statuses:
reported_statuses_dict[reported_status] = 0
statuses_counter = defaultdict(lambda: copy.copy(reported_statuses_dict))
for host_status, count in iteritems(hosts_statuses):
hostname = None
try:
service, _, hostname, status = host_status
except Exception:
service, _, status = host_status
if collect_status_metrics_by_host:
self.warning(
'`collect_status_metrics_by_host` is enabled but no host info could be extracted from HAProxy '
'stats endpoint for %s',
service,
)
if self._is_service_excl_filtered(service, services_incl_filter, services_excl_filter):
continue
tags = []
if count_status_by_service:
tags.append('haproxy_service:%s' % service)
self._handle_legacy_service_tag(tags, service)
if hostname:
tags.append('backend:%s' % hostname)
tags.extend(custom_tags)
tags.extend(active_tag)
counter_status = status
if collate_status_tags_per_host:
# An unknown status will be sent as UNAVAILABLE
counter_status = Services.STATUS_TO_COLLATED.get(status, Services.UNAVAILABLE)
statuses_counter[tuple(tags)][counter_status] += count
# Compute aggregates with collated statuses. If collate_status_tags_per_host is enabled we
# already send collated statuses with fine-grained tags, so no need to compute/send these aggregates
if not collate_status_tags_per_host:
agg_tags = []
if count_status_by_service:
agg_tags.append('haproxy_service:%s' % service)
self._handle_legacy_service_tag(agg_tags, service)
# An unknown status will be sent as UNAVAILABLE
status_key = Services.STATUS_TO_COLLATED.get(status, Services.UNAVAILABLE)
agg_statuses_counter[tuple(agg_tags)][status_key] += count
for tags, count_per_status in iteritems(statuses_counter):
for status, count in iteritems(count_per_status):
self.gauge('haproxy.count_per_status', count, tags=tags + ('status:%s' % status,))
# Send aggregates
for service_tags, service_agg_statuses in iteritems(agg_statuses_counter):
for status, count in iteritems(service_agg_statuses):
self.gauge("haproxy.count_per_status", count, tags=service_tags + ('status:%s' % status,))
def _process_metrics(
self, data, url, services_incl_filter=None, services_excl_filter=None, custom_tags=None, active_tag=None
):
"""
Data is a dictionary related to one host
(one line) extracted from the csv.
It should look like:
{'pxname':'dogweb', 'svname':'i-4562165', 'scur':'42', ...}
"""
hostname = data['svname']
service_name = data['pxname']
back_or_front = data['back_or_front']
custom_tags = [] if custom_tags is None else custom_tags
active_tag = [] if active_tag is None else active_tag
tags = ["type:%s" % back_or_front, "instance_url:%s" % url, "haproxy_service:%s" % service_name]
tags.extend(custom_tags)
tags.extend(active_tag)
self._handle_legacy_service_tag(tags, service_name)
if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter):
return
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
if data.get('addr'):
tags.append('server_address:{}'.format(data.get('addr')))
for key, value in data.items():
if HAProxy.METRICS.get(key):
suffix = HAProxy.METRICS[key][1]
name = "haproxy.%s.%s" % (back_or_front.lower(), suffix)
try:
if HAProxy.METRICS[key][0] == 'rate':
self.rate(name, float(value), tags=tags)
else:
self.gauge(name, float(value), tags=tags)
except ValueError:
pass
def _process_stick_table_metrics(
self, data, services_incl_filter=None, services_excl_filter=None, custom_tags=None
):
"""
Stick table metrics processing. Two metrics will be created for each stick table (current and max size)
"""
custom_tags = [] if not custom_tags else custom_tags
for line in data:
table = StickTable.parse(line)
if table is None:
continue
if self._is_service_excl_filtered(table.name, services_incl_filter, services_excl_filter):
continue
tags = ["haproxy_service:%s" % table.name, "stick_type:%s" % table.type] + custom_tags
self.gauge("haproxy.sticktable.size", float(table.size), tags=tags)
self.gauge("haproxy.sticktable.used", float(table.used), tags=tags)
def _process_event(self, data, url, services_incl_filter=None, services_excl_filter=None, custom_tags=None):
'''
Main event processing loop. An event will be created for a service
status change.
Service checks on the server side can be used to provide the same functionality
'''
hostname = data['svname']
service_name = data['pxname']
key = "%s:%s" % (hostname, service_name)
status = self.host_status[url][key]
custom_tags = [] if custom_tags is None else custom_tags
if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter):
return
data_status = data['status']
if status is None:
self.host_status[url][key] = data_status
return
if status != data_status and data_status in ('up', 'down'):
# If the status of a host has changed, we trigger an event
try:
lastchg = int(data['lastchg'])
except Exception:
lastchg = 0
# Create the event object
ev = self._create_event(
data_status, hostname, lastchg, service_name, data['back_or_front'], custom_tags=custom_tags
)
self.event(ev)
# Store this host status so we can check against it later
self.host_status[url][key] = data_status
def _create_event(self, status, hostname, lastchg, service_name, back_or_front, custom_tags=None):
custom_tags = [] if custom_tags is None else custom_tags
if status == 'down':
alert_type = "error"
title = "%s reported %s:%s %s" % (self.hostname, service_name, hostname, status.upper())
else:
if status == "up":
alert_type = "success"
else:
alert_type = "info"
title = "%s reported %s:%s back and %s" % (self.hostname, service_name, hostname, status.upper())
tags = ["haproxy_service:%s" % service_name]
if back_or_front == Services.BACKEND:
tags.append('backend:%s' % hostname)
tags.extend(custom_tags)
self._handle_legacy_service_tag(tags, service_name)
return {
'timestamp': int(time.time() - lastchg),
'event_type': EVENT_TYPE,
'host': self.hostname,
'msg_title': title,
'alert_type': alert_type,
"source_type_name": SOURCE_TYPE_NAME,
"event_object": hostname,
"tags": tags,
}
def _process_service_check(
self, data, url, tag_by_host=False, services_incl_filter=None, services_excl_filter=None, custom_tags=None
):
''' Report a service check, tagged by the service and the backend.
Statuses are defined in `STATUS_TO_SERVICE_CHECK` mapping.
'''
custom_tags = [] if custom_tags is None else custom_tags
service_name = data['pxname']
status = data['status']
haproxy_hostname = to_string(self.hostname)
check_hostname = haproxy_hostname if tag_by_host else ''
if self._is_service_excl_filtered(service_name, services_incl_filter, services_excl_filter):
return
if status in Services.STATUS_TO_SERVICE_CHECK:
service_check_tags = ["haproxy_service:%s" % service_name]
service_check_tags.extend(custom_tags)
self._handle_legacy_service_tag(service_check_tags, service_name)
hostname = data['svname']
if data['back_or_front'] == Services.BACKEND:
service_check_tags.append('backend:%s' % hostname)
status = Services.STATUS_TO_SERVICE_CHECK[status]
message = "%s reported %s:%s %s" % (haproxy_hostname, service_name, hostname, status)
self.service_check(
self.SERVICE_CHECK_NAME, status, message=message, hostname=check_hostname, tags=service_check_tags
)
def _handle_legacy_service_tag(self, tags, service):
if not self.instance.get('disable_legacy_service_tag', False):
self._log_deprecation('service_tag', 'haproxy_service')
tags.append('service:{}'.format(service))
| 39.857469 | 119 | 0.604577 |
e5c910d883d4bb3e6a644df8220432f5d1f01b8d | 473 | py | Python | logger.py | sjsafranek/mtg_price_tracker | 058c77cda241712d7a44c980ae64f325e5905230 | [
"MIT"
] | null | null | null | logger.py | sjsafranek/mtg_price_tracker | 058c77cda241712d7a44c980ae64f325e5905230 | [
"MIT"
] | null | null | null | logger.py | sjsafranek/mtg_price_tracker | 058c77cda241712d7a44c980ae64f325e5905230 | [
"MIT"
] | null | null | null |
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"%(asctime)s [%(levelname)s] [%(threadName)s] %(filename)s %(funcName)s:%(lineno)d %(message)s",
datefmt='%Y-%m-%d %H:%M:%S')
streamhandler = logging.StreamHandler()
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
filehandler = logging.FileHandler('app.log')
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
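# Minimal usage sketch (assuming other modules do `from logger import logger`;
# the message below is illustrative only):
#   logger.info("price tracker started")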
| 26.277778 | 103 | 0.742072 |
0a515115ee0c557df7ea1481a6d4a3e603f552ec | 9,403 | py | Python | SALib/tests/test_optimal_trajectories.py | stijnvanhoey/SALib | e0c3bdf2fe5acbcb0524b083b158db7c635fe023 | [
"MIT"
] | null | null | null | SALib/tests/test_optimal_trajectories.py | stijnvanhoey/SALib | e0c3bdf2fe5acbcb0524b083b158db7c635fe023 | [
"MIT"
] | null | null | null | SALib/tests/test_optimal_trajectories.py | stijnvanhoey/SALib | e0c3bdf2fe5acbcb0524b083b158db7c635fe023 | [
"MIT"
] | 1 | 2020-05-21T13:15:18.000Z | 2020-05-21T13:15:18.000Z | from unittest import skipUnless
import numpy as np
from nose.tools import raises, with_setup
from numpy.testing import assert_equal
from SALib.sample.morris import sample_oat, \
find_optimum_combination, \
compute_optimised_trajectories, \
sample_groups
from SALib.sample.optimal_trajectories import return_max_combo
from SALib.util import read_param_file
from .test_util import setup_function
from .test_morris import setup_param_file_with_groups_prime
try:
import gurobipy
except ImportError:
_has_gurobi = False
else:
_has_gurobi = True
@with_setup(setup_param_file_with_groups_prime)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
def test_optimal_sample_with_groups():
'''
Tests that the combinatorial optimisation approach matches
that of the brute force approach
'''
param_file = "SALib/tests/test_param_file_w_groups_prime.txt"
problem = read_param_file(param_file)
N = 10
num_levels = 8
grid_jump = 4
k_choices = 4
num_params = problem['num_vars']
sample = sample_oat(problem,
N,
num_levels,
grid_jump)
actual = return_max_combo(sample,
N,
num_params,
k_choices)
desired = find_optimum_combination(sample,
N,
num_params,
k_choices)
assert_equal(actual, desired)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
@with_setup(setup_param_file_with_groups_prime)
def test_size_of_trajectories_with_groups():
'''
Tests that the number of trajectories produced is computed
correctly (i.e. that the size of the trajectories is a function
of the number of groups, rather than the number of variables
when groups are used.
There are seven variables and three groups.
    With N=11:
1. the sample ignoring groups (i.e. the call to `sample_oat')
should be of size N*(D+1)-by-D.
2. the sample with groups should be of size N*(G+1)-by-D
When k=4:
3. the optimal sample ignoring groups should be of size k*(D+1)-by-D
4. the optimal sample with groups should be of size k*(G+1)-by-D
'''
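    # Worked numbers for this test (D=7 variables, G=3 groups, N=11, k=4):
    #   N*(D+1) = 88 rows, N*(G+1) = 44 rows, k*(D+1) = 32 rows, k*(G+1) = 16 rows,
    #   each with D=7 columns.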
param_file = "SALib/tests/test_param_file_w_groups_prime.txt"
group_problem = read_param_file(param_file)
no_group_problem = read_param_file(param_file)
no_group_problem['groups'] = None
N = 11
num_levels = 8
grid_jump = 4
k_choices = 4
num_params = group_problem['num_vars']
num_groups = 3
# Test 1. dimensions of sample ignoring groups
sample = sample_oat(no_group_problem,
N,
num_levels,
grid_jump)
size_x, size_y = sample.shape
assert_equal(size_x, N * (num_params + 1))
assert_equal(size_y, num_params)
# Test 2. dimensions of sample with groups
group_sample = sample_groups(group_problem,
N,
num_levels,
grid_jump)
size_x, size_y = group_sample.shape
assert_equal(size_x, N * (num_groups + 1))
assert_equal(size_y, num_params)
# Test 3. dimensions of optimal sample without groups
optimal_sample_without_groups = compute_optimised_trajectories(no_group_problem,
sample,
N,
k_choices)
size_x, size_y = optimal_sample_without_groups.shape
assert_equal(size_x, k_choices * (num_params + 1))
assert_equal(size_y, num_params)
# Test 4. dimensions of optimal sample with groups
optimal_sample_with_groups = compute_optimised_trajectories(group_problem,
group_sample,
N,
k_choices)
size_x, size_y = optimal_sample_with_groups.shape
assert_equal(size_x, k_choices * (num_groups + 1))
assert_equal(size_y, num_params)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
@with_setup(setup_function())
def test_optimal_combinations():
N = 6
param_file = "SALib/tests/test_params.txt"
problem = read_param_file(param_file)
num_params = problem['num_vars']
num_levels = 10
grid_jump = num_levels / 2
k_choices = 4
morris_sample = sample_oat(problem, N, num_levels, grid_jump)
actual = return_max_combo(morris_sample,
N,
num_params,
k_choices)
desired = find_optimum_combination(morris_sample,
N,
num_params,
k_choices)
assert_equal(actual, desired)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
@with_setup(setup_function())
def test_optimised_trajectories_without_groups():
"""
Tests that the optimisation problem gives
the same answer as the brute force problem
(for small values of `k_choices` and `N`),
particularly when there are two or more identical
trajectories
"""
N = 6
param_file = "SALib/tests/test_params.txt"
problem = read_param_file(param_file)
num_levels = 4
k_choices = 4
num_params = problem['num_vars']
groups = problem['groups']
# 6 trajectories, with 5th and 6th identical
input_sample = np.array([[ 0.33333333, 0.66666667],
[ 1. ,0.66666667],
[ 1. ,0. ],
[ 0. ,0.33333333],
[ 0. ,1. ],
[ 0.66666667 ,1. ],
[ 0.66666667 ,0.33333333],
[ 0.66666667 ,1. ],
[ 0. ,1. ],
[ 0.66666667 ,1. ],
[ 0.66666667 ,0.33333333],
[ 0. ,0.33333333],
[ 1. ,1. ],
[ 1. ,0.33333333],
[ 0.33333333 ,0.33333333],
[ 1. ,1. ],
[ 1. ,0.33333333],
[ 0.33333333 ,0.33333333]], dtype=np.float32)
print(input_sample)
# From gurobi optimal trajectories
actual = return_max_combo(input_sample,
N,
num_params,
k_choices,
groups)
desired = find_optimum_combination(input_sample,
N,
num_params,
k_choices,
groups)
assert_equal(actual, desired)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
@with_setup(setup_param_file_with_groups_prime)
def test_optimised_trajectories_with_groups():
"""
Tests that the optimisation problem gives
the same answer as the brute force problem
(for small values of `k_choices` and `N`)
with groups
"""
N = 11
param_file = "SALib/tests/test_param_file_w_groups_prime.txt"
problem = read_param_file(param_file)
num_levels = 4
grid_jump = num_levels / 2
k_choices = 4
num_params = problem['num_vars']
groups = problem['groups']
input_sample = sample_groups(problem, N, num_levels, grid_jump)
# From gurobi optimal trajectories
actual = return_max_combo(input_sample,
N,
num_params,
k_choices,
groups)
desired = find_optimum_combination(input_sample,
N,
num_params,
k_choices,
groups)
assert_equal(actual, desired)
@skipUnless(_has_gurobi, "Gurobi is required for combinatorial optimisation")
@with_setup(setup_function())
@raises(ValueError)
def test_raise_error_if_k_gt_N():
"""
Check that an error is raised if `k_choices` is greater than (or equal to) `N`
"""
N = 4
param_file = "SALib/tests/test_params.txt"
problem = read_param_file(param_file)
num_levels = 4
grid_jump = num_levels / 2
k_choices = 6
morris_sample = sample_oat(problem, N, num_levels, grid_jump)
compute_optimised_trajectories(problem,
morris_sample,
N,
k_choices)
| 32.992982 | 84 | 0.535255 |
fb7dde954da591f58849c9250387184af70578bd | 2,553 | py | Python | extensions/rules/graph.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | 3 | 2015-03-17T01:34:14.000Z | 2015-04-11T10:35:53.000Z | extensions/rules/graph.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | null | null | null | extensions/rules/graph.py | VictoriaRoux/oppia | 5ae2a7f0b5c85d6e28222844d22ebdbfb81923c6 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for Graph objects."""
__author__ = 'Zhan Xiong Chin'
from extensions.rules import base
import itertools
# TODO(czx): Speed up the isomorphism checker?
class IsIsomorphicTo(base.GraphRule):
description = 'is isomorphic to {{g|Graph}}, including matching labels'
is_generic = False
def _evaluate(self, subject):
if len(subject['vertices']) != len(self.g['vertices']):
return False
# Construct adjacency matrices
def construct_adjacency_matrix(graph):
ret = [[None for v in graph['vertices']] for v in graph['vertices']]
for edge in graph['edges']:
weight = edge['weight'] if graph['isWeighted'] else 1
ret[edge['src']][edge['dst']] = weight
if not graph['isDirected']:
ret[edge['dst']][edge['src']] = weight
return ret
adj = construct_adjacency_matrix(subject)
adj2 = construct_adjacency_matrix(self.g)
# Check against every permutation of vertices.
# The new index of vertex i in self.g is perm[i].
num_vertices = len(self.g['vertices'])
for perm in itertools.permutations(range(num_vertices)):
# Test matching labels
if subject['isLabeled'] and any([
self.g['vertices'][i]['label'] !=
subject['vertices'][perm[i]]['label']
for i in xrange(num_vertices)]):
continue
# Test isomorphism
found_isomorphism = True
for i in xrange(num_vertices):
for j in xrange(num_vertices):
if adj[perm[i]][perm[j]] != adj2[i][j]:
found_isomorphism = False
break
if not found_isomorphism:
break
if found_isomorphism:
return True
return False
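# A minimal sketch of the graph dict shape this rule consumes, inferred from the
# fields accessed above (the exact spec lives in the Graph object definition, so
# treat this as an assumption):
#   {
#       'vertices': [{'label': 'a'}, {'label': 'b'}],
#       'edges': [{'src': 0, 'dst': 1, 'weight': 1}],
#       'isDirected': False, 'isWeighted': False, 'isLabeled': True,
#   }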
| 37.544118 | 80 | 0.601253 |
7e34dbc4e6a0da3481d978b314a0b21135aeafcd | 813 | py | Python | python/event_convert.py | jassey/event_detail | 7584eb3371a2e43b2b2a6f656c27885d8802af98 | [
"Apache-2.0"
] | 1 | 2015-09-28T06:27:27.000Z | 2015-09-28T06:27:27.000Z | python/event_convert.py | jassey/event_detail | 7584eb3371a2e43b2b2a6f656c27885d8802af98 | [
"Apache-2.0"
] | null | null | null | python/event_convert.py | jassey/event_detail | 7584eb3371a2e43b2b2a6f656c27885d8802af98 | [
"Apache-2.0"
] | null | null | null | #
# Split each event row into one row per participant
#
# Input:
# ID,日期,名称,创建者,参与者列表  (ID, date, name, creator, participant list)
# 1,2015-09-01,"设计师面试",HR,"小李,小王,小严"
#
# Output:
# ID,日期,名称,创建者,参与者  (ID, date, name, creator, participant)
# 1,2015-09-01,设计师面试,HR,小李
# 1,2015-09-01,设计师面试,HR,小王
# 1,2015-09-01,设计师面试,HR,小严
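# Assumes an `event.csv` file with the columns shown above sits next to this
# script; running it writes the expanded rows to `event_detail.csv`.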
import csv
source = 'event.csv'
dest = 'event_detail.csv'
def get_detail():
content = []
with open(source, 'r', encoding='utf-8') as f:
reader = csv.DictReader(f)
for row in reader:
member_list = row['参与者列表'].split(',')
del row['参与者列表']
for member in member_list:
item = row.copy()
item["参与者"] = member
content.append(item)
return content
with open(dest, 'w', encoding='utf-8') as f:
writer = csv.DictWriter(f, ['ID', '日期', '名称', '创建者', '参与者'])
writer.writeheader()
writer.writerows(get_detail())
| 21.972973 | 64 | 0.553506 |
70f6555e23c64229f6e3852bb33fcdc67bf8998f | 6,939 | py | Python | scvi/models/distributions.py | gokceneraslan/scVI | afab47cf3b4b76acf41555e99665a3bd86f8320a | [
"MIT"
] | 1 | 2022-03-17T17:19:10.000Z | 2022-03-17T17:19:10.000Z | scvi/models/distributions.py | gokceneraslan/scVI | afab47cf3b4b76acf41555e99665a3bd86f8320a | [
"MIT"
] | null | null | null | scvi/models/distributions.py | gokceneraslan/scVI | afab47cf3b4b76acf41555e99665a3bd86f8320a | [
"MIT"
] | null | null | null | from typing import Union, Tuple
import warnings
import torch
from torch.distributions import (
constraints,
Distribution,
Gamma,
Poisson,
)
from torch.distributions.utils import (
broadcast_all,
probs_to_logits,
lazy_property,
logits_to_probs,
)
from scvi.models.log_likelihood import log_nb_positive, log_zinb_positive
def _convert_mean_disp_to_counts_logits(mu, theta, eps=1e-6):
r"""NB parameterizations conversion
:param mu: mean of the NB distribution.
:param theta: inverse overdispersion.
:param eps: constant used for numerical log stability.
:return: the number of failures until the experiment is stopped
        and the success logits.
"""
assert (mu is None) == (
theta is None
), "If using the mu/theta NB parameterization, both parameters must be specified"
logits = (mu + eps).log() - (theta + eps).log()
total_count = theta
return total_count, logits
def _convert_counts_logits_to_mean_disp(total_count, logits):
"""NB parameterizations conversion
:param total_count: Number of failures until the experiment is stopped.
:param logits: success logits.
:return: the mean and inverse overdispersion of the NB distribution.
"""
theta = total_count
mu = logits.exp() * theta
return mu, theta
class NegativeBinomial(Distribution):
r"""Negative Binomial(NB) distribution using two parameterizations:
- (`total_count`, `probs`) where `total_count` is the number of failures
until the experiment is stopped
and `probs` the success probability.
- The (`mu`, `theta`) parameterization is the one used by scVI. These parameters respectively
control the mean and overdispersion of the distribution.
`_convert_mean_disp_to_counts_logits` and `_convert_counts_logits_to_mean_disp` provide ways to convert
one parameterization to another.
"""
arg_constraints = {
"mu": constraints.greater_than_eq(0),
"theta": constraints.greater_than_eq(0),
}
support = constraints.nonnegative_integer
def __init__(
self,
total_count: torch.Tensor = None,
probs: torch.Tensor = None,
logits: torch.Tensor = None,
mu: torch.Tensor = None,
theta: torch.Tensor = None,
validate_args=True,
):
self._eps = 1e-8
if (mu is None) == (total_count is None):
raise ValueError(
"Please use one of the two possible parameterizations. Refer to the documentation for more information."
)
using_param_1 = total_count is not None and (
logits is not None or probs is not None
)
if using_param_1:
logits = logits if logits is not None else probs_to_logits(probs)
total_count = total_count.type_as(logits)
total_count, logits = broadcast_all(total_count, logits)
mu, theta = _convert_counts_logits_to_mean_disp(total_count, logits)
else:
mu, theta = broadcast_all(mu, theta)
self.mu = mu
self.theta = theta
super().__init__(validate_args=validate_args)
def sample(self, sample_shape=torch.Size()):
gamma_d = self._gamma()
p_means = gamma_d.sample(sample_shape)
# Clamping as distributions objects can have buggy behaviors when
# their parameters are too high
l_train = torch.clamp(p_means, max=1e8)
counts = Poisson(
l_train
).sample() # Shape : (n_samples, n_cells_batch, n_genes)
return counts
def log_prob(self, value):
if self._validate_args:
try:
self._validate_sample(value)
except ValueError:
warnings.warn(
"The value argument must be within the support of the distribution",
UserWarning,
)
return log_nb_positive(value, mu=self.mu, theta=self.theta, eps=self._eps)
def _gamma(self):
concentration = self.theta
rate = self.theta / self.mu
# Important remark: Gamma is parametrized by the rate = 1/scale!
gamma_d = Gamma(concentration=concentration, rate=rate)
return gamma_d
class ZeroInflatedNegativeBinomial(NegativeBinomial):
r"""Zero Inflated Negative Binomial distribution.
    zi_logits correspond to the zero-inflation logits; the negative binomial
    component has mean `mu` and variance `mu + mu ** 2 / theta`.
    The negative binomial component parameters can follow two parameterizations:
- The first one corresponds to the parameterization NB(`total_count`, `probs`)
where `total_count` is the number of failures until the experiment is stopped
and `probs` the success probability.
- The (`mu`, `theta`) parameterization is the one used by scVI. These parameters respectively
control the mean and overdispersion of the distribution.
`_convert_mean_disp_to_counts_logits` and `_convert_counts_logits_to_mean_disp`
provide ways to convert one parameterization to another.
"""
arg_constraints = {
"mu": constraints.greater_than_eq(0),
"theta": constraints.greater_than_eq(0),
"zi_probs": constraints.half_open_interval(0.0, 1.0),
"zi_logits": constraints.real,
}
support = constraints.nonnegative_integer
def __init__(
self,
total_count: torch.Tensor = None,
probs: torch.Tensor = None,
logits: torch.Tensor = None,
mu: torch.Tensor = None,
theta: torch.Tensor = None,
zi_logits: torch.Tensor = None,
validate_args=True,
):
super().__init__(
total_count=total_count,
probs=probs,
logits=logits,
mu=mu,
theta=theta,
validate_args=validate_args,
)
self.zi_logits, self.mu, self.theta = broadcast_all(
zi_logits, self.mu, self.theta
)
@lazy_property
def zi_logits(self) -> torch.Tensor:
return probs_to_logits(self.zi_probs, is_binary=True)
@lazy_property
def zi_probs(self) -> torch.Tensor:
return logits_to_probs(self.zi_logits, is_binary=True)
def sample(
self, sample_shape: Union[torch.Size, Tuple] = torch.Size()
) -> torch.Tensor:
with torch.no_grad():
samp = super().sample(sample_shape=sample_shape)
is_zero = torch.rand_like(samp) <= self.zi_probs
samp[is_zero] = 0.0
return samp
def log_prob(self, value: torch.Tensor) -> torch.Tensor:
try:
self._validate_sample(value)
except ValueError:
warnings.warn(
"The value argument must be within the support of the distribution",
UserWarning,
)
return log_zinb_positive(value, self.mu, self.theta, self.zi_logits, eps=1e-08)
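# A small self-check sketch (not part of the scvi package): it exercises the two
# NB parameterizations described in the docstrings above. The values are
# arbitrary; the only assumption is that the torch import above is available.
if __name__ == "__main__":
    mu = torch.tensor([10.0, 3.0])
    theta = torch.tensor([2.0, 5.0])
    # The two conversion helpers are (numerically) inverse to each other.
    total_count, logits = _convert_mean_disp_to_counts_logits(mu, theta)
    mu2, theta2 = _convert_counts_logits_to_mean_disp(total_count, logits)
    assert torch.allclose(mu2, mu, rtol=1e-3) and torch.equal(theta2, theta)
    # Either parameterization yields the same (mu, theta) pair internally.
    nb_a = NegativeBinomial(mu=mu, theta=theta)
    nb_b = NegativeBinomial(total_count=total_count, logits=logits)
    assert torch.allclose(nb_a.mu, nb_b.mu, rtol=1e-3)
    # Sampling returns nonnegative integer counts; the zero-inflated variant
    # additionally zeroes out roughly sigmoid(zi_logits) of the draws.
    counts = nb_a.sample(torch.Size((100,)))
    zinb = ZeroInflatedNegativeBinomial(mu=mu, theta=theta, zi_logits=torch.zeros(2))
    zi_counts = zinb.sample(torch.Size((100,)))
    print(counts.shape, zi_counts.shape)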
| 34.695 | 120 | 0.645482 |
9b63dd3ed50830f4606adb3979421a8e21c30bbf | 2,978 | py | Python | tests/unit/facts/test_iri_mapping.py | pgajdos/py-junos-eznc | a13e49a3bc1ceaf681940f593d190f5d02d47369 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/unit/facts/test_iri_mapping.py | pgajdos/py-junos-eznc | a13e49a3bc1ceaf681940f593d190f5d02d47369 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/unit/facts/test_iri_mapping.py | pgajdos/py-junos-eznc | a13e49a3bc1ceaf681940f593d190f5d02d47369 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | __author__ = "Stacy Smith"
__credits__ = "Jeremy Schulman, Nitin Kumar"
import unittest
import pytest
from mock import patch, MagicMock
import os
from jnpr.junos import Device
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@pytest.mark.unit
class TestIriMapping(unittest.TestCase):
@patch("ncclient.manager.connect")
def setUp(self, mock_connect):
mock_connect.side_effect = self._mock_manager_setup
self.dev = Device(
host="1.1.1.1", user="rick", password="password123", gather_facts=False
)
self.dev.open()
@patch("jnpr.junos.Device.execute")
def test_iri_host_to_ip_mapping_fact(self, mock_execute):
mock_execute.side_effect = self._mock_manager_current_re
self.assertEqual(self.dev.facts["_iri_ip"]["re0"], ["128.0.0.4", "10.0.0.4"])
@patch("jnpr.junos.Device.execute")
def test_iri_ip_to_host_mapping_fact(self, mock_execute):
mock_execute.side_effect = self._mock_manager_current_re
self.assertEqual(
self.dev.facts["_iri_hostname"]["128.0.0.1"],
["master", "node", "fwdd", "member", "pfem"],
)
@patch("jnpr.junos.Device.execute")
def test_iri_template_ip_to_host_mapping_fact(self, mock_execute):
mock_execute.side_effect = self._mock_manager_current_re
self.assertEqual(
self.dev.facts["_iri_hostname"]["190.0.1.1"], ["gnf1-master", "psd1-master"]
)
@patch("jnpr.junos.Device.execute")
def test_iri_template_host_to_ip_mapping_fact(self, mock_execute):
mock_execute.side_effect = self._mock_manager_current_re
self.assertEqual(
self.dev.facts["_iri_ip"]["gnf1-master"], ["190.0.1.1", "190.1.1.1"]
)
@patch("jnpr.junos.Device.execute")
    def test_iri_template2_host_to_ip_mapping_fact(self, mock_execute):
mock_execute.side_effect = self._mock_manager_current_re2
self.assertEqual(self.dev.facts["_iri_ip"]["gnf1-master"], ["190.0.1.1"])
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__), "rpc-reply", fname)
foo = open(fpath).read()
rpc_reply = NCElement(
foo, self.dev._conn._device_handler.transform_reply()
)._NCElement__doc[0]
return rpc_reply
def _mock_manager_setup(self, *args, **kwargs):
if kwargs:
device_params = kwargs["device_params"]
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
def _mock_manager_current_re(self, *args, **kwargs):
if args:
return self._read_file("iri_mapping_" + args[0].tag + ".xml")
def _mock_manager_current_re2(self, *args, **kwargs):
if args:
return self._read_file("iri_mapping2_" + args[0].tag + ".xml")
| 36.317073 | 88 | 0.670248 |
978749bad74e58610e9aeb47a9ad48fd20a5c2bb | 532 | py | Python | gevent_tasks/errors.py | blakev/gevent-tasks | 3cf5204e8587a0d7ea9ec7c86006173330b7d744 | [
"MIT"
] | 17 | 2017-10-18T00:01:42.000Z | 2021-08-10T10:17:59.000Z | gevent_tasks/errors.py | blakev/gevent-tasks | 3cf5204e8587a0d7ea9ec7c86006173330b7d744 | [
"MIT"
] | null | null | null | gevent_tasks/errors.py | blakev/gevent-tasks | 3cf5204e8587a0d7ea9ec7c86006173330b7d744 | [
"MIT"
] | 2 | 2017-10-18T10:32:59.000Z | 2021-01-25T20:15:08.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# >>
# gevent-tasks, 2019
# <<
class GeventTasksError(Exception):
"""Base exception class."""
class TaskKeyError(GeventTasksError, KeyError):
"""Thrown inside task execution when namespacing has an issue."""
class TaskRuntimeError(GeventTasksError, RuntimeError):
"""Thrown inside task execution when something goes wrong."""
class ForeverRuntimeError(GeventTasksError, RuntimeError):
"""Thrown from :func:`gevent_tasks.manager.TaskManager.forever`."""
| 19.703704 | 71 | 0.710526 |
c53f602c8932e90a31712dc578e7bd389237f336 | 3,568 | py | Python | ERP/estoque/lista_view.py | CSAAtibaia/Listas | 36ad6ffb01d17184d10727e1945bb738b5c199e8 | [
"MIT"
] | null | null | null | ERP/estoque/lista_view.py | CSAAtibaia/Listas | 36ad6ffb01d17184d10727e1945bb738b5c199e8 | [
"MIT"
] | 7 | 2019-12-05T02:27:57.000Z | 2021-09-22T17:57:30.000Z | ERP/estoque/lista_view.py | CSAAtibaia/Listas | 36ad6ffb01d17184d10727e1945bb738b5c199e8 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from ERP.estoque.models import Estoque, EstoqueItens
from ERP.core.models import Item
from django.db.models import Sum, F, CharField, IntegerField, Case, When, Value #, Q
from django.db.models.functions import Coalesce, Cast, Concat
# Create your views here.
def lista_itens(request):
template_name = 'lista_ativa.html'
ativa_tb = Estoque.objects.filter(movimento='s', aberto=True)
pedidos_item_tb = EstoqueItens.objects.filter(estoque__in = ativa_tb, quantidade__gt = 0)
itens = Item.objects.filter(
estoqueitens__gt=0, estoqueitens__estoque__aberto=True
).values(
'produto', 'saldo',
nome_forn=F('fornecedor__nome')
).order_by(
'fornecedor__nome', 'produto'
).distinct().annotate(
qtde=Sum(
Case(
When(estoqueitens__estoque__movimento='s', then=F('estoqueitens__quantidade')),
default=0,
output_field=IntegerField(),
)))
coagris_tb_1 = pedidos_item_tb.values(
higieniza=F('estoque__usuario__coagri__higieniza'),
coagri=Coalesce(
Cast('estoque__usuario__coagri__apelido', CharField()),
Cast(
Concat('estoque__usuario__first_name', Value(' '),
'estoque__usuario__last_name'), CharField()),
Cast('estoque__usuario__email', CharField()),
Cast('estoque__usuario__username', CharField())
),
nomeitem=F('produto__produto'),
total=F('quantidade'),
entrega=F('estoque__usuario__coagri__partilha__partilha'),
entrega_ico=F('estoque__usuario__coagri__partilha__icone')
)
coagris_tb = coagris_tb_1.order_by('higieniza', 'entrega', 'coagri', 'nomeitem')
locais_tb = coagris_tb.values(
'entrega', 'entrega_ico', 'higieniza', 'nomeitem'
).order_by(
'entrega', 'entrega_ico', 'higieniza', 'nomeitem'
).annotate(soma=Sum('quantidade'))
context = {'ativa_tb': ativa_tb,
'locais_tb': locais_tb,
'coagris_tb': coagris_tb,
'itens': itens,}
return render(request, template_name, context)
def lista_print(request):
template_name = 'itens_coagris_print.html'
ativa_tb = Estoque.objects.filter(movimento='s', aberto=True)
pedidos_item_tb = EstoqueItens.objects.filter(estoque__in = ativa_tb, quantidade__gt = 0)
coagris_tb_1 = pedidos_item_tb.values(
higieniza=F('estoque__usuario__coagri__higieniza'),
coagri=Coalesce(
Cast('estoque__usuario__coagri__apelido', CharField()),
Cast(
Concat('estoque__usuario__first_name', Value(' '),
'estoque__usuario__last_name'), CharField()),
Cast('estoque__usuario__email', CharField()),
Cast('estoque__usuario__username', CharField())
),
nomeitem=F('produto__produto'),
total=F('quantidade'),
entrega=F('estoque__usuario__coagri__partilha__partilha'),
entrega_ico=F('estoque__usuario__coagri__partilha__icone')
)
coagris_tb = coagris_tb_1.order_by('higieniza', 'entrega', 'coagri', 'nomeitem')
context = {'ativa_tb': ativa_tb,
'coagris_tb': coagris_tb,
}
return render(request, template_name, context)
| 39.644444 | 99 | 0.609305 |
e3bca6561a49502f2e787ea387700820a8506120 | 736 | py | Python | LeetCode/0081. Search in Rotated Sorted Array II/solution.py | InnoFang/oh-my-algorithms | f559dba371ce725a926725ad28d5e1c2facd0ab2 | [
"Apache-2.0"
] | 19 | 2018-08-26T03:10:58.000Z | 2022-03-07T18:12:52.000Z | LeetCode/0081. Search in Rotated Sorted Array II/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | null | null | null | LeetCode/0081. Search in Rotated Sorted Array II/solution.py | InnoFang/Algorithm-Library | 1896b9d8b1fa4cd73879aaecf97bc32d13ae0169 | [
"Apache-2.0"
] | 6 | 2020-03-16T23:00:06.000Z | 2022-01-13T07:02:08.000Z | """
279 / 279 test cases passed.
Runtime: 36 ms
Memory Usage: 15 MB
"""
from typing import List
class Solution:
    def search(self, nums: List[int], target: int) -> bool:
        # Binary search over a rotated sorted array that may contain duplicates.
        l, r = 0, len(nums) - 1
        while l <= r:
            mid = l + ((r - l) >> 1)
            if nums[mid] == target:
                return True
            if nums[mid] == nums[l]:
                # Duplicates hide which half is sorted; shrink the window by one.
                l += 1
                continue
            if nums[l] <= nums[mid]:
                # Left half [l, mid] is sorted.
                if nums[l] <= target < nums[mid]:
                    r = mid - 1
                else:
                    l = mid + 1
            else:
                # Right half [mid, r] is sorted.
                if nums[mid] < target <= nums[r]:
                    l = mid + 1
                else:
                    r = mid - 1
        return False
| 27.259259 | 59 | 0.36413 |
9b64afd6e50899f2e049661816b9d0d204a6475a | 17,760 | py | Python | pyvo/nameresolver/tests/test_SesameNoNet.py | kernsuite-debian/pyvo | ee85c50c5c520ac7bede2d6f18de225c57dedc33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | pyvo/nameresolver/tests/test_SesameNoNet.py | kernsuite-debian/pyvo | ee85c50c5c520ac7bede2d6f18de225c57dedc33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | pyvo/nameresolver/tests/test_SesameNoNet.py | kernsuite-debian/pyvo | ee85c50c5c520ac7bede2d6f18de225c57dedc33 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for pyvo.nameresolver.sesame
"""
from __future__ import print_function, division
import os, sys, shutil, re, imp
import unittest
from .. import sesame
import xml.etree.ElementTree as ET
from astropy.utils.data import get_pkg_data_filename
resultfile = "data/sesame.xml"
xmldecl = "<?xml version=\"1.0\"?>"
class DocQuantityTest(unittest.TestCase):
xmldecl = "<?xml version=\"1.0\"?>"
def makeQuantXML(self, tag, include="eqr"):
out = "{0}<{1}><v>447.89000</v>".format(self.xmldecl, tag)
if 'e' in include:
out += "<e>2.99793</e>"
if 'q' in include:
out += "<q>A</q>"
if 'r' in include:
out += "<r>1991RC3.9.C...0000d</r>"
out += "</{0}>".format(tag)
return out
def makeQuantEl(self, tag="Vel", include="eqr"):
xml = self.makeQuantXML(tag, include)
return ET.fromstring(xml)
def testUnit(self):
q = sesame.DocQuantity(self.makeQuantEl("Vel", include=""))
self.assertEquals("km/s", q.unit)
q = sesame.DocQuantity(self.makeQuantEl("pm", include=""))
self.assertEquals("mas/yr", q.unit)
q = sesame.DocQuantity(self.makeQuantEl("plx", include=""))
self.assertEquals("mas", q.unit)
q = sesame.DocQuantity(self.makeQuantEl("z", include=""))
self.assertEquals("", q.unit)
q = sesame.DocQuantity(self.makeQuantEl("mag", include=""))
self.assertEquals("", q.unit)
def testToString(self):
q = sesame.DocQuantity(self.makeQuantEl())
self.assertEquals("447.89 km/s", q.to_string(False))
self.assertEquals("447.89 +/- 2.99793 km/s", q.to_string(True))
self.assertEquals("447.89 +/- 2.99793 km/s", str(q))
# pdb.set_trace()
self.assertTrue(re.match(r'quant\((\S+,\s){4}\S+\)', repr(q)) is not None)
q = sesame.DocQuantity(self.makeQuantEl(include=""))
self.assertEquals("447.89 km/s", q.to_string(False))
self.assertEquals("447.89 km/s", q.to_string(True))
self.assertEquals("447.89 km/s", str(q))
self.assertTrue(re.match(r'quant\((\S+,\s){4}\S+\)', repr(q)) is not None)
def testCtor(self):
q = sesame.DocQuantity(self.makeQuantEl())
self.assertAlmostEquals(447.89, q.val)
self.assertAlmostEquals(2.997930, q.error)
self.assertEquals("km/s", q.unit)
self.assertEquals("A", q.qual)
self.assertEquals("1991RC3.9.C...0000d", q.ref)
q = sesame.DocQuantity(self.makeQuantEl(include=""))
self.assertAlmostEquals(447.89, q.val)
self.assertTrue(q.error is None)
self.assertEquals("km/s", q.unit)
self.assertTrue(q.qual is None)
self.assertTrue(q.ref is None)
q = sesame.DocQuantity(self.makeQuantEl(include="e"))
self.assertAlmostEquals(447.89, q.val)
self.assertAlmostEquals(2.997930, q.error)
self.assertEquals("km/s", q.unit)
self.assertTrue(q.qual is None)
self.assertTrue(q.ref is None)
q = sesame.DocQuantity(self.makeQuantEl(include="eq"))
self.assertAlmostEquals(447.89, q.val)
self.assertAlmostEquals(2.997930, q.error)
self.assertEquals("km/s", q.unit)
self.assertEquals("A", q.qual)
self.assertTrue(q.ref is None)
q = sesame.DocQuantity(self.makeQuantEl(include="er"))
self.assertAlmostEquals(447.89, q.val)
self.assertAlmostEquals(2.997930, q.error)
self.assertEquals("km/s", q.unit)
self.assertTrue(q.qual is None)
self.assertEquals("1991RC3.9.C...0000d", q.ref)
class ProperMotionTest(unittest.TestCase):
def makeQuantXML(self, include="eqr"):
tag = "pm"
out = "{0}<{1}><v>3.44</v>".format(xmldecl, tag)
if 'e' in include:
out += "<e>0.28</e>"
if 'q' in include:
out += "<q>A</q>"
if 'r' in include:
out += "<r>1991RC3.9.C...0000d</r>"
out += "<pa>54.18</pa><pmRA>3.1</pmRA>"
if 'e' in include:
out += "<epmRA>0.2</epmRA>"
out += "<pmDE>1.5</pmDE>"
if 'e' in include:
out += "<epmDE>0.2</epmDE>"
out += "</{0}>".format(tag)
return out
def makeQuantEl(self, include="eqr"):
xml = self.makeQuantXML(include)
return ET.fromstring(xml)
def testToString(self):
q = sesame.ProperMotion(self.makeQuantEl())
self.assertEquals("3.44 mas/yr", q.to_string(False))
self.assertEquals("3.44 +/- 0.28 mas/yr", q.to_string(True))
self.assertEquals("3.44 +/- 0.28 mas/yr", str(q))
# pdb.set_trace()
self.assertTrue(re.match(r'pm\((\S+,\s){9}\S+\)', repr(q)) is not None)
q = sesame.ProperMotion(self.makeQuantEl(include=""))
self.assertEquals("3.44 mas/yr", q.to_string(False))
self.assertEquals("3.44 mas/yr", q.to_string(True))
self.assertEquals("3.44 mas/yr", str(q))
self.assertTrue(re.match(r'pm\((\S+,\s){9}\S+\)', repr(q)) is not None)
def testCtor(self):
q = sesame.ProperMotion(self.makeQuantEl())
self.assertAlmostEquals(3.44, q.val)
self.assertAlmostEquals(0.28, q.error)
self.assertAlmostEquals(54.18, q.pa)
self.assertAlmostEquals(3.1, q.val_ra)
self.assertAlmostEquals(1.5, q.val_dec)
self.assertAlmostEquals(0.2, q.error_ra)
self.assertAlmostEquals(0.2, q.error_dec)
self.assertEquals("mas/yr", q.unit)
self.assertEquals("A", q.qual)
self.assertEquals("1991RC3.9.C...0000d", q.ref)
q = sesame.ProperMotion(self.makeQuantEl(include=""))
self.assertAlmostEquals(3.44, q.val)
self.assertAlmostEquals(54.18, q.pa)
self.assertAlmostEquals(3.1, q.val_ra)
self.assertAlmostEquals(1.5, q.val_dec)
self.assertTrue(q.error is None)
self.assertTrue(q.error_ra is None)
self.assertTrue(q.error_dec is None)
self.assertEquals("mas/yr", q.unit)
self.assertTrue(q.qual is None)
self.assertTrue(q.ref is None)
q = sesame.ProperMotion(self.makeQuantEl(include="e"))
self.assertAlmostEquals(3.44, q.val)
self.assertAlmostEquals(0.28, q.error)
self.assertAlmostEquals(54.18, q.pa)
self.assertAlmostEquals(3.1, q.val_ra)
self.assertAlmostEquals(1.5, q.val_dec)
self.assertAlmostEquals(0.2, q.error_ra)
self.assertAlmostEquals(0.2, q.error_dec)
self.assertEquals("mas/yr", q.unit)
self.assertTrue(q.qual is None)
self.assertTrue(q.ref is None)
q = sesame.ProperMotion(self.makeQuantEl(include="eq"))
self.assertAlmostEquals(3.44, q.val)
self.assertAlmostEquals(0.28, q.error)
self.assertAlmostEquals(54.18, q.pa)
self.assertAlmostEquals(3.1, q.val_ra)
self.assertAlmostEquals(1.5, q.val_dec)
self.assertAlmostEquals(0.2, q.error_ra)
self.assertAlmostEquals(0.2, q.error_dec)
self.assertEquals("mas/yr", q.unit)
self.assertEquals("A", q.qual)
self.assertTrue(q.ref is None)
q = sesame.ProperMotion(self.makeQuantEl(include="r"))
self.assertAlmostEquals(3.44, q.val)
self.assertAlmostEquals(54.18, q.pa)
self.assertAlmostEquals(3.1, q.val_ra)
self.assertAlmostEquals(1.5, q.val_dec)
self.assertEquals("mas/yr", q.unit)
self.assertTrue(q.qual is None)
self.assertTrue(q.error is None)
self.assertTrue(q.error_ra is None)
self.assertTrue(q.error_dec is None)
self.assertEquals("1991RC3.9.C...0000d", q.ref)
class ObjectDataTest(unittest.TestCase):
def setUp(self):
result = get_pkg_data_filename(resultfile)
self.sesel = ET.parse(result).getroot()
def selectResolver(self, target, which):
# pdb.set_trace()
el = self.sesel.findall("Target")
el = el[int(target-1)]
el = el.findall("Resolver")
return el[int(which-1)]
def testNotFound(self):
# pdb.set_trace()
el = self.selectResolver(2, 1)
res = sesame.ObjectData(el)
self.assertFalse(res.success)
el = self.selectResolver(2, 2)
res = sesame.ObjectData(el)
self.assertFalse(res.success)
el = self.selectResolver(1, 1)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
def testVizier(self):
el = self.selectResolver(1, 1)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertTrue(res.fromcache)
self.assertEquals("V=VizieR (local)", res.resolver_name)
self.assertEquals("12:18.9 +47:19", res.sexapos)
self.assertEquals(2, len(res.pos))
self.assertAlmostEquals(184.73, res.pos[0])
self.assertAlmostEquals(47.31, res.pos[1])
self.assertEquals("{NGC} 4258", res.oname)
def testSimbad(self):
el = self.selectResolver(1, 2)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertFalse(res.fromcache)
self.assertEquals("S=Simbad (CDS, via client/server)",
res.resolver_name)
self.assertEquals("12:18:57.61 +47:18:13.3", res.sexapos)
self.assertEquals(2, len(res.pos))
self.assertAlmostEquals(184.74008333, res.pos[0])
self.assertAlmostEquals(47.30371944, res.pos[1])
self.assertEquals("M 106", res.oname)
def testGet(self):
el = self.selectResolver(1, 2)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertEquals("S=Simbad (CDS, via client/server)",
res.resolver_name)
self.assertEquals("12:18:57.61 +47:18:13.3", res.get('jpos'))
self.assertEquals("184.74008333", res.get('jradeg'))
self.assertEquals("47.30371944", res.get('jdedeg'))
self.assertEquals("M 106", res.get('oname'))
self.assertEquals("LIN", res.get('otype'))
self.assertEquals("@609478", res.get('oid'))
self.assertEquals("2006AJ....131.1163S", res.get('refPos'))
self.assertEquals("3", res.get('MType'))
z = res.get('z')
self.assertTrue(z is not None)
self.assertEquals("D", z.qual)
self.assertEquals("2002LEDA.........0P", z.ref)
aliases = res.get('alias')
self.assertEquals(35, len(aliases))
self.assertTrue("Z 1216.5+4735" in aliases)
self.assertTrue("UGC 7353" in aliases)
self.assertTrue(res.get('Vel') is None)
def testGetitem(self):
el = self.selectResolver(1, 2)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertEquals("S=Simbad (CDS, via client/server)",
res.resolver_name)
self.assertEquals("12:18:57.61 +47:18:13.3", res['jpos'])
self.assertEquals("184.74008333", res['jradeg'])
self.assertEquals("47.30371944", res['jdedeg'])
self.assertEquals("M 106", res['oname'])
self.assertEquals("LIN", res['otype'])
self.assertEquals("@609478", res['oid'])
self.assertEquals("2006AJ....131.1163S", res['refPos'])
self.assertEquals("3", res['MType'])
z = res['z']
self.assertTrue(z is not None)
self.assertTrue(isinstance(z, sesame.DocQuantity))
self.assertEquals("D", z.qual)
self.assertEquals("2002LEDA.........0P", z.ref)
aliases = res['alias']
self.assertEquals(35, len(aliases))
self.assertTrue("Z 1216.5+4735" in aliases)
self.assertTrue("UGC 7353" in aliases)
def testKeys(self):
el = self.selectResolver(1, 1)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertTrue(res.resolver_name.startswith("V=VizieR"),
"Not matched: " + res.resolver_name)
keys = res.keys()
self.assertTrue("jpos" in keys)
self.assertTrue("oname" in keys)
self.assertTrue("jradeg" in keys)
self.assertTrue("jdedeg" in keys)
self.assertEquals(5, len(keys))
el = self.selectResolver(1, 3)
res = sesame.ObjectData(el)
self.assertTrue(res.success)
self.assertTrue(res.resolver_name.startswith("N=NED"),
"Not matched: " + res.resolver_name)
keys = res.keys()
self.assertTrue("jpos" in keys)
self.assertTrue("oname" in keys)
self.assertTrue("jradeg" in keys)
self.assertTrue("jdedeg" in keys)
self.assertTrue("MType" in keys)
self.assertEquals(12, len(keys))
class TargetTest(unittest.TestCase):
def setUp(self):
result = get_pkg_data_filename(resultfile)
self.sesel = ET.parse(result).getroot()
def selectTarget(self, which):
# pdb.set_trace()
el = self.sesel.findall("Target")
return el[int(which-1)]
def testProp(self):
target = sesame.Target(self.selectTarget(1))
self.assertEquals("ngc 4258", target.name)
self.assertEquals("VSNA", target.dbcodes)
self.assertEquals(3, len(target.responses))
for res in target.responses:
self.assertTrue(isinstance(res, sesame.ObjectData))
def testResponses(self):
for t in xrange(1,4):
target = sesame.Target(self.selectTarget(t))
for res in target.responses:
if t == 2:
self.assertFalse(res.success)
else:
self.assertTrue(res.success)
def testAccordingTo(self):
target = sesame.Target(self.selectTarget(1))
res = target.according_to("sim")
self.assertTrue(res is not None)
self.assertEquals("M 106", res.oname)
res = target.according_to("viz")
self.assertTrue(res is not None)
self.assertEquals("{NGC} 4258", res.oname)
res = target.according_to("NED")
self.assertTrue(res is not None)
res = target.according_to("goob")
self.assertTrue(res is None)
self.assertRaises(LookupError, target.according_to, "")
class SesameQueryTest(unittest.TestCase):
def setUp(self):
self.query = sesame.SesameQuery()
def testCtor(self):
self.assertEquals(sesame.default_endpoint, self.query.baseurl)
self.query = sesame.SesameQuery(sesame.endpoints["cfa"])
self.assertEquals(sesame.endpoints["cfa"], self.query.baseurl)
def testDbs(self):
self.assertEquals("", self.query.dbs)
self.query.dbs = "SV"
self.assertEquals("SV", self.query.dbs)
self.query.dbs = "GB"
self.assertEquals("GB", self.query.dbs)
del self.query.dbs
self.assertEquals("", self.query.dbs)
self.query.useDatabases("Simb", "Vi")
self.assertEquals("SV", self.query.dbs)
# pdb.set_trace()
self.query.useDefaultDatabase()
self.assertEquals("", self.query.dbs)
def testGetQueryURL(self):
self.query.names = "m51"
self.assertEquals(sesame.default_endpoint + "/-ox?m51",
self.query.getqueryurl())
self.query.names = "m101 m51".split()
self.query.opts = 'I'
self.assertEquals(sesame.default_endpoint + "/-oxI?m101&m51",
self.query.getqueryurl())
self.query.dbs = "SN"
self.assertEquals(sesame.default_endpoint + "/-oxI/SN?m101&m51",
self.query.getqueryurl())
self.query.ignorecache = True
self.assertEquals(sesame.default_endpoint + "/-oxI/~SN?m101&m51",
self.query.getqueryurl())
self.assertEquals(sesame.default_endpoint + "/-oxI/~SN?m101&m51",
self.query.getqueryurl(format='x'))
self.assertEquals(sesame.default_endpoint + "/-ox2I/~SN?m101&m51",
self.query.getqueryurl(format='x2'))
self.assertEquals(sesame.default_endpoint + "/-oI/~SN?m101&m51",
self.query.getqueryurl(format='pc'))
self.assertEquals(sesame.default_endpoint + "/-ox2pI/~SN?m101&m51",
self.query.getqueryurl(format='x2', astext=True))
def assertRaisesOnQuery(self, msg, format=None):
self.assertRaises(sesame.DALQueryError, self.query.getqueryurl,
format=format)
self.query.getqueryurl(True)
def testGetBadQueryURL(self):
self.assertRaisesOnQuery("Failed to catch lack of source")
self.query.names = "m51"
self.query.dbs = "GB"
self.assertRaisesOnQuery("Failed to catch bad DB codes")
self.query.dbs = ""
self.query.opts = "gw"
self.assertRaisesOnQuery("Failed to catch bad option codes")
self.query.opts = ""
self.assertRaisesOnQuery("Failed to catch bad format", "uu")
class EndpointSetTest(unittest.TestCase):
def testSetDef(self):
self.assertEquals(sesame.endpoints["cds"], sesame.default_endpoint)
# pdb.set_trace()
sesame.set_default_endpoint("cds")
self.assertEquals(sesame.endpoints["cds"], sesame.default_endpoint)
sesame.set_default_endpoint("cfa")
self.assertEquals(sesame.endpoints["cfa"], sesame.default_endpoint)
__all__ = "DocQuantityTest ProperMotionTest ObjectDataTest SesameQueryTest EndpointSetTest".split()
def suite():
tests = []
for t in __all__:
tests.append(unittest.makeSuite(globals()[t]))
return unittest.TestSuite(tests)
if __name__ == "__main__":
unittest.main()
| 37 | 99 | 0.605743 |
2e843c038ea389a03d6943a6720d06efe3c834c8 | 230 | py | Python | pyvisa_py/protocols/__init__.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | 157 | 2017-08-01T07:46:59.000Z | 2022-03-06T04:44:11.000Z | pyvisa_py/protocols/__init__.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | 251 | 2017-05-31T00:48:56.000Z | 2022-03-22T06:12:37.000Z | pyvisa_py/protocols/__init__.py | Handfeger/pyvisa-py | fcfb45895cd44dd922985c3a9d8f3372c8318d63 | [
"MIT"
] | 77 | 2017-05-31T14:32:52.000Z | 2022-02-19T20:58:06.000Z | # -*- coding: utf-8 -*-
"""Implements protocols on top of lower level libraries to talk to instruments.
:copyright: 2014-2020 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
| 25.555556 | 79 | 0.721739 |
c01517b1f5042ce3a58ddb794915d97f5981af66 | 14,718 | py | Python | source/utctx/unittest/runtests.py | cablelabs/Utopia | 1f8654b3dc91fe78941c4a67d507747bc14fca57 | [
"Apache-2.0"
] | null | null | null | source/utctx/unittest/runtests.py | cablelabs/Utopia | 1f8654b3dc91fe78941c4a67d507747bc14fca57 | [
"Apache-2.0"
] | null | null | null | source/utctx/unittest/runtests.py | cablelabs/Utopia | 1f8654b3dc91fe78941c4a67d507747bc14fca57 | [
"Apache-2.0"
] | 1 | 2020-12-10T07:34:30.000Z | 2020-12-10T07:34:30.000Z | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2015 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#!/usr/bin/python
###########################################################################
#
# Copyright (c) 2008-2009 Cisco Systems, Inc. All rights reserved.
#
# Cisco Systems, Inc. retains all right, title and interest (including all
# intellectual property rights) in and to this computer program, which is
# protected by applicable intellectual property laws. Unless you have obtained
# a separate written license from Cisco Systems, Inc., you are not authorized
# to utilize all or a part of this computer program for any purpose (including
# reproduction, distribution, modification, and compilation into object code),
# and you must immediately destroy or return to Cisco Systems, Inc. all copies
# of this computer program. If you are licensed by Cisco Systems, Inc., your
# rights to utilize this computer program are limited by the terms of that
# license. To obtain a license, please contact Cisco Systems, Inc.
#
# This computer program contains trade secrets owned by Cisco Systems, Inc.
# and, unless unauthorized by Cisco Systems, Inc. in writing, you agree to
# maintain the confidentiality of this computer program and related information
# and to not disclose this computer program and related information to any
# other person or entity.
#
# THIS COMPUTER PROGRAM IS PROVIDED AS IS WITHOUT ANY WARRANTIES, AND CISCO
# SYSTEMS, INC. EXPRESSLY DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED,
# INCLUDING THE WARRANTIES OF MERCHANTIBILITY, FITNESS FOR A PARTICULAR
# PURPOSE, TITLE, AND NONINFRINGEMENT.
#
###########################################################################
#
# unittest.py - UTCTX Library unit test runner
#
import optparse
import os
import platform
import re
import shutil
import subprocess
import sys
import tempfile
import unittest
#
# Main
#
def main():
# Command line options
cmdParser = optparse.OptionParser()
cmdParser.add_option("-b", action = "store_true", dest = "bBuildOnly",
help = "Build unittest only (no run)")
cmdParser.add_option("-r", action = "store_true", dest = "bRunOnly",
help = "Run unit tests only (no build)")
cmdParser.add_option("-v", action = "store_true", dest = "bVerbose",
help = "Verbose output")
cmdParser.add_option("-u", action = "store_true", dest = "bUpdateExpected",
help = "Update expected output")
cmdParser.add_option("--debug", action = "store_true", dest = "bDebug",
help = "Build debug binaries")
(cmdOptions, cmdArgs) = cmdParser.parse_args()
# Test suites...
suites = unittest.TestSuite()
dirSrc = "src"
dirTests = "tests"
if not cmdOptions.bRunOnly:
# Build test
buildSuite = BuildTestSuite(dirSrc, cmdOptions.bDebug)
suites.addTest(buildSuite)
if not cmdOptions.bBuildOnly:
# API test
apiSuite = APITestSuite(dirSrc, dirTests, cmdOptions.bUpdateExpected)
suites.addTest(apiSuite)
# Transaction test
transSuite = TransactionTestSuite(dirSrc, dirTests, cmdOptions.bUpdateExpected)
suites.addTest(transSuite)
# RWLock test
rwlockSuite = RWLockTestSuite(dirSrc)
suites.addTest(rwlockSuite)
# Run the test suites
runner = unittest.TextTestRunner()
if cmdOptions.bVerbose:
runner.verbosity = 2
runner.run(suites)
#
# Helper function to determine if two files differ
#
def FilesDiffer(expected, actual, fnIgnore = None, fnErrorActual = None):
# Ignore line endings on non-Linux systems
bIgnoreLineEndings = (platform.system() != 'Linux')
# Read the expected file
fhExpected = open(expected, "r")
try:
expectedText = fhExpected.read()
except:
fhExpected.close()
raise
fhExpected.close()
# Read the actual file
fhActual = open(actual, "r")
try:
actualText = fhActual.read()
except:
fhActual.close()
raise
fhActual.close()
    # No reason to split into lines? If so, just compare the whole files...
if fnIgnore is None and fnErrorActual is None and not bIgnoreLineEndings:
return expectedText != actualText
# Split the text into lines...
expectedLines = expectedText.splitlines(not bIgnoreLineEndings)
actualLines = actualText.splitlines(not bIgnoreLineEndings)
# Check for errors in the actual text
if fnErrorActual is not None:
errors = [ x for x in actualLines if fnErrorActual(x) ]
if errors:
raise Exception("\n".join(errors))
# Compare
if fnIgnore is not None:
expectedLines = [ x for x in expectedLines if not fnIgnore(x) ]
actualLines = [ x for x in actualLines if not fnIgnore(x) ]
return expectedLines != actualLines
######################################################################
#
# Build Test Suite
#
######################################################################
#
# Load the build test
#
class BuildTestSuite(unittest.TestSuite):
def __init__(self, dirSrc, bDebug):
unittest.TestSuite.__init__(self)
for dirSrc, bDebug, cFlags in \
[(dirSrc, bDebug, "-DUTCTX_LOG"),
(dirSrc, bDebug, "-DUTCTX_POSIX_SEM"),
(dirSrc, bDebug, "-DUTCTX_LOG -DUTCTX_POSIX_SEM"),
(dirSrc, bDebug, None)]:
self.addTest(BuildTestCase(dirSrc, bDebug, cFlags))
#
# Build test case
#
class BuildTestCase(unittest.TestCase):
def __init__(self, dirSrc, bDebug, cFlags):
unittest.TestCase.__init__(self)
self.__dirSrc = dirSrc
self.__bDebug = bDebug
self.__cFlags = cFlags
def __str__(self):
if self.__cFlags is None:
return 'Build: '
else:
return 'Build UNITTEST_CFLAGS="%s":' % self.__cFlags
def runTest(self):
# Set the target
if self.__bDebug:
target = "debug"
else:
target = "release"
# Setup the make command
make = 'make clean %s' % (target)
# Append cflags
if self.__cFlags is not None:
make += ' UNITTEST_CFLAGS="%s"' % self.__cFlags
# Run make
proc = subprocess.Popen(make, shell = True, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, cwd = self.__dirSrc)
makeOutput = proc.communicate()[0]
if proc.wait() != 0:
if makeOutput is not None:
self.fail(makeOutput)
######################################################################
#
# API Test Suite
#
######################################################################
#
# Load the API test
#
class APITestSuite(unittest.TestSuite):
def __init__(self, dirSrc, dirTests, bUpdateExpected):
unittest.TestSuite.__init__(self)
for strDesc, dirSrc, dirTests, strState, bUpdateExpected in \
[("get", dirSrc, dirTests, "utctx_api.st", bUpdateExpected),
("set", dirSrc, dirTests, "utctx_api.st", bUpdateExpected),
("unset", dirSrc, dirTests, "utctx_api.st", bUpdateExpected),
("event", dirSrc, dirTests, "utctx_api.st", bUpdateExpected),
("utopia-values", dirSrc, dirTests, "utctx.st", bUpdateExpected)]:
self.addTest(APITestCase(strDesc, dirSrc, dirTests, strState, bUpdateExpected))
#
# API test case
#
class APITestCase(unittest.TestCase):
def __init__(self, strDesc, dirSrc, dirTests, strState, bUpdateExpected):
unittest.TestCase.__init__(self)
self.__strDesc = strDesc
self.__dirSrc = dirSrc
self.__fileState = os.path.join("state", strState)
self.__dirTest = os.path.join(dirTests, "API")
self.__testProgram = os.path.join(dirSrc, "unittest")
self.__mallocInterposer = os.path.join(dirSrc, "malloc_interposer.so")
self.__bUpdateExpected = bUpdateExpected
# Ignore malloc stats in expected/actual, if requested
        reIgnoreMallocStats = re.compile(r"^malloc_interposer\.c - (exit:|\*+)")
self.__fnDiffIgnore = lambda s: reIgnoreMallocStats.search(s) is not None
# Report errors for memory leaks
reMemoryLeaks = re.compile("^malloc_interposer\.c - Memory leak:")
self.__fnDiffError = lambda s: reMemoryLeaks.search(s) is not None
def __str__(self):
return "API: %s" % self.__strDesc
def runTest(self):
# Copy the state file
shutil.copy(self.__fileState, os.path.join(self.__dirSrc, "utctx.st"))
# Setup the cmd args
args = [os.path.abspath(self.__testProgram), self.__strDesc, "commit"]
# Setup files
fileActual = os.path.join(self.__dirTest, self.__strDesc, "actual.out")
fileExpected = os.path.join(self.__dirTest, self.__strDesc, "expected.out")
# Execute the test program
cmdLine = " ".join(args)
fhOutput = open(fileActual, "w")
try:
proc = subprocess.Popen(args = args, stdout = fhOutput, cwd = self.__dirSrc,
env = { "LD_PRELOAD": os.path.abspath(self.__mallocInterposer) })
returnCode = proc.wait()
if returnCode != 0:
self.fail("Test failed with return code %s!\n%s" % (returnCode, cmdLine))
except:
fhOutput.close()
raise
fhOutput.close()
# Diff the actual output with expected
if not os.path.exists(fileExpected):
self.fail("Expected output file %s does not exist" % fileExpected)
elif FilesDiffer(fileExpected, fileActual, self.__fnDiffIgnore, self.__fnDiffError):
if self.__bUpdateExpected:
shutil.copy(fileActual, fileExpected)
else:
self.fail('Actual output differs from expected: "%s" "%s"\n%s' %
(fileActual, fileExpected, cmdLine))
######################################################################
#
# Transaction Test Suite
#
######################################################################
#
# Load the Transaction test
#
class TransactionTestSuite(unittest.TestSuite):
def __init__(self, dirSrc, dirTests, bUpdateExpected):
unittest.TestSuite.__init__(self)
self.addTest(TransactionTestCase("commit", dirSrc, dirTests, True, bUpdateExpected))
self.addTest(TransactionTestCase("rollback", dirSrc, dirTests, False, bUpdateExpected))
#
# Transaction test case
#
class TransactionTestCase(unittest.TestCase):
def __init__(self, strDesc, dirSrc, dirTests, bCommit, bUpdateExpected):
unittest.TestCase.__init__(self)
self.__strDesc = strDesc
self.__dirSrc = dirSrc
self.__dirTest = os.path.join(dirTests, "Transaction")
self.__testProgram = os.path.join(dirSrc, "unittest")
self.__fileState = os.path.join("state", "utctx_api.st")
self.__fileEndState = os.path.join(dirSrc, "utctx.st")
self.__bCommit = bCommit
self.__bUpdateExpected = bUpdateExpected
def __str__(self):
return "Transaction: %s" % self.__strDesc
def runTest(self):
# Init the end state file with the state
shutil.copy(self.__fileState, self.__fileEndState)
# Setup the cmd args
args = [os.path.abspath(self.__testProgram), "set"]
if self.__bCommit:
args.extend(["commit"])
# Execute the test program
cmdLine = " ".join(args)
fhOutput = tempfile.TemporaryFile()
try:
proc = subprocess.Popen(args = args, stdout = fhOutput, cwd = self.__dirSrc)
returnCode = proc.wait()
except:
fhOutput.close()
raise
fhOutput.close()
if returnCode != 0:
self.fail("Test failed with return code %s!\n%s" % (returnCode, cmdLine))
# Setup diff files
fileActual = os.path.join(self.__dirTest, self.__strDesc, "actual.st")
fileExpected = os.path.join(self.__dirTest, self.__strDesc, "expected.st")
# Copy the end state to the actual state file
shutil.copy(self.__fileEndState, fileActual)
# Diff the actual output with expected
if not os.path.exists(fileExpected):
self.fail("Expected state file %s does not exist" % fileExpected)
elif FilesDiffer(fileExpected, fileActual):
if self.__bUpdateExpected:
shutil.copy(fileActual, fileExpected)
else:
self.fail('Actual state differs from expected: "%s" "%s"\n%s' %
(fileActual, fileExpected, cmdLine))
######################################################################
#
# RWLock Test Suite
#
######################################################################
#
# Load the RWLock test
#
class RWLockTestSuite(unittest.TestSuite):
def __init__(self, dirSrc):
unittest.TestSuite.__init__(self)
self.addTest(RWLockTestCase(dirSrc))
#
# RWLock test case
#
class RWLockTestCase(unittest.TestCase):
def __init__(self, dirSrc):
unittest.TestCase.__init__(self)
self.__dirSrc = dirSrc
self.__testProgram = os.path.join(dirSrc, "rwlock_unittest")
def __str__(self):
return "RWLock:"
def runTest(self):
fhOutput = tempfile.TemporaryFile()
args = os.path.abspath(self.__testProgram)
proc = subprocess.Popen(args, shell = True, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd = self.__dirSrc)
makeOutput = proc.communicate()[0]
if proc.wait() != 0:
if makeOutput is not None:
self.fail(makeOutput)
######################################################################
if __name__ == "__main__":
main()
| 33.912442 | 126 | 0.605381 |
3335f36d0f250a3cd551f342f5d4b6327b2cb5c9 | 38,752 | py | Python | Blender/scripts/MOD_Opti_PSK_Dump/MOD5_Opti_PSK_Dump_animation_might_be_stable_work_on_head_not_tail_align_for_offset.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | Blender/scripts/MOD_Opti_PSK_Dump/MOD5_Opti_PSK_Dump_animation_might_be_stable_work_on_head_not_tail_align_for_offset.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | Blender/scripts/MOD_Opti_PSK_Dump/MOD5_Opti_PSK_Dump_animation_might_be_stable_work_on_head_not_tail_align_for_offset.py | ECToo/unrealtacticalmod | f78c77587d8a706697703be064fb854ffc31ea4c | [
"Unlicense"
] | null | null | null | #!BPY
"""
Name: 'Unreal Skeletal Mesh/Animation (.psk and .psa) Mod05'
Blender: 240
Group: 'Export'
Tooltip: 'Unreal Skeletal Mesh and Animation Export (*.psk, *.psa)'
"""
__author__ = "Optimus_P-Fat/Active_Trash"
__version__ = "0.0.4"
__bpydoc__ = """\
-- Unreal Skeletal Mesh and Animation Export (.psk and .psa) export script v0.0.1 --<br>
- NOTES:
- This script Exports To Unreal's PSK and PSA file formats for Skeletal Meshes and Animations. <br>
- This script DOES NOT support vertex animation! These require completely different file formats. <br>
- v0.0.1
- Initial version
- v0.0.2
- This version adds support for more than one material index!
- v0.0.3
- This is a stable version that works on UT3; tested with a vehicle mesh.
- Main bone fix: no dummy bone needs to be present. Some parts may not work when the main bone is not detected.
- Fixed the bone offset position to the head of the bone it connects to.
- Each bone has two points: the head and the tail.
- Note: these notes were extended a bit, and the comments that are not needed here were commented out.
- PSA export does not work yet in this version.
- Edit by: Darknet
- v0.0.4
- Fixes the bone pose position issue in PSA export, which was offset to the tail instead of the head; it is now anchored to the head.
- To make PSA export work you must add the bones in the Unreal Editor from AnimSet under UseTranslationBoneNames.
- Edit by: Darknet
"""
# DANGER! This code is complete garbage! Do not read!
# TODO: Throw some liscence junk in here: (maybe some GPL?)
# Liscence Junk: Use this script for whatever you feel like!
import Blender, time, os, math, sys as osSys, operator
from Blender import sys, Window, Draw, Scene, Mesh, Material, Texture, Image, Mathutils, Armature
from cStringIO import StringIO
from struct import pack, calcsize
# REFERENCE MATERIAL JUST IN CASE:
#
# U = x / sqrt(x^2 + y^2 + z^2)
# V = y / sqrt(x^2 + y^2 + z^2)
#
# Triangles specified counter-clockwise for front face
#
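# A tiny sketch of the mapping quoted above (illustrative only, not used by the
# exporter): project a point onto the unit sphere and keep X/Y as U/V.
def _sphere_uv(x, y, z):
    length = math.sqrt(x * x + y * y + z * z)
    return x / length, y / length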
#defines for sizeofs
SIZE_FQUAT = 16
SIZE_FVECTOR = 12
SIZE_VJOINTPOS = 44
SIZE_ANIMINFOBINARY = 168
SIZE_VCHUNKHEADER = 32
SIZE_VMATERIAL = 88
SIZE_VBONE = 120
SIZE_FNAMEDBONEBINARY = 120
SIZE_VRAWBONEINFLUENCE = 12
SIZE_VQUATANIMKEY = 32
SIZE_VVERTEX = 16
SIZE_VPOINT = 12
SIZE_VTRIANGLE = 12
########################################################################
# Generic Object->Integer mapping
# the object must be usable as a dictionary key
class ObjMap:
def __init__(self):
self.dict = {}
self.next = 0
def get(self, obj):
if (obj in self.dict):
return self.dict[obj]
else:
id = self.next
self.next = self.next + 1
self.dict[obj] = id
return id
def items(self):
getval = operator.itemgetter(0)
getkey = operator.itemgetter(1)
return map(getval, sorted(self.dict.items(), key=getkey))
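# A minimal usage sketch of the mapping above (illustrative only, never called
# by the exporter); it assumes the keys are hashable, as the wrapper classes
# below are.
def _objmap_example():
    m = ObjMap()
    assert m.get('a') == 0          # new object -> next free index
    assert m.get('b') == 1
    assert m.get('a') == 0          # repeated lookups reuse the same index
    assert list(m.items()) == ['a', 'b']   # objects listed in index order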
########################################################################
# RG - UNREAL DATA STRUCTS - CONVERTED FROM C STRUCTS GIVEN ON UDN SITE
# provided here: http://udn.epicgames.com/Two/BinaryFormatSpecifications.html
class FQuat:
def __init__(self):
self.X = 0.0
self.Y = 0.0
self.Z = 0.0
self.W = 1.0
def dump(self):
data = pack('ffff', self.X, self.Y, self.Z, self.W)
return data
def __cmp__(self, other):
return cmp(self.X, other.X) \
or cmp(self.Y, other.Y) \
or cmp(self.Z, other.Z) \
or cmp(self.W, other.W)
def __hash__(self):
return hash(self.X) ^ hash(self.Y) ^ hash(self.Z) ^ hash(self.W)
def __str__(self):
return "[%f,%f,%f,%f](FQuat)" % (self.X, self.Y, self.Z, self.W)
class FVector:
def __init__(self, X=0.0, Y=0.0, Z=0.0):
self.X = X
self.Y = Y
self.Z = Z
def dump(self):
data = pack('fff', self.X, self.Y, self.Z)
return data
def __cmp__(self, other):
return cmp(self.X, other.X) \
or cmp(self.Y, other.Y) \
or cmp(self.Z, other.Z)
def __hash__(self):
return hash(self.X) ^ hash(self.Y) ^ hash(self.Z)
def dot(self, other):
return self.X * other.X + self.Y * other.Y + self.Z * other.Z
def cross(self, other):
return FVector(self.Y * other.Z - self.Z * other.Y,
self.Z * other.X - self.X * other.Z,
self.X * other.Y - self.Y * other.X)
def sub(self, other):
return FVector(self.X - other.X,
self.Y - other.Y,
self.Z - other.Z)
class VJointPos:
def __init__(self):
self.Orientation = FQuat()
self.Position = FVector()
self.Length = 0.0
self.XSize = 0.0
self.YSize = 0.0
self.ZSize = 0.0
def dump(self):
data = self.Orientation.dump() + self.Position.dump() + pack('4f', self.Length, self.XSize, self.YSize, self.ZSize)
return data
class AnimInfoBinary:
def __init__(self):
self.Name = "" # length=64
self.Group = "" # length=64
self.TotalBones = 0
self.RootInclude = 0
self.KeyCompressionStyle = 0
self.KeyQuotum = 0
self.KeyPrediction = 0.0
self.TrackTime = 0.0
self.AnimRate = 0.0
self.StartBone = 0
self.FirstRawFrame = 0
self.NumRawFrames = 0
def dump(self):
data = pack('64s64siiiifffiii', self.Name, self.Group, self.TotalBones, self.RootInclude, self.KeyCompressionStyle, self.KeyQuotum, self.KeyPrediction, self.TrackTime, self.AnimRate, self.StartBone, self.FirstRawFrame, self.NumRawFrames)
return data
class VChunkHeader:
def __init__(self, name, type_size):
self.ChunkID = name # length=20
self.TypeFlag = 1999801 # special value
self.DataSize = type_size
self.DataCount = 0
def dump(self):
data = pack('20siii', self.ChunkID, self.TypeFlag, self.DataSize, self.DataCount)
return data
class VMaterial:
def __init__(self):
self.MaterialName = "" # length=64
self.TextureIndex = 0
self.PolyFlags = 0 # DWORD
self.AuxMaterial = 0
self.AuxFlags = 0 # DWORD
self.LodBias = 0
self.LodStyle = 0
def dump(self):
data = pack('64siLiLii', self.MaterialName, self.TextureIndex, self.PolyFlags, self.AuxMaterial, self.AuxFlags, self.LodBias, self.LodStyle)
return data
class VBone:
def __init__(self):
self.Name = "" # length = 64
self.Flags = 0 # DWORD
self.NumChildren = 0
self.ParentIndex = 0
self.BonePos = VJointPos()
def dump(self):
data = pack('64sLii', self.Name, self.Flags, self.NumChildren, self.ParentIndex) + self.BonePos.dump()
return data
#same as above - whatever - this is how Epic does it...
class FNamedBoneBinary:
def __init__(self):
self.Name = "" # length = 64
self.Flags = 0 # DWORD
self.NumChildren = 0
self.ParentIndex = 0
self.BonePos = VJointPos()
self.IsRealBone = 0 # this is set to 1 when the bone is actually a bone in the mesh and not a dummy
def dump(self):
data = pack('64sLii', self.Name, self.Flags, self.NumChildren, self.ParentIndex) + self.BonePos.dump()
return data
class VRawBoneInfluence:
def __init__(self):
self.Weight = 0.0
self.PointIndex = 0
self.BoneIndex = 0
def dump(self):
data = pack('fii', self.Weight, self.PointIndex, self.BoneIndex)
return data
class VQuatAnimKey:
def __init__(self):
self.Position = FVector()
self.Orientation = FQuat()
self.Time = 0.0
def dump(self):
data = self.Position.dump() + self.Orientation.dump() + pack('f', self.Time)
return data
class VVertex:
def __init__(self):
self.PointIndex = 0 # WORD
self.U = 0.0
self.V = 0.0
self.MatIndex = 0 #BYTE
self.Reserved = 0 #BYTE
def dump(self):
data = pack('HHffBBH', self.PointIndex, 0, self.U, self.V, self.MatIndex, self.Reserved, 0)
return data
def __cmp__(self, other):
return cmp(self.PointIndex, other.PointIndex) \
or cmp(self.U, other.U) \
or cmp(self.V, other.V) \
or cmp(self.MatIndex, other.MatIndex) \
or cmp(self.Reserved, other.Reserved)
def __hash__(self):
return hash(self.PointIndex) \
^ hash(self.U) ^ hash(self.V) \
^ hash(self.MatIndex) \
^ hash(self.Reserved)
class VPoint:
def __init__(self):
self.Point = FVector()
def dump(self):
return self.Point.dump()
def __cmp__(self, other):
return cmp(self.Point, other.Point)
def __hash__(self):
return hash(self.Point)
class VTriangle:
def __init__(self):
self.WedgeIndex0 = 0 # WORD
self.WedgeIndex1 = 0 # WORD
self.WedgeIndex2 = 0 # WORD
self.MatIndex = 0 # BYTE
self.AuxMatIndex = 0 # BYTE
self.SmoothingGroups = 0 # DWORD
def dump(self):
data = pack('HHHBBL', self.WedgeIndex0, self.WedgeIndex1, self.WedgeIndex2, self.MatIndex, self.AuxMatIndex, self.SmoothingGroups)
return data
# END UNREAL DATA STRUCTS
########################################################################
#RG - helper class to handle the normal way the UT files are stored
#as sections consisting of a header and then a list of data structures
class FileSection:
def __init__(self, name, type_size):
self.Header = VChunkHeader(name, type_size)
self.Data = [] # list of datatypes
def dump(self):
data = self.Header.dump()
for i in range(len(self.Data)):
data = data + self.Data[i].dump()
return data
def UpdateHeader(self):
self.Header.DataCount = len(self.Data)
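# A small sketch of the section layout described above (illustrative only, never
# called by the exporter): a section serializes as a 32-byte VChunkHeader
# followed by DataCount fixed-size records.
def _filesection_example():
    section = FileSection("PNTS0000", SIZE_VPOINT)
    section.Data.append(VPoint())
    section.Data.append(VPoint())
    section.UpdateHeader()
    assert len(section.dump()) == SIZE_VCHUNKHEADER + 2 * SIZE_VPOINT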
class PSKFile:
def __init__(self):
self.GeneralHeader = VChunkHeader("ACTRHEAD", 0)
self.Points = FileSection("PNTS0000", SIZE_VPOINT) #VPoint
self.Wedges = FileSection("VTXW0000", SIZE_VVERTEX) #VVertex
self.Faces = FileSection("FACE0000", SIZE_VTRIANGLE) #VTriangle
self.Materials = FileSection("MATT0000", SIZE_VMATERIAL) #VMaterial
self.Bones = FileSection("REFSKELT", SIZE_VBONE) #VBone
self.Influences = FileSection("RAWWEIGHTS", SIZE_VRAWBONEINFLUENCE) #VRawBoneInfluence
#RG - this mapping is not dumped, but is used internally to store the new point indices
# for vertex groups calculated during the mesh dump, so they can be used again
# to dump bone influences during the armature dump
#
# the key in this dictionary is the VertexGroup/Bone Name, and the value
# is a list of tuples containing the new point index and the weight, in that order
#
# Layout:
# { groupname : [ (index, weight), ... ], ... }
#
# example:
# { 'MyVertexGroup' : [ (0, 1.0), (5, 1.0), (3, 0.5) ] , 'OtherGroup' : [(2, 1.0)] }
self.VertexGroups = {}
def AddPoint(self, p):
#print 'AddPoint'
self.Points.Data.append(p)
def AddWedge(self, w):
#print 'AddWedge'
self.Wedges.Data.append(w)
def AddFace(self, f):
#print 'AddFace'
self.Faces.Data.append(f)
def AddMaterial(self, m):
#print 'AddMaterial'
self.Materials.Data.append(m)
def AddBone(self, b):
#print 'AddBone [%s]: Position: (x=%f, y=%f, z=%f) Rotation=(%f,%f,%f,%f)' % (b.Name, b.BonePos.Position.X, b.BonePos.Position.Y, b.BonePos.Position.Z, b.BonePos.Orientation.X,b.BonePos.Orientation.Y,b.BonePos.Orientation.Z,b.BonePos.Orientation.W)
self.Bones.Data.append(b)
def AddInfluence(self, i):
#print 'AddInfluence'
self.Influences.Data.append(i)
def UpdateHeaders(self):
self.Points.UpdateHeader()
self.Wedges.UpdateHeader()
self.Faces.UpdateHeader()
self.Materials.UpdateHeader()
self.Bones.UpdateHeader()
self.Influences.UpdateHeader()
def dump(self):
self.UpdateHeaders()
data = self.GeneralHeader.dump() + self.Points.dump() + self.Wedges.dump() + self.Faces.dump() + self.Materials.dump() + self.Bones.dump() + self.Influences.dump()
return data
def GetMatByIndex(self, mat_index):
if mat_index >= 0 and len(self.Materials.Data) > mat_index:
return self.Materials.Data[mat_index]
else:
m = VMaterial()
m.MaterialName = "Mat%i" % mat_index
self.AddMaterial(m)
return m
def PrintOut(self):
print '--- PSK FILE EXPORTED ---'
print 'point count: %i' % len(self.Points.Data)
print 'wedge count: %i' % len(self.Wedges.Data)
print 'face count: %i' % len(self.Faces.Data)
print 'material count: %i' % len(self.Materials.Data)
print 'bone count: %i' % len(self.Bones.Data)
print 'inlfuence count: %i' % len(self.Influences.Data)
print '-------------------------'
# PSA FILE NOTES FROM UDN:
#
# The raw key array holds all the keys for all the bones in all the specified sequences,
# organized as follows:
# For each AnimInfoBinary's sequence there are [Number of bones] times [Number of frames keys]
# in the VQuatAnimKeys, laid out as tracks of [numframes] keys for each bone in the order of
# the bones as defined in the array of FnamedBoneBinary in the PSA.
#
# Once the data from the PSK (now digested into native skeletal mesh) and PSA (digested into
# a native animation object containing one or more sequences) are associated together at runtime,
# bones are linked up by name. Any bone in a skeleton (from the PSK) that finds no partner in
# the animation sequence (from the PSA) will assume its reference pose stance ( as defined in
# the offsets & rotations that are in the VBones making up the reference skeleton from the PSK)
class PSAFile:
def __init__(self):
self.GeneralHeader = VChunkHeader("ANIMHEAD", 0)
self.Bones = FileSection("BONENAMES", SIZE_FNAMEDBONEBINARY) #FNamedBoneBinary
self.Animations = FileSection("ANIMINFO", SIZE_ANIMINFOBINARY) #AnimInfoBinary
self.RawKeys = FileSection("ANIMKEYS", SIZE_VQUATANIMKEY) #VQuatAnimKey
# this will take the format of key=Bone Name, value = (BoneIndex, Bone Object)
# THIS IS NOT DUMPED
self.BoneLookup = {}
def dump(self):
        data = self.GeneralHeader.dump() + self.Bones.dump() + self.Animations.dump() + self.RawKeys.dump()
return data
def AddBone(self, b):
#LOUD
#print "AddBone: " + b.Name
self.Bones.Data.append(b)
def AddAnimation(self, a):
#LOUD
#print "AddAnimation: %s, TotalBones: %i, AnimRate: %f, NumRawFrames: %i, TrackTime: %f" % (a.Name, a.TotalBones, a.AnimRate, a.NumRawFrames, a.TrackTime)
self.Animations.Data.append(a)
def AddRawKey(self, k):
#LOUD
#print "AddRawKey [%i]: Time: %f, Quat: x=%f, y=%f, z=%f, w=%f, Position: x=%f, y=%f, z=%f" % (len(self.RawKeys.Data), k.Time, k.Orientation.X, k.Orientation.Y, k.Orientation.Z, k.Orientation.W, k.Position.X, k.Position.Y, k.Position.Z)
self.RawKeys.Data.append(k)
def UpdateHeaders(self):
self.Bones.UpdateHeader()
self.Animations.UpdateHeader()
self.RawKeys.UpdateHeader()
def GetBoneByIndex(self, bone_index):
if bone_index >= 0 and len(self.Bones.Data) > bone_index:
return self.Bones.Data[bone_index]
def IsEmpty(self):
return (len(self.Bones.Data) == 0 or len(self.Animations.Data) == 0)
def StoreBone(self, b):
self.BoneLookup[b.Name] = [-1, b]
def UseBone(self, bone_name):
if bone_name in self.BoneLookup:
bone_data = self.BoneLookup[bone_name]
if bone_data[0] == -1:
bone_data[0] = len(self.Bones.Data)
self.AddBone(bone_data[1])
#self.Bones.Data.append(bone_data[1])
return bone_data[0]
def GetBoneByName(self, bone_name):
if bone_name in self.BoneLookup:
bone_data = self.BoneLookup[bone_name]
return bone_data[1]
def GetBoneIndex(self, bone_name):
if bone_name in self.BoneLookup:
bone_data = self.BoneLookup[bone_name]
return bone_data[0]
def dump(self):
self.UpdateHeaders()
data = self.GeneralHeader.dump() + self.Bones.dump() + self.Animations.dump() + self.RawKeys.dump()
return data
def PrintOut(self):
print '--- PSA FILE EXPORTED ---'
print 'bone count: %i' % len(self.Bones.Data)
print 'animation count: %i' % len(self.Animations.Data)
print 'rawkey count: %i' % len(self.RawKeys.Data)
print '-------------------------'
####################################
# helpers to create bone structs
def make_vbone(name, parent_index, child_count, orientation_quat, position_vect):
bone = VBone()
bone.Name = name
bone.ParentIndex = parent_index
bone.NumChildren = child_count
bone.BonePos.Orientation = orientation_quat
bone.BonePos.Position.X = position_vect.x
bone.BonePos.Position.Y = position_vect.y
bone.BonePos.Position.Z = position_vect.z
#these values seem to be ignored?
#bone.BonePos.Length = tail.length
#bone.BonePos.XSize = tail.x
#bone.BonePos.YSize = tail.y
#bone.BonePos.ZSize = tail.z
return bone
def make_namedbonebinary(name, parent_index, child_count, orientation_quat, position_vect, is_real):
bone = FNamedBoneBinary()
bone.Name = name
bone.ParentIndex = parent_index
bone.NumChildren = child_count
bone.BonePos.Orientation = orientation_quat
bone.BonePos.Position.X = position_vect.x
bone.BonePos.Position.Y = position_vect.y
bone.BonePos.Position.Z = position_vect.z
bone.IsRealBone = is_real
return bone
##################################################
#RG - check to make sure face isn't a line
def is_1d_face(blender_face):
return ((blender_face.v[0].co == blender_face.v[1].co) or \
(blender_face.v[1].co == blender_face.v[2].co) or \
(blender_face.v[2].co == blender_face.v[0].co))
##################################################
# Actual object parsing functions
def parse_meshes(blender_meshes, psk_file):
print "----- parsing meshes -----"
#print 'blender_meshes length: %i' % (len(blender_meshes))
for current_obj in blender_meshes:
current_mesh = current_obj.getData()
#print 'current mesh name: ' + current_mesh.name
#raw_mesh = Mesh.Get(current_mesh.name)
# Get the world transform for the object
object_mat = current_obj.mat
# add material 0
#m = VMaterial()
#m.MaterialName = "Mat0"
#psk_file.AddMaterial(m)
#print 'faces: %i' % (len(current_mesh.faces))
#print 'verts: %i' % (len(current_mesh.verts))
#print 'has face UV: %i' % (current_mesh.hasFaceUV())
points = ObjMap()
wedges = ObjMap()
discarded_face_count = 0
#print ' -- Dumping Mesh Faces -- '
for current_face in current_mesh.faces:
#print ' -- Dumping UVs -- '
#print current_face.uv
if len(current_face.v) != 3:
raise RuntimeError("Non-triangular face (%i)" % len(current_face.v))
#todo: add two fake faces made of triangles?
#RG - apparently blender sometimes has problems when you do quad to triangle
# conversion, and ends up creating faces that have only TWO points -
# one of the points is simply in the vertex list for the face twice.
# This is bad, since we can't get a real face normal for a LINE, we need
# a plane for this. So, before we add the face to the list of real faces,
# ensure that the face is actually a plane, and not a line. If it is not
# planar, just discard it and notify the user in the console after we're
# done dumping the rest of the faces
if not is_1d_face(current_face):
wedge_list = []
vect_list = []
#get or create the current material
m = psk_file.GetMatByIndex(current_face.mat)
print 'material: %i' % (current_face.mat)
for i in range(3):
vert = current_face.v[i]
if len(current_face.uv) != 3:
#print "WARNING: Current face is missing UV coordinates - writing 0,0..."
uv = [0.0, 0.0]
else:
uv = list(current_face.uv[i])
#flip V coordinate because UEd requires it and DOESN'T flip it on its own like it
#does with the mesh Y coordinates.
#this is otherwise known as MAGIC-2
uv[1] = 1.0 - uv[1]
#print "Vertex UV: ", uv, " UVCO STUFF:", vert.uvco.x, vert.uvco.y
# RE - Append untransformed vector (for normal calc below)
# TODO: convert to Blender.Mathutils
vect_list.append(FVector(vert.co.x, vert.co.y, vert.co.z))
# Transform position for export
vpos = vert.co * object_mat
# Create the point
p = VPoint()
p.Point.X = vpos.x
p.Point.Y = vpos.y
p.Point.Z = vpos.z
# Create the wedge
w = VVertex()
w.MatIndex = current_face.mat
w.PointIndex = points.get(p) # get index from map
w.U = uv[0]
w.V = uv[1]
wedge_index = wedges.get(w)
wedge_list.append(wedge_index)
#print results
#print 'result PointIndex=%i, U=%f, V=%f, wedge_index=%i' % (
# w.PointIndex,
# w.U,
# w.V,
# wedge_index)
# Determine face vertex order
# get normal from blender
no = current_face.no
# TODO: convert to Blender.Mathutils
# convert to FVector
norm = FVector(no[0], no[1], no[2])
# Calculate the normal of the face in blender order
tnorm = vect_list[1].sub(vect_list[0]).cross(vect_list[2].sub(vect_list[1]))
# RE - dot the normal from blender order against the blender normal
# this gives the product of the two vectors' lengths along the blender normal axis
# all that matters is the sign
dot = norm.dot(tnorm)
# print results
#print 'face norm: (%f,%f,%f), tnorm=(%f,%f,%f), dot=%f' % (
# norm.X, norm.Y, norm.Z,
# tnorm.X, tnorm.Y, tnorm.Z,
# dot)
tri = VTriangle()
# RE - magic: if the dot product above > 0, order the vertices 2, 1, 0
# if the dot product above < 0, order the vertices 0, 1, 2
# if the dot product is 0, then blender's normal is coplanar with the face
# and we cannot deduce which side of the face is the outside of the mesh
if (dot > 0):
(tri.WedgeIndex2, tri.WedgeIndex1, tri.WedgeIndex0) = wedge_list
elif (dot < 0):
(tri.WedgeIndex0, tri.WedgeIndex1, tri.WedgeIndex2) = wedge_list
else:
raise RuntimeError("normal vector coplanar with face! points:", current_face.v[0].co, current_face.v[1].co, current_face.v[2].co)
tri.MatIndex = current_face.mat
psk_file.AddFace(tri)
else:
discarded_face_count = discarded_face_count + 1
for point in points.items():
psk_file.AddPoint(point)
for wedge in wedges.items():
psk_file.AddWedge(wedge)
        #RG - if we happened upon any non-planar faces above that we've discarded,
# just let the user know we discarded them here in case they want
# to investigate
if discarded_face_count > 0:
print "INFO: Discarded %i non-planar faces." % (discarded_face_count)
#RG - walk through the vertex groups and find the indexes into the PSK points array
#for them, then store that index and the weight as a tuple in a new list of
#verts for the group that we can look up later by bone name, since Blender matches
#verts to bones for influences by having the VertexGroup named the same thing as
#the bone
vertex_groups = current_mesh.getVertGroupNames()
for group in vertex_groups:
verts = current_mesh.getVertsFromGroup(group, 1)
vert_list = []
for vert_data in verts:
vert_index = vert_data[0]
vert_weight = vert_data[1]
vert = current_mesh.verts[vert_index]
vpos = vert.co * object_mat
p = VPoint()
p.Point.X = vpos.x
p.Point.Y = vpos.y
p.Point.Z = vpos.z
point_index = points.get(p)
v_item = (point_index, vert_weight)
vert_list.append(v_item)
#print 'VertexGroup: %s, vert index=%i, point_index=%i' % (group, vert_index, point_index)
psk_file.VertexGroups[group] = vert_list
def make_fquat(bquat):
quat = FQuat()
#flip handedness for UT = set x,y,z to negative (rotate in other direction)
quat.X = bquat.x
quat.Y = bquat.y
quat.Z = bquat.z
quat.W = bquat.w
return quat
# TODO: remove this 1am hack
nbone = 0
def parse_bone(blender_bone, psk_file, psa_file, parent_id, is_root_bone, parent_mat):
global nbone # look it's evil!
#print ' --- Dumping Bone --- '
#print 'blender bone name: ' + blender_bone.name
'''
if blender_bone.hasChildren():
child_count = len(blender_bone.children)
else:
child_count = 0
'''
child_count = len(blender_bone.children)
'''
if (parent_mat):
head = blender_bone.head['ARMATURESPACE'] * parent_mat
tail = blender_bone.tail['ARMATURESPACE'] * parent_mat
rot_mat = blender_bone.matrix['ARMATURESPACE'] * parent_mat.rotationPart()
quat = make_fquat(rot_mat.toQuat())
else:
head = blender_bone.head['BONESPACE']
tail = blender_bone.tail['BONESPACE']
quat = make_fquat(blender_bone.matrix['BONESPACE'].toQuat())
'''
head = blender_bone.head['ARMATURESPACE'] #BONESPACE #ARMATURESPACE
tail = blender_bone.tail['ARMATURESPACE']
quat = make_fquat(blender_bone.matrix['ARMATURESPACE'].toQuat())
bone_vect = tail-head
#LOUD
#print "Head: ", head
#print "Tail: ", tail
#print "Quat: ", quat
final_parent_id = parent_id
#RG/RE -
    #if we are not separated by a small distance, create a dummy bone for the displacement
#this is only needed for root bones, since UT assumes a connected skeleton, and from here
#down the chain we just use "tail" as an endpoint
#if(head.length > 0.001 and is_root_bone == 1):
if(0):
pb = make_vbone("dummy_" + blender_bone.name, parent_id, 1, FQuat(), head)
psk_file.AddBone(pb)
pbb = make_namedbonebinary("dummy_" + blender_bone.name, parent_id, 1, FQuat(), head, 0)
psa_file.StoreBone(pbb)
final_parent_id = nbone
nbone = nbone + 1
tail = tail-head
my_id = nbone
if nbone == 0:
pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, head)
#pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, head)
psk_file.AddBone(pb)
pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, head, 1)
#pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, head, 1)
psa_file.StoreBone(pbb)
else:
pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, head)
#pb = make_vbone(blender_bone.name, final_parent_id, child_count, quat, head)
psk_file.AddBone(pb)
pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, head, 1)
#pbb = make_namedbonebinary(blender_bone.name, final_parent_id, child_count, quat, head, 1)
psa_file.StoreBone(pbb)
print 'blender bone name: ' + blender_bone.name,"final_parent_id",final_parent_id,"child_count",child_count,"nbone:",nbone,"my_id:",my_id
nbone = nbone + 1
#RG - dump influences for this bone - use the data we collected in the mesh dump phase
# to map our bones to vertex groups
if blender_bone.name in psk_file.VertexGroups:
vertex_list = psk_file.VertexGroups[blender_bone.name]
for vertex_data in vertex_list:
point_index = vertex_data[0]
vertex_weight = vertex_data[1]
influence = VRawBoneInfluence()
influence.Weight = vertex_weight
influence.BoneIndex = my_id
influence.PointIndex = point_index
#print 'Adding Bone Influence for [%s] = Point Index=%i, Weight=%f' % (my_id, point_index, vertex_weight)
psk_file.AddInfluence(influence)
#recursively dump child bones
if blender_bone.hasChildren():
for current_child_bone in blender_bone.children:
parse_bone(current_child_bone, psk_file, psa_file, my_id, 0, None)
def make_armature_bone(blender_object, psk_file, psa_file):
# this makes a dummy bone to offset the armature origin for each armature
global nbone #hacky
my_id = nbone
armature = blender_object.getData()
#screw efficiency! just calc this again
bones = [x for x in armature.bones.values() if not x.hasParent()]
child_count = len(bones)
object_matrix = blender_object.mat
quat = make_fquat(object_matrix.toQuat())
tail = object_matrix.translationPart()
print "root bone:",blender_object.name
#for psk file
root_bone = make_vbone(blender_object.name, 0, child_count, quat, tail)
psk_file.AddBone(root_bone)
#for psa file
root_bone_binary = make_namedbonebinary(blender_object.name, 0, child_count, quat, tail, 0)
psa_file.StoreBone(root_bone_binary)
nbone = nbone + 1
return my_id
def parse_armature(blender_armature, psk_file, psa_file):
global nbone
print "----- parsing armature -----"
#print 'blender_armature length: %i' % (len(blender_armature))
#magic 0 sized root bone for UT - this is where all armature dummy bones will attach
#dont increment nbone here because we initialize it to 1 (hackity hackity hack)
#count top level bones first. screw efficiency again - ohnoz it will take dayz to runz0r!
child_count = 0
for current_obj in blender_armature:
current_armature = current_obj.getData()
bones = [x for x in current_armature.bones.values() if not x.hasParent()]
child_count += len(bones)
#make root bone
'''
pb = make_vbone("", 0, child_count, FQuat(), Blender.Mathutils.Vector(0,0,0))
psk_file.AddBone(pb)
pbb = make_namedbonebinary("", 0, child_count, FQuat(), Blender.Mathutils.Vector(0,0,0), 0)
psa_file.StoreBone(pbb)
'''
for current_obj in blender_armature:
print 'current armature name: ' + current_obj.name
current_armature = current_obj.getData()
#armature_id = make_armature_bone(current_obj, psk_file, psa_file)
#we dont want children here - only the top level bones of the armature itself
#we will recursively dump the child bones as we dump these bones
bones = [x for x in current_armature.bones.values() if not x.hasParent()]
for current_bone in bones:
parse_bone(current_bone, psk_file, psa_file, 0, 0, current_obj.mat)
# get blender objects by type
def get_blender_objects(objects, type):
return [x for x in objects if x.getType() == type]
#strips current extension (if any) from filename and replaces it with extension passed in
def make_filename_ext(filename, extension):
new_filename = ''
extension_index = filename.rfind('.')
if extension_index == -1:
new_filename = filename + extension
else:
new_filename = filename[0:extension_index] + extension
return new_filename
# returns the quaternion Grassman product a*b
# this is the same as the rotation a(b(x))
# (ie. the same as B*A if A and B are matrices representing
# the rotations described by quaternions a and b)
def grassman(a, b):
return Blender.Mathutils.Quaternion(
a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z,
a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y,
a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x,
a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w)
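# Usage sketch (assumes the Blender 2.4x Mathutils API used elsewhere in this
# script; the axis/angle values are illustrative). grassman(a, b) composes two
# rotations so that b is applied first, then a; this is how the rest pose and
# the pose-bone rotation are combined in parse_animation() below.
#   qx = Blender.Mathutils.Quaternion([1.0, 0.0, 0.0], 90)  # 90 degrees about X
#   qz = Blender.Mathutils.Quaternion([0.0, 0.0, 1.0], 45)  # 45 degrees about Z
#   q = grassman(qx, qz)                                     # rotate by qz, then by qx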
def parse_animation(blender_scene, psa_file):
print "----- parsing animation -----"
blender_context = blender_scene.getRenderingContext()
anim_rate = blender_context.framesPerSec()
#print 'Scene: %s Start Frame: %i, End Frame: %i' % (blender_scene.getName(), blender_context.startFrame(), blender_context.endFrame())
#print "Frames Per Sec: %i" % anim_rate
export_objects = blender_scene.objects
blender_armatures = get_blender_objects(export_objects, 'Armature')
cur_frame_index = 0
for act in Armature.NLA.GetActions().values():
action_name = act.getName()
action_keyframes = act.getFrameNumbers()
start_frame = min(action_keyframes)
end_frame = max(action_keyframes)
scene_frames = xrange(start_frame, end_frame+1)
#scene_frames = action_keyframes
frame_count = len(scene_frames)
anim = AnimInfoBinary()
anim.Name = action_name
anim.Group = "" #wtf is group?
anim.NumRawFrames = frame_count
anim.AnimRate = anim_rate
anim.FirstRawFrame = cur_frame_index
count_previous_keys = len(psa_file.RawKeys.Data)
#print "------------ Action: %s, frame keys:" % (action_name) , action_keys
print "----- Action: %s" % action_name;
unique_bone_indexes = {}
for obj in blender_armatures:
current_armature = obj.getData()
act.setActive(obj)
# bone lookup table
bones_lookup = {}
for bone in current_armature.bones.values():
bones_lookup[bone.name] = bone
frame_count = len(scene_frames)
#print "Frame Count: %i" % frame_count
pose_data = obj.getPose()
#these must be ordered in the order the bones will show up in the PSA file!
ordered_bones = {}
ordered_bones = sorted([(psa_file.UseBone(x.name), x) for x in pose_data.bones.values()], key=operator.itemgetter(0))
#############################
# ORDERED FRAME, BONE
#for frame in scene_frames:
for i in range(frame_count):
frame = scene_frames[i]
#LOUD
#print "==== outputting frame %i ===" % frame
if frame_count > i+1:
next_frame = scene_frames[i+1]
#print "This Frame: %i, Next Frame: %i" % (frame, next_frame)
else:
next_frame = -1
#print "This Frame: %i, Next Frame: NONE" % frame
Blender.Set('curframe', frame)
cur_frame_index = cur_frame_index + 1
for bone_data in ordered_bones:
bone_index = bone_data[0]
pose_bone = bone_data[1]
blender_bone = bones_lookup[pose_bone.name]
#just need the total unique bones used, later for this AnimInfoBinary
unique_bone_indexes[bone_index] = bone_index
#LOUD
#print "-------------------", pose_bone.name
#head = blender_bone.head['ARMATURESPACE'] #BONESPACE
#tail = blender_bone.tail['ARMATURESPACE'] #BONESPACE
#quat = blender_bone.matrix['ARMATURESPACE'].toQuat() #BONESPACE
head = blender_bone.head['ARMATURESPACE'] #BONESPACE #ARMATURESPACE
tail = blender_bone.tail['ARMATURESPACE'] #BONESPACE
quat = blender_bone.matrix['ARMATURESPACE'].toQuat() #BONESPACE
#print "Head: ", head
#print "Tail: ", tail
#print "Quat: ", quat
#print "orig quat: ", quat
#print "pose quat: ", pose_bone.quat
#head = pose_bone.head
quat = grassman(quat, pose_bone.quat)
#WOW
#tail = (pose_bone.quat * (head)) + head + pose_bone.loc
#tail = (pose_bone.quat*(head))+ (head)+ pose_bone.loc
#tail = (pose_bone.quat) + pose_bone.loc
tail = (pose_bone.quat * (tail)) + tail + pose_bone.loc
head = (pose_bone.quat * (head)) + head + pose_bone.loc
# no parent? apply armature transform
if not blender_bone.hasParent():
                        print "no parent: applying armature transform"
parent_mat = obj.mat
head = head * parent_mat
tail = tail * parent_mat
quat = grassman(parent_mat.toQuat(), quat)
#print blender_bone.name
#print "Head: ", head
#print "Tail: ", tail
#print "Quat: ", quat
#print "L0c: ", pose_bone.loc
vkey = VQuatAnimKey()
vkey.Position.X = head.x
vkey.Position.Y = head.y
vkey.Position.Z = head.z
#vkey.Position.X = 0.0
#vkey.Position.Y = 1.0
#vkey.Position.Z = 0.0
vkey.Orientation = make_fquat(quat)
#time frm now till next frame = diff / framesPerSec
if next_frame >= 0:
diff = next_frame - frame
else:
diff = 1.0
#print "Diff = ", diff
vkey.Time = float(diff)/float(blender_context.framesPerSec())
psa_file.AddRawKey(vkey)
#done looping frames
#done looping armatures
#continue adding animInfoBinary counts here
anim.TotalBones = len(unique_bone_indexes)
anim.TrackTime = float(frame_count) / anim.AnimRate
psa_file.AddAnimation(anim)
def fs_callback(filename):
t = sys.time()
import time
import datetime
print "======EXPORTING TO UNREAL SKELETAL MESH FORMATS========\r\n"
psk = PSKFile()
psa = PSAFile()
#sanity check - this should already have the extension, but just in case, we'll give it one if it doesn't
psk_filename = make_filename_ext(filename, '.psk')
#make the psa filename
psa_filename = make_filename_ext(filename, '.psa')
#print 'PSK File: ' + psk_filename
#print 'PSA File: ' + psa_filename
blender_meshes = []
blender_armature = []
current_scene = Blender.Scene.GetCurrent()
current_scene.makeCurrent()
cur_frame = Blender.Get('curframe') #store current frame before we start walking them during animation parse
objects = current_scene.getChildren()
blender_meshes = get_blender_objects(objects, 'Mesh')
blender_armature = get_blender_objects(objects, 'Armature')
try:
#######################
# STEP 1: MESH DUMP
# we build the vertexes, wedges, and faces in here, as well as a vertexgroup lookup table
# for the armature parse
parse_meshes(blender_meshes, psk)
except:
Blender.Set('curframe', cur_frame) #set frame back to original frame
print "Exception during Mesh Parse"
raise
try:
#######################
# STEP 2: ARMATURE DUMP
# IMPORTANT: do this AFTER parsing meshes - we need to use the vertex group data from
# the mesh parse in here to generate bone influences
parse_armature(blender_armature, psk, psa)
except:
Blender.Set('curframe', cur_frame) #set frame back to original frame
print "Exception during Armature Parse"
raise
try:
#######################
# STEP 3: ANIMATION DUMP
# IMPORTANT: do AFTER parsing bones - we need to do bone lookups in here during animation frames
parse_animation(current_scene, psa)
except:
Blender.Set('curframe', cur_frame) #set frame back to original frame
print "Exception during Animation Parse"
raise
# reset current frame
Blender.Set('curframe', cur_frame) #set frame back to original frame
##########################
# FILE WRITE
#RG - dump psk file
psk.PrintOut()
file = open(psk_filename, "wb")
file.write(psk.dump())
file.close()
print 'Successfully Exported File: ' + psk_filename
#RG - dump psa file
if not psa.IsEmpty():
psa.PrintOut()
file = open(psa_filename, "wb")
file.write(psa.dump())
file.close()
print 'Successfully Exported File: ' + psa_filename
else:
print 'No Animations to Export'
print 'My Export MOD05 PSK/PSA Script finished in %.2f seconds' % (sys.time()-t)
t = datetime.datetime.now()
#print "Epoch Seconds:", time.mktime(t.timetuple())
EpochSeconds = time.mktime(t.timetuple())
print datetime.datetime.fromtimestamp(EpochSeconds)
#now = datetime.datetime.fromtimestamp(datetime.datetime.now())
#print now.ctime()
if __name__ == '__main__':
Window.FileSelector(fs_callback, 'Export PSK/PSA File', sys.makename(ext='.psk'))
#fs_callback('c:\\ChainBenderSideTurret.psk') | 32.537364 | 252 | 0.658366 |
69b311ca9a1b3953aef25c2499ce05cca83047bc | 18,835 | py | Python | nova/openstack/common/gettextutils.py | bopopescu/nova-28 | add7f5625ba49c0575328294a796428c443cd988 | [
"Apache-2.0"
] | null | null | null | nova/openstack/common/gettextutils.py | bopopescu/nova-28 | add7f5625ba49c0575328294a796428c443cd988 | [
"Apache-2.0"
] | 5 | 2020-06-05T17:58:28.000Z | 2022-02-11T03:39:35.000Z | nova/openstack/common/gettextutils.py | bopopescu/nova-28 | add7f5625ba49c0575328294a796428c443cd988 | [
"Apache-2.0"
] | 1 | 2020-07-24T06:47:54.000Z | 2020-07-24T06:47:54.000Z | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from nova.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
_AVAILABLE_LANGUAGES = {}
# FIXME(dhellmann): Remove this when moving to oslo.i18n.
USE_LAZY = False
class TranslatorFactory(object):
"""Create translator functions
"""
def __init__(self, domain, lazy=False, localedir=None):
"""Establish a set of translation functions for the domain.
:param domain: Name of translation domain,
specifying a message catalog.
:type domain: str
:param lazy: Delays translation until a message is emitted.
Defaults to False.
:type lazy: Boolean
:param localedir: Directory with translation catalogs.
:type localedir: str
"""
self.domain = domain
self.lazy = lazy
if localedir is None:
localedir = os.environ.get(domain.upper() + '_LOCALEDIR')
self.localedir = localedir
def _make_translation_func(self, domain=None):
"""Return a new translation function ready for use.
Takes into account whether or not lazy translation is being
done.
The domain can be specified to override the default from the
factory, but the localedir from the factory is always used
because we assume the log-level translation catalogs are
installed in the same directory as the main application
catalog.
"""
if domain is None:
domain = self.domain
if self.lazy:
return functools.partial(Message, domain=domain)
t = gettext.translation(
domain,
localedir=self.localedir,
fallback=True,
)
if six.PY3:
return t.gettext
return t.ugettext
@property
def primary(self):
"The default translation function."
return self._make_translation_func()
def _make_log_translation_func(self, level):
return self._make_translation_func(self.domain + '-log-' + level)
@property
def log_info(self):
"Translate info-level log messages."
return self._make_log_translation_func('info')
@property
def log_warning(self):
"Translate warning-level log messages."
return self._make_log_translation_func('warning')
@property
def log_error(self):
"Translate error-level log messages."
return self._make_log_translation_func('error')
@property
def log_critical(self):
"Translate critical-level log messages."
return self._make_log_translation_func('critical')
# NOTE(dhellmann): When this module moves out of the incubator into
# oslo.i18n, these global variables can be moved to an integration
# module within each application.
# Create the global translation functions.
_translators = TranslatorFactory('nova')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
# NOTE(dhellmann): End of globals that will move to the application's
# integration module.
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
# FIXME(dhellmann): This function will be removed in oslo.i18n,
# because the TranslatorFactory makes it superfluous.
global _, _LI, _LW, _LE, _LC, USE_LAZY
tf = TranslatorFactory('nova', lazy=True)
_ = tf.primary
_LI = tf.log_info
_LW = tf.log_warning
_LE = tf.log_error
_LC = tf.log_critical
USE_LAZY = True
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
from six import moves
tf = TranslatorFactory(domain, lazy=True)
moves.builtins.__dict__['_'] = tf.primary
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
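# Usage sketch for install() (assumption: called once at application start-up,
# before any module-level _() calls are evaluated):
#   from nova.openstack.common import gettextutils
#   gettextutils.install('nova', lazy=True)
#   print(_('This string is looked up in the nova message catalog'))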
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
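    Usage sketch (values are illustrative)::

        msg = Message('No instance found: %(id)s') % {'id': 42}
        localized = msg.translate('zh_CN')  # falls back to the msgid if no catalog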
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='nova', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
# Merge the dictionaries
# Copy each item in case one does not support deep copy.
params = {}
if isinstance(self.params, dict):
for key, val in self.params.items():
params[key] = self._copy_param(val)
for key, val in other.items():
params[key] = self._copy_param(val)
else:
params = self._copy_param(other)
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except Exception:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
if six.PY2:
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
| 37.745491 | 79 | 0.650279 |
add096f5a04be3b79076a7a9e072443c1a05ada2 | 1,932 | py | Python | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/sql/azure-mgmt-sqlvirtualmachine/azure/mgmt/sqlvirtualmachine/_configuration.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrestazure import AzureConfiguration
from .version import VERSION
class SqlVirtualMachineManagementClientConfiguration(AzureConfiguration):
"""Configuration for SqlVirtualMachineManagementClient
Note that all parameters used to create this instance are saved as instance
attributes.
:param credentials: Credentials needed for the client to connect to Azure.
:type credentials: :mod:`A msrestazure Credentials
object<msrestazure.azure_active_directory>`
:param subscription_id: Subscription ID that identifies an Azure
subscription.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(SqlVirtualMachineManagementClientConfiguration, self).__init__(base_url)
# Starting Autorest.Python 4.0.64, make connection pool activated by default
self.keep_alive = True
self.add_user_agent('azure-mgmt-sqlvirtualmachine/{}'.format(VERSION))
self.add_user_agent('Azure-SDK-For-Python')
self.credentials = credentials
self.subscription_id = subscription_id
| 38.64 | 86 | 0.671843 |
6535c2ad7e83778c230674c0525da591801933c2 | 2,608 | py | Python | selfdrive/tombstoned.py | iambluefred/Test_3 | 31e888342ef86591bdd311be17e00f4f5036122b | [
"MIT"
] | null | null | null | selfdrive/tombstoned.py | iambluefred/Test_3 | 31e888342ef86591bdd311be17e00f4f5036122b | [
"MIT"
] | null | null | null | selfdrive/tombstoned.py | iambluefred/Test_3 | 31e888342ef86591bdd311be17e00f4f5036122b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import time
from raven import Client
from raven.transport.http import HTTPTransport
from selfdrive.version import version, dirty
from selfdrive.swaglog import cloudlog
MAX_SIZE = 100000 * 10 # Normal size is 40-100k, allow up to 1M
def get_tombstones():
"""Returns list of (filename, ctime) for all tombstones in /data/tombstones
and apport crashlogs in /var/crash"""
files = []
for folder in ["/data/tombstones/", "/var/crash/"]:
if os.path.exists(folder):
with os.scandir(folder) as d:
# Loop over first 1000 directory entries
for _, f in zip(range(1000), d):
if f.name.startswith("tombstone") or f.name.endswith(".crash"):
files.append((f.path, int(f.stat().st_ctime)))
return files
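# Example return value (paths and timestamps are illustrative): a list of
# (path, ctime) tuples such as
#   [("/data/tombstones/tombstone_00", 1546300800),
#    ("/var/crash/_usr_bin_foo.0.crash", 1546300900)]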
def report_tombstone(fn, client):
f_size = os.path.getsize(fn)
if f_size > MAX_SIZE:
cloudlog.error(f"Tombstone {fn} too big, {f_size}. Skipping...")
return
with open(fn, encoding='ISO-8859-1') as f:
contents = f.read()
# Get summary for sentry title
if fn.endswith(".crash"):
lines = contents.split('\n')
message = lines[6]
status_idx = contents.find('ProcStatus')
if status_idx >= 0:
lines = contents[status_idx:].split('\n')
message += " " + lines[1]
else:
message = " ".join(contents.split('\n')[5:7])
# Cut off pid/tid, since that varies per run
name_idx = message.find('name')
if name_idx >= 0:
message = message[name_idx:]
# Cut off fault addr
fault_idx = message.find(', fault addr')
if fault_idx >= 0:
message = message[:fault_idx]
cloudlog.error({'tombstone': message})
client.captureMessage(
message=message,
sdk={'name': 'tombstoned', 'version': '0'},
extra={
'tombstone_fn': fn,
'tombstone': contents
},
)
def main():
initial_tombstones = set(get_tombstones())
client = Client('https://137e8e621f114f858f4c392c52e18c6d:8aba82f49af040c8aac45e95a8484970@sentry.io/1404547',
install_sys_hook=False, transport=HTTPTransport, release=version, tags={'dirty': dirty}, string_max_length=10000)
client.user_context({'id': os.environ.get('DONGLE_ID')})
while True:
now_tombstones = set(get_tombstones())
for fn, ctime in (now_tombstones - initial_tombstones):
try:
cloudlog.info(f"reporting new tombstone {fn}")
report_tombstone(fn, client)
except Exception:
cloudlog.exception(f"Error reporting tombstone {fn}")
initial_tombstones = now_tombstones
time.sleep(5)
if __name__ == "__main__":
main()
| 27.166667 | 131 | 0.659126 |
f3162f8945a86c4d6e119975fbec0048161155f4 | 5,567 | py | Python | data_loading.py | zikegcwk/aicamp_food | 038d3a18830e6f5b9fdaf9d91a5df7fd8c7a5140 | [
"MIT"
] | null | null | null | data_loading.py | zikegcwk/aicamp_food | 038d3a18830e6f5b9fdaf9d91a5df7fd8c7a5140 | [
"MIT"
] | 8 | 2020-03-04T08:25:25.000Z | 2020-03-04T08:25:37.000Z | data_loading.py | zikegcwk/aicamp_food | 038d3a18830e6f5b9fdaf9d91a5df7fd8c7a5140 | [
"MIT"
] | 3 | 2020-03-28T12:54:58.000Z | 2021-02-16T21:42:31.000Z | import os, csv, random, pickle
'''
because this is a demo app, we just load everything into memory.
The data needs to:
1. Display candies information
2. Display ingredients information
candies = {
'Candy Corn': {
'score': -0.5,
'ingredients': [...],
'good_ingredients': [1, 2, 3],
'bad_ingredients': [a, b c]
}
}
ingredients = {
'sugar': {
'score': -1,
'articles': [...]
'predictions': [...]
'good_articles': [a, b, c],
'bad_articles': [d, e, f]
}
}
'''
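# Loading sketch (file names are illustrative; the __main__ block below does the
# same with paths resolved against the working directory):
#   ingredients = get_ingredients('ingredients_prediction.csv')
#   candies = get_candies('ingredients.pkl', ingredients)
#   good, bad = get_good_candies(candies, 3), get_bad_candies(candies, 3)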
def get_ingredients(file_path):
'''file has these columns:
- ingredient
- search_phrase
- site_name
- url
- title
- description
- prediction
'''
ingredients = {}
with open(file_path, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
ingd = row['ingredient']
if not ingredients.get(ingd):
ingredients[ingd] = {
'ingredient_name': ingd,
'score': None,
'full_articles': [],
'articles': [],
'predictions': [],
'good_articles': [],
'bad_articles': []
}
title = row.get('title', '')
description = row.get('description', '')
ingredients[ingd]['full_articles'].append(
{
'site_name': row.get('site_name'),
'url': row.get('url'),
'title': title,
'description': description,
'prediction': row.get('prediction')
}
)
article = title + ': ' + description
ingredients[ingd]['articles'].append(article)
if row['prediction'] == '1':
ingredients[ingd]['good_articles'].append(article)
if row['prediction'] == '-1':
ingredients[ingd]['bad_articles'].append(article)
if row['prediction'] != '2':
ingredients[ingd]['predictions'].append(
int(row.get('prediction'))
)
for k, ingd in ingredients.items():
ingd['score'] = round(10 * sum(ingd['predictions']) / len(ingd['predictions']), 2)
ingredients[k] = ingd
return ingredients
def get_ingredient_score():
here = os.getcwd()
score_path = os.path.join(here, "ingredient_score.csv")
scores = {}
ingredients = []
with open(score_path, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
ind = {}
ind['ingredient'] = row['ingredient']
ind['score'] = row['score']
ingredients.append(ind)
scores[row['ingredient']] = row['score']
return ingredients, scores
def get_candies(pkl_path, ingredients):
    '''candies is a list of candy dicts; a list is fine since we only need to list them.
    For ingredients we use a dict because we need to look them up
    for each candy.
{
'candy_ingredients': [...],
'candy_name': 'Haribo Starmix',
'ingredient_scores': [...],
'score: -7.0
}
'''
[scores, candies] = pickle.load(open(pkl_path, 'rb'))
candy_urls = get_candy_urls()
updated_candies = []
for candy in candies:
ingredient_scores = [float(s) for s in candy['ingredient_scores'] if s != 'could not find results in our database']
candy['score'] = round(sum(ingredient_scores) / len(ingredient_scores), 2)
candy['ingredients'] = []
candy['bad_ingredients'] = []
candy['ok_ingredients'] = []
candy['good_ingredients'] = []
for ind in candy['candy_ingredients']:
full_ingredient = ingredients.get(ind)
if not full_ingredient:
continue
candy['ingredients'].append(full_ingredient)
if full_ingredient['score'] >= 5:
candy['good_ingredients'].append(full_ingredient)
elif full_ingredient['score'] >=0 and full_ingredient['score'] < 5:
candy['ok_ingredients'].append(full_ingredient)
else:
candy['bad_ingredients'].append(full_ingredient)
candy['image_url'] = candy_urls[candy['candy_name']]
updated_candies.append(candy)
return sorted(updated_candies, key=lambda c: c['score'])
def get_good_candies(candies, n):
good_candies = [candy for candy in candies if candy['score'] >= 0]
return random.choices(good_candies, k=n)
def get_bad_candies(candies, n):
bad_candies = [candy for candy in candies if candy['score'] < 0]
return random.choices(bad_candies, k=n)
def get_candy_urls():
here = os.getcwd()
file_name = 'candy_urls.csv'
file_path = os.path.join(here, file_name)
candy_urls = {}
with open(file_path, 'r') as f:
reader = csv.DictReader(f)
for row in reader:
candy_name = row['candy_name']
image_url = row['image_url']
candy_urls[candy_name] = image_url
return candy_urls
if __name__ == "__main__":
file_name = 'ingredients_prediction.csv'
here = os.getcwd()
file_path = os.path.join(here, file_name)
ingredients = get_ingredients(file_path)
pkl_name = 'ingredients.pkl'
pkl_path = os.path.join(here, pkl_name)
    candies = get_candies(pkl_path, ingredients)
c18baa35d9e93dec6cae0eae6975ed9e74b40dcf | 2,251 | py | Python | src/cnn/transforms/transforms.py | portelaraian/tuberculosis-runmila-minoHealth | 03f7f146e913053399ec31bc0aeda018df9b2eff | [
"MIT"
] | null | null | null | src/cnn/transforms/transforms.py | portelaraian/tuberculosis-runmila-minoHealth | 03f7f146e913053399ec31bc0aeda018df9b2eff | [
"MIT"
] | null | null | null | src/cnn/transforms/transforms.py | portelaraian/tuberculosis-runmila-minoHealth | 03f7f146e913053399ec31bc0aeda018df9b2eff | [
"MIT"
] | null | null | null | import random
import math
import cv2
from albumentations.augmentations import functional as F
from albumentations.core.transforms_interface import ImageOnlyTransform
def resized_crop(image, height, width, x_min, y_min, x_max, y_max):
    # Crop the (x_min, y_min, x_max, y_max) box and resize the crop back to
    # (width, height); used by RandomResizedCrop.apply() below.
    image = F.crop(image, x_min, y_min, x_max, y_max)
    image = cv2.resize(image, (width, height))
    return image
class RandomResizedCrop(ImageOnlyTransform):
def __init__(self, height, width, scale=(0.08, 1.0), ratio=(3/4, 4/3), always_apply=False, p=1.0):
super().__init__(always_apply, p)
self.height = height
self.width = width
self.scale = scale
self.ratio = ratio
def apply(self, image, **params):
height, width = image.shape[:2]
area = height * width
for attempt in range(15):
target_area = random.uniform(*self.scale) * area
aspect_ratio = random.uniform(*self.ratio)
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if random.random() < 0.5 and min(self.ratio) <= (h / w) <= max(self.ratio):
w, h = h, w
if w <= width and h <= height:
x_min = random.randint(0, width - w)
y_min = random.randint(0, height - h)
return resized_crop(image, self.height, self.width, x_min, y_min, x_min+w, y_min+h)
min_side = min(height, width)
x_min = random.randint(0, width - min_side)
y_min = random.randint(0, height - min_side)
return resized_crop(image, self.height, self.width, x_min, y_min, x_min+min_side, y_min+min_side)
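# Usage sketch (assumes a standard albumentations pipeline; the sizes and
# probabilities are illustrative): both transforms plug into Compose like any
# other ImageOnlyTransform.
#   import albumentations as A
#   train_aug = A.Compose([RandomResizedCrop(224, 224, p=1.0),
#                          RandomDicomNoise(limit_ratio=0.05, p=0.5)])
#   augmented = train_aug(image=image)["image"]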
class RandomDicomNoise(ImageOnlyTransform):
def __init__(self, limit=None, limit_ratio=None, always_apply=False, p=0.5):
assert limit or limit_ratio
super().__init__(always_apply, p)
self.limit = limit
self.limit_ratio = limit_ratio
def apply(self, image, **params):
if self.limit:
value = random.uniform(-self.limit, self.limit)
image += value
else:
ratio = random.uniform(1.0-self.limit_ratio, 1.0+self.limit_ratio)
image *= ratio
return image
| 33.597015 | 105 | 0.615282 |
c4b2ca49abc55a05647560a86bb01f7e5e52d3b3 | 2,088 | py | Python | order_fulfillment/iterate_multi_item_pickfailprob_highlow.py | srinathsridhar/simulation-data | fac912c7cfa365d9b7264d4cd58c3a8a866729df | [
"MIT"
] | null | null | null | order_fulfillment/iterate_multi_item_pickfailprob_highlow.py | srinathsridhar/simulation-data | fac912c7cfa365d9b7264d4cd58c3a8a866729df | [
"MIT"
] | null | null | null | order_fulfillment/iterate_multi_item_pickfailprob_highlow.py | srinathsridhar/simulation-data | fac912c7cfa365d9b7264d4cd58c3a8a866729df | [
"MIT"
] | null | null | null | import json
import order_fulfillment_multi_item as ofmi
import random
def read_data(dataFile, ship_cost):
file = open(dataFile, 'r')
inst = json.load(file)
file.close()
if ship_cost == 'H':
for _store in inst[0]:
for _item in range(len(_store['p'])):
if _store['p'][_item] < 0.9:
_store['p'][_item] = random.uniform(0.5, 0.8)
else:
for _store in inst[0]:
for _item in range(len(_store['p'])):
if _store['p'][_item] > 0.1:
_store['p'][_item] = random.uniform(0.05, 0.1)
return inst
algorithms = ['LB', 'LSC', '6-SPS', '4-SPS', '2-SPS', '1-SPS', 'Greedy']
retailers = ['A', 'B', 'C', 'D', 'E']
for pickFailProb in ['H', 'L']:
if pickFailProb == 'H':
fileout = 'summary_stats/costs_multi_item_high_pickfailprob.txt'
else:
fileout = 'summary_stats/costs_multi_item_low_pickfailprob.txt'
for retailer in retailers:
for instance in range(1, 101):
for alg in ['LSC', '2-SPS', 'Greedy']:
filename = 'data/mi/retailer_{0}/{0}.{1}.json'.format(retailer, instance)
print('file', filename)
data = read_data(filename, pickFailProb)
cost, assignment = ofmi.solve_multi_item(alg, data)
nStores = len(data[0])
nItems = len(data[0][0]['p'])
storeDistribution = [0 for _ in range(nItems)]
for item, store in data[1]:
storeDistribution[item] += 1
numItemsPerStore = [-1 for _ in range(nStores)]
for [item, store] in data[1]:
numItemsPerStore[store] += 1
overlap_factor = round(sum(numItemsPerStore) / (nStores * (nItems - 1)), 3)
f = open(fileout, "a+")
row = '{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\n'.format(
retailer, instance, nItems, storeDistribution,overlap_factor,alg, cost, assignment)
f.write(row)
f.close()
| 34.8 | 103 | 0.523467 |
ec6602dfc5c53064e87a79ca29afa958f42e5785 | 2,573 | py | Python | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Policy_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Policy_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Policy_Case0037.py | opengauss-mirror/Yat | aef107a8304b94e5d99b4f1f36eb46755eb8919e | [
"MulanPSL-1.0"
] | null | null | null | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : policy
Case Name : Minimum required number of lowercase letters (a-z) in a password: password_min_lowercase=999
Description :
    1. Set password_min_lowercase=999 in postgres.conf and restart the database for it to take effect
    2. As the initial user, execute: create user wf with password '$PASSWORD';
Expect :
    1. The setting succeeds and the database restarts successfully
    2. A message prompts that the password must contain at least 999 lowercase letters
History :
"""
import unittest
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Policy(unittest.TestCase):
def setUp(self):
logger.info('---Opengauss_Function_Security_Policy_Case0037 start---')
self.common = Common()
self.sh_primy = CommonSH('PrimaryDbUser')
self.new_password1 = macro.COMMON_PASSWD.upper() + "qaz"
self.Constant = Constant()
self.configure = 'password_min_lowercase=999'
msg0 = self.common.config_set_modify(self.configure)
logger.info(msg0)
status_msg = self.sh_primy.get_db_cluster_status()
logger.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
def test_policy(self):
logger.info('-----------create user -----------------')
sql_cmd1 = f'create user wf with password \'{self.new_password1}\';'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
logger.info(msg1)
self.assertIn(self.Constant.PASSWORD_CONTAIN_AT_LEAST_MSG, msg1)
def tearDown(self):
logger.info('-----------恢复配置,并清理环境-----------')
self.configure = 'password_min_lowercase=0'
msg0 = self.common.config_set_modify(self.configure)
logger.info(msg0)
status_msg = self.sh_primy.get_db_cluster_status()
logger.info(status_msg)
self.assertTrue("Degraded" in status_msg or "Normal" in status_msg)
sql_cmd1 = 'drop user if exists wf cascade;'
msg1 = self.sh_primy.execut_db_sql(sql_cmd1)
logger.info(msg1)
logger.info('Opengauss_Function_Security_Policy_Case0037 finish')
| 37.289855 | 84 | 0.699184 |
d2cdabdab25c201d99d6c1f01cc011887c7b9c54 | 8,468 | py | Python | bin/translate_server.py | zhengzx-nlp/NJUNMT-tf-server | d931021a39ebcd9b036fa612d69cf3876378e198 | [
"Apache-2.0"
] | null | null | null | bin/translate_server.py | zhengzx-nlp/NJUNMT-tf-server | d931021a39ebcd9b036fa612d69cf3876378e198 | [
"Apache-2.0"
] | null | null | null | bin/translate_server.py | zhengzx-nlp/NJUNMT-tf-server | d931021a39ebcd9b036fa612d69cf3876378e198 | [
"Apache-2.0"
] | 2 | 2018-11-02T07:52:04.000Z | 2019-04-25T09:06:06.000Z | # -*- coding: UTF-8 -*-
# Copyright 2018, Natural Language Processing Group, Nanjing University,
#
# Author: Zheng Zaixiang
# Contact: zhengzx@nlp.nju.edu.cn
# or zhengzx.142857@gmail.com
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from njunmt.ensemble_experiment import *
import json
import sys
import errno
import socket
if sys.version_info[0] < 3:
import SocketServer as socketserver
else:
import socketserver
def wrap_message(**args):
# return bytes(json.dumps(args), encoding="UTF-8")
return json.dumps(args).encode()
def unwrap_message(json_str):
try:
return json.loads(json_str.decode())
except:
return {"command": "control", "content": "error"}
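# Round-trip sketch (content is illustrative): messages are small JSON dicts
# moved across the socket as bytes.
#   payload = wrap_message(command="translate", content="hello world")
#   unwrap_message(payload)  # -> {"command": "translate", "content": "hello world"}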
class SimpleEnsembleExperiment(EnsembleExperiment):
def __init__(self,
model_configs,
model_dirs,
weight_scheme="average"):
""" Initializes the ensemble experiment.
Args:
model_configs: A dictionary of all configurations.
model_dirs: A list of model directories (checkpoints).
weight_scheme: A string, the ensemble weights. See
`EnsembleModel.get_ensemble_weights()` for more details.
"""
super(EnsembleExperiment, self).__init__()
self._model_dirs = model_dirs
self._weight_scheme = weight_scheme
infer_options = parse_params(
params=model_configs["infer"],
default_params=self.default_inference_options())
self._model_configs = model_configs
self._model_configs["infer"] = infer_options
print_params("Model parameters: ", self._model_configs)
self.experiment_spec = {
'model_configs': model_configs
}
self.init_experiment()
print("Start listening...")
def init_vocab(self):
vocab_source = Vocab(
filename=self._model_configs["infer"]["source_words_vocabulary"],
bpe_codes=self._model_configs["infer"]["source_bpecodes"])
vocab_target = Vocab(
filename=self._model_configs["infer"]["target_words_vocabulary"],
bpe_codes=self._model_configs["infer"]["target_bpecodes"])
return vocab_source, vocab_target
def init_model(self, sess, vocab_source, vocab_target):
print("Building model...")
estimator_spec = model_fn_ensemble(
self._model_dirs, vocab_source, vocab_target,
weight_scheme=self._weight_scheme,
inference_options=self._model_configs["infer"])
predict_op = estimator_spec.predictions
sess.run(tf.global_variables_initializer())
print("Done.")
return sess, predict_op, estimator_spec
def init_experiment(self):
""" Runs ensemble model. """
print("Initialize experiment...")
sess = self._build_default_session()
vocab_source, vocab_target = self.init_vocab()
sess, predict_op, estimator_spec = self.init_model(sess, vocab_source, vocab_target)
self.experiment_spec.update(**{
"session": sess,
"predict_op": predict_op,
"vocab_source": vocab_source,
"vocab_target": vocab_target,
"estimator_spec": estimator_spec,
"model_info": {"model_dir": ", ".join(self._model_dirs)}
})
print("Done.")
def reload_model(self, model_dirs):
print("Reloading model...")
self._model_dirs = model_dirs.split(",")
self.experiment_spec['session'].close()
tf.reset_default_graph()
self.init_experiment()
print("Done.")
class TranslateServer(socketserver.TCPServer):
def init_experiment(self, **args):
experiment = SimpleEnsembleExperiment(**args)
self._experiment = experiment
self.experiment_spec = experiment.experiment_spec
def reload_model(self, model_dirs):
self._experiment.reload_model(model_dirs)
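# Minimal usage sketch (illustrative only -- the repository's actual launcher is
# not shown in this file): build the TCP server, initialize the ensemble
# experiment once, then serve requests until interrupted. Host, port and model
# directory names below are placeholders.
#
#     server = TranslateServer(("0.0.0.0", 8080), TranslateRequestHandler)
#     server.init_experiment(model_configs=model_configs, model_dirs=["model_a"])
#     server.serve_forever()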
class TranslateRequestHandler(socketserver.BaseRequestHandler, object):
"""
The request handler class for our server.
It is instantiated once per connection to the server, and must
override the handle() method to implement communication to the
client.
"""
def __init__(self, request, client_address, server):
self.experiment_spec = server.experiment_spec
super(TranslateRequestHandler, self).__init__(request, client_address, server)
def preprocess_raw(self, raw_data):
"""
:param raw_data: json string
{
"command": str (control, translate),
"data": str
}
:return: processed dict
{
"command": str (control, translate),
"data": tf feeding_data (if not translate then None)
}
"""
msg = unwrap_message(raw_data)
if msg["command"] == "translate":
lines = msg["content"].strip().split("\n")
text_inputter = TextLineInputter(
line_readers=[LineReader(
data=lines,
preprocessing_fn=lambda x: self.experiment_spec["vocab_source"].convert_to_idlist(x))],
padding_id=self.experiment_spec["vocab_source"].pad_id,
batch_size=self.experiment_spec["model_configs"]["infer"]["batch_size"])
feeding_data = text_inputter.make_feeding_data(self.experiment_spec["estimator_spec"].input_fields)
return {"command": "translate", "content": feeding_data}
return msg
def handle(self):
# self.request is the TCP socket connected to the client
        print("User from ({}:{}) connected.".format(*self.client_address))
while True:
try:
raw_data = self.request.recv(1024).strip() # json string
print(raw_data)
# preprocess raw_data to request (dict)
request = self.preprocess_raw(raw_data)
print(request)
if request["command"] == "translate":
trans_outputs = []
sources = []
for feeding_data in request["content"]:
source, trans_output, trans_score = infer(
sess=self.experiment_spec["session"],
prediction_op=self.experiment_spec["predict_op"],
infer_data=feeding_data,
output=None,
vocab_source=self.experiment_spec["vocab_source"],
vocab_target=self.experiment_spec["vocab_target"],
delimiter=self.experiment_spec["model_configs"]["infer"]["delimiter"],
output_attention=False,
tokenize_output=self.experiment_spec["model_configs"]["infer"]["char_level"],
verbose=True)
sources.extend(source)
trans_outputs.extend(trans_output)
sources = "\n".join(sources)
trans_outputs = "\n".join(trans_outputs)
response = wrap_message(status="success", info="", source=sources, translation=trans_outputs,
model_info=self.experiment_spec["model_info"])
elif request["command"] == "control":
if request["content"] == "close":
break
elif request["command"] == "reload":
new_model_dirs = request["content"]
self.server.reload_model(new_model_dirs)
response = wrap_message(status="success",
info="Reloaded model from {}".format(new_model_dirs),
model_info=self.experiment_spec["model_info"])
self.request.sendall(response)
except Exception as e:
                response = wrap_message(status="error", info=str(e))
try:
self.request.sendall(response)
except socket.error as e:
print("Close connection from {}:{}.".format(*self.client_address))
break
| 36.188034 | 113 | 0.583727 |
a3c7897172d61d22fb47eeae4c41991514b26cd9 | 5,785 | py | Python | data/p3BR/R2/benchmark/startQiskit_noisy137.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy137.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p3BR/R2/benchmark/startQiskit_noisy137.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=3
# total number=24
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
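# Worked example: bitwise_dot("011", "101") = str((0*1 + 1*0 + 1*1) % 2) = "1".
# Note that bitwise_xor() joins res[::-1], so its output bits are in reversed
# order relative to the input strings.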
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
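# The oracle above flips the target qubit exactly for those basis states x with
# f(x) == "1": each such x is matched by conjugating a multi-controlled Toffoli
# with X gates on the control qubits that should read 0.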
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.x(input_qubit[2]) # number=12
prog.cx(input_qubit[0],input_qubit[2]) # number=13
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.h(input_qubit[1]) # number=18
prog.cz(input_qubit[2],input_qubit[1]) # number=19
prog.h(input_qubit[1]) # number=20
prog.y(input_qubit[1]) # number=14
prog.h(input_qubit[1]) # number=21
prog.cz(input_qubit[2],input_qubit[1]) # number=22
prog.h(input_qubit[1]) # number=23
prog.z(input_qubit[2]) # number=3
prog.x(input_qubit[1]) # number=17
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_noisy137.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
backend = FakeYorktown()
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| 29.666667 | 140 | 0.630078 |
b7962ff71e44b0f302dda3ec54203ec817d7707d | 25,185 | py | Python | python/ray/tests/test_advanced.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 1 | 2021-02-14T01:53:37.000Z | 2021-02-14T01:53:37.000Z | python/ray/tests/test_advanced.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 77 | 2021-06-05T07:04:56.000Z | 2022-03-26T07:04:33.000Z | python/ray/tests/test_advanced.py | linyiyue/ray | 90d2456ec70270a1f894ec3ef6f3004533859e03 | [
"Apache-2.0"
] | 1 | 2019-07-25T23:01:38.000Z | 2019-07-25T23:01:38.000Z | # coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import json
import logging
import random
import sys
import threading
import time
import os
import numpy as np
import pytest
import ray.cluster_utils
import ray._private.profiling as profiling
from ray._private.test_utils import (client_test_enabled,
RayTestTimeoutException, SignalActor)
from ray.exceptions import ReferenceCountingAssertionError
if client_test_enabled():
from ray.util.client import ray
else:
import ray
logger = logging.getLogger(__name__)
# issue https://github.com/ray-project/ray/issues/7105
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_internal_free(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
class Sampler:
def sample(self):
return [1, 2, 3, 4, 5]
def sample_big(self):
return np.zeros(1024 * 1024)
sampler = Sampler.remote()
# Free deletes from in-memory store.
obj_ref = sampler.sample.remote()
ray.get(obj_ref)
ray.internal.free(obj_ref)
with pytest.raises(ReferenceCountingAssertionError):
ray.get(obj_ref)
# Free deletes big objects from plasma store.
big_id = sampler.sample_big.remote()
ray.get(big_id)
ray.internal.free(big_id)
time.sleep(1) # wait for delete RPC to propagate
with pytest.raises(ReferenceCountingAssertionError):
ray.get(big_id)
def test_multiple_waits_and_gets(shutdown_only):
# It is important to use three workers here, so that the three tasks
# launched in this experiment can run at the same time.
ray.init(num_cpus=3)
@ray.remote
def f(delay):
time.sleep(delay)
return 1
@ray.remote
def g(input_list):
# The argument input_list should be a list containing one object ref.
ray.wait([input_list[0]])
@ray.remote
def h(input_list):
# The argument input_list should be a list containing one object ref.
ray.get(input_list[0])
# Make sure that multiple wait requests involving the same object ref
# all return.
x = f.remote(1)
ray.get([g.remote([x]), g.remote([x])])
# Make sure that multiple get requests involving the same object ref all
# return.
x = f.remote(1)
ray.get([h.remote([x]), h.remote([x])])
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_caching_functions_to_run(shutdown_only):
# Test that we export functions to run on all workers before the driver
# is connected.
def f(worker_info):
sys.path.append(1)
ray.worker.global_worker.run_function_on_all_workers(f)
def f(worker_info):
sys.path.append(2)
ray.worker.global_worker.run_function_on_all_workers(f)
def g(worker_info):
sys.path.append(3)
ray.worker.global_worker.run_function_on_all_workers(g)
def f(worker_info):
sys.path.append(4)
ray.worker.global_worker.run_function_on_all_workers(f)
ray.init(num_cpus=1)
@ray.remote
def get_state():
time.sleep(1)
return sys.path[-4], sys.path[-3], sys.path[-2], sys.path[-1]
res1 = get_state.remote()
res2 = get_state.remote()
assert ray.get(res1) == (1, 2, 3, 4)
assert ray.get(res2) == (1, 2, 3, 4)
# Clean up the path on the workers.
def f(worker_info):
sys.path.pop()
sys.path.pop()
sys.path.pop()
sys.path.pop()
ray.worker.global_worker.run_function_on_all_workers(f)
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_running_function_on_all_workers(ray_start_regular):
def f(worker_info):
sys.path.append("fake_directory")
ray.worker.global_worker.run_function_on_all_workers(f)
@ray.remote
def get_path1():
return sys.path
assert "fake_directory" == ray.get(get_path1.remote())[-1]
# the function should only run on the current driver once.
assert sys.path[-1] == "fake_directory"
if len(sys.path) > 1:
assert sys.path[-2] != "fake_directory"
def f(worker_info):
sys.path.pop(-1)
ray.worker.global_worker.run_function_on_all_workers(f)
# Create a second remote function to guarantee that when we call
# get_path2.remote(), the second function to run will have been run on
# the worker.
@ray.remote
def get_path2():
return sys.path
assert "fake_directory" not in ray.get(get_path2.remote())
@pytest.mark.skipif(
"RAY_PROFILING" not in os.environ,
reason="Only tested in client/profiling build.")
def test_profiling_api(ray_start_2_cpus):
@ray.remote
def f():
with profiling.profile(
"custom_event", extra_data={"name": "custom name"}):
pass
ray.put(1)
object_ref = f.remote()
ray.wait([object_ref])
ray.get(object_ref)
# Wait until all of the profiling information appears in the profile
# table.
timeout_seconds = 20
start_time = time.time()
while True:
profile_data = ray.timeline()
event_types = {event["cat"] for event in profile_data}
expected_types = [
"task",
"task:deserialize_arguments",
"task:execute",
"task:store_outputs",
"wait_for_function",
"ray.get",
"ray.put",
"ray.wait",
"submit_task",
"fetch_and_run_function",
# TODO (Alex) :https://github.com/ray-project/ray/pull/9346
# "register_remote_function",
"custom_event", # This is the custom one from ray.profile.
]
if all(expected_type in event_types
for expected_type in expected_types):
break
if time.time() - start_time > timeout_seconds:
raise RayTestTimeoutException(
"Timed out while waiting for information in "
"profile table. Missing events: {}.".format(
set(expected_types) - set(event_types)))
# The profiling information only flushes once every second.
time.sleep(1.1)
def test_wait_cluster(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
cluster.add_node(num_cpus=1, resources={"RemoteResource": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"RemoteResource": 1})
def f():
return
# Make sure we have enough workers on the remote nodes to execute some
# tasks.
tasks = [f.remote() for _ in range(10)]
start = time.time()
ray.get(tasks)
end = time.time()
# Submit some more tasks that can only be executed on the remote nodes.
tasks = [f.remote() for _ in range(10)]
# Sleep for a bit to let the tasks finish.
time.sleep((end - start) * 2)
_, unready = ray.wait(tasks, num_returns=len(tasks), timeout=0)
# All remote tasks should have finished.
assert len(unready) == 0
@pytest.mark.skip(reason="TODO(ekl)")
def test_object_transfer_dump(ray_start_cluster):
cluster = ray_start_cluster
num_nodes = 3
for i in range(num_nodes):
cluster.add_node(resources={str(i): 1}, object_store_memory=10**9)
ray.init(address=cluster.address)
@ray.remote
def f(x):
return
# These objects will live on different nodes.
object_refs = [
f._remote(args=[1], resources={str(i): 1}) for i in range(num_nodes)
]
# Broadcast each object from each machine to each other machine.
for object_ref in object_refs:
ray.get([
f._remote(args=[object_ref], resources={str(i): 1})
for i in range(num_nodes)
])
# The profiling information only flushes once every second.
time.sleep(1.1)
transfer_dump = ray.state.object_transfer_timeline()
# Make sure the transfer dump can be serialized with JSON.
json.loads(json.dumps(transfer_dump))
assert len(transfer_dump) >= num_nodes**2
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_receive"
}) == num_nodes
assert len({
event["pid"]
for event in transfer_dump if event["name"] == "transfer_send"
}) == num_nodes
def test_identical_function_names(ray_start_regular):
# Define a bunch of remote functions and make sure that we don't
# accidentally call an older version.
num_calls = 200
@ray.remote
def f():
return 1
results1 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 2
results2 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 3
results3 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 4
results4 = [f.remote() for _ in range(num_calls)]
@ray.remote
def f():
return 5
results5 = [f.remote() for _ in range(num_calls)]
assert ray.get(results1) == num_calls * [1]
assert ray.get(results2) == num_calls * [2]
assert ray.get(results3) == num_calls * [3]
assert ray.get(results4) == num_calls * [4]
assert ray.get(results5) == num_calls * [5]
@ray.remote
def g():
return 1
@ray.remote # noqa: F811
def g(): # noqa: F811
return 2
@ray.remote # noqa: F811
def g(): # noqa: F811
return 3
@ray.remote # noqa: F811
def g(): # noqa: F811
return 4
@ray.remote # noqa: F811
def g(): # noqa: F811
return 5
result_values = ray.get([g.remote() for _ in range(num_calls)])
assert result_values == num_calls * [5]
def test_illegal_api_calls(ray_start_regular):
# Verify that we cannot call put on an ObjectRef.
x = ray.put(1)
with pytest.raises(Exception):
ray.put(x)
# Verify that we cannot call get on a regular value.
with pytest.raises(Exception):
ray.get(3)
@pytest.mark.skipif(
client_test_enabled(), reason="grpc interaction with releasing resources")
def test_multithreading(ray_start_2_cpus):
# This test requires at least 2 CPUs to finish since the worker does not
# release resources when joining the threads.
def run_test_in_multi_threads(test_case, num_threads=10, num_repeats=25):
"""A helper function that runs test cases in multiple threads."""
def wrapper():
for _ in range(num_repeats):
test_case()
time.sleep(random.randint(0, 10) / 1000.0)
return "ok"
executor = ThreadPoolExecutor(max_workers=num_threads)
futures = [executor.submit(wrapper) for _ in range(num_threads)]
for future in futures:
assert future.result() == "ok"
@ray.remote
def echo(value, delay_ms=0):
if delay_ms > 0:
time.sleep(delay_ms / 1000.0)
return value
def test_api_in_multi_threads():
"""Test using Ray api in multiple threads."""
@ray.remote
class Echo:
def echo(self, value):
return value
# Test calling remote functions in multiple threads.
def test_remote_call():
value = random.randint(0, 1000000)
result = ray.get(echo.remote(value))
assert value == result
run_test_in_multi_threads(test_remote_call)
# Test multiple threads calling one actor.
actor = Echo.remote()
def test_call_actor():
value = random.randint(0, 1000000)
result = ray.get(actor.echo.remote(value))
assert value == result
run_test_in_multi_threads(test_call_actor)
# Test put and get.
def test_put_and_get():
value = random.randint(0, 1000000)
result = ray.get(ray.put(value))
assert value == result
run_test_in_multi_threads(test_put_and_get)
# Test multiple threads waiting for objects.
num_wait_objects = 10
objects = [
echo.remote(i, delay_ms=10) for i in range(num_wait_objects)
]
def test_wait():
ready, _ = ray.wait(
objects,
num_returns=len(objects),
timeout=1000.0,
)
assert len(ready) == num_wait_objects
assert ray.get(ready) == list(range(num_wait_objects))
run_test_in_multi_threads(test_wait, num_repeats=1)
# Run tests in a driver.
test_api_in_multi_threads()
# Run tests in a worker.
@ray.remote
def run_tests_in_worker():
test_api_in_multi_threads()
return "ok"
assert ray.get(run_tests_in_worker.remote()) == "ok"
# Test actor that runs background threads.
@ray.remote
class MultithreadedActor:
def __init__(self):
self.lock = threading.Lock()
self.thread_results = []
def background_thread(self, wait_objects):
try:
# Test wait
ready, _ = ray.wait(
wait_objects,
num_returns=len(wait_objects),
timeout=1000.0,
)
assert len(ready) == len(wait_objects)
for _ in range(20):
num = 10
# Test remote call
results = [echo.remote(i) for i in range(num)]
assert ray.get(results) == list(range(num))
# Test put and get
objects = [ray.put(i) for i in range(num)]
assert ray.get(objects) == list(range(num))
time.sleep(random.randint(0, 10) / 1000.0)
except Exception as e:
with self.lock:
self.thread_results.append(e)
else:
with self.lock:
self.thread_results.append("ok")
def spawn(self):
wait_objects = [echo.remote(i, delay_ms=10) for i in range(10)]
self.threads = [
threading.Thread(
target=self.background_thread, args=(wait_objects, ))
for _ in range(20)
]
[thread.start() for thread in self.threads]
def join(self):
[thread.join() for thread in self.threads]
assert self.thread_results == ["ok"] * len(self.threads)
return "ok"
actor = MultithreadedActor.remote()
actor.spawn.remote()
    assert ray.get(actor.join.remote()) == "ok"
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_wait_makes_object_local(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
cluster.add_node(num_cpus=2)
ray.init(address=cluster.address)
@ray.remote
class Foo:
def method(self):
return np.zeros(1024 * 1024)
a = Foo.remote()
# Test get makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ray.get(x_id)
assert ray.worker.global_worker.core_worker.object_exists(x_id)
# Test wait makes the object local.
x_id = a.method.remote()
assert not ray.worker.global_worker.core_worker.object_exists(x_id)
ok, _ = ray.wait([x_id])
assert len(ok) == 1
assert ray.worker.global_worker.core_worker.object_exists(x_id)
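# Both halves of this test rely on the same behaviour: ray.get() and ray.wait()
# are expected to pull the actor's plasma-sized result into the local node's
# object store, which is what object_exists() checks here.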
@pytest.mark.skipif(client_test_enabled(), reason="internal api")
def test_future_resolution_skip_plasma(ray_start_cluster):
cluster = ray_start_cluster
# Disable worker caching so worker leases are not reused; set object
# inlining size threshold and enable storing of small objects in in-memory
# object store so the borrowed ref is inlined.
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"worker_lease_timeout_milliseconds": 0,
"max_direct_call_object_size": 100 * 1024,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"pin_head": 1})
def f(x):
return x + 1
@ray.remote(resources={"pin_worker": 1})
def g(x):
borrowed_ref = x[0]
f_ref = f.remote(borrowed_ref)
# borrowed_ref should be inlined on future resolution and shouldn't be
# in Plasma.
assert ray.worker.global_worker.core_worker.object_exists(
borrowed_ref, memory_store_only=True)
return ray.get(f_ref) * 2
one = ray.put(1)
g_ref = g.remote([one])
assert ray.get(g_ref) == 4
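# In the test above, ray.put(1) stores a tiny object that is far below the
# configured max_direct_call_object_size (100 KiB), so resolving the borrowed
# reference inside g() inlines the value into the in-memory store instead of
# the plasma store -- the memory_store_only assertion depends on exactly that.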
def test_task_output_inline_bytes_limit(ray_start_cluster):
cluster = ray_start_cluster
# Disable worker caching so worker leases are not reused; set object
# inlining size threshold and enable storing of small objects in in-memory
# object store so the borrowed ref is inlined.
# set task_rpc_inlined_bytes_limit which only allows inline 20 bytes.
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"worker_lease_timeout_milliseconds": 0,
"max_direct_call_object_size": 100 * 1024,
"task_rpc_inlined_bytes_limit": 20,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(num_returns=5, resources={"pin_head": 1})
def f():
return list(range(5))
@ray.remote(resources={"pin_worker": 1})
def sum():
numbers = f.remote()
result = 0
for i, ref in enumerate(numbers):
result += ray.get(ref)
inlined = ray.worker.global_worker.core_worker.object_exists(
ref, memory_store_only=True)
if i < 2:
assert inlined
else:
assert not inlined
return result
assert ray.get(sum.remote()) == 10
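# With task_rpc_inlined_bytes_limit set to 20 bytes above, only the first
# couple of the five returned values fit in the inlining budget; the rest are
# promoted to plasma, which is what the `i < 2` branch in sum() asserts.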
def test_task_arguments_inline_bytes_limit(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
num_cpus=1,
resources={"pin_head": 1},
_system_config={
"max_direct_call_object_size": 100 * 1024,
# if task_rpc_inlined_bytes_limit is greater than
# max_grpc_message_size, this test fails.
"task_rpc_inlined_bytes_limit": 18 * 1024,
"max_grpc_message_size": 20 * 1024,
"put_small_object_in_memory_store": True,
},
)
cluster.add_node(num_cpus=1, resources={"pin_worker": 1})
ray.init(address=cluster.address)
@ray.remote(resources={"pin_worker": 1})
def foo(ref1, ref2, ref3):
return ref1 == ref2 + ref3
@ray.remote(resources={"pin_head": 1})
def bar():
# if the refs are inlined, the test fails.
# refs = [ray.put(np.random.rand(1024) for _ in range(3))]
# return ray.get(
# foo.remote(refs[0], refs[1], refs[2]))
return ray.get(
foo.remote(
np.random.rand(1024), # 8k
np.random.rand(1024), # 8k
np.random.rand(1024))) # 8k
ray.get(bar.remote())
# This case tests whether gcs-based actor scheduler works properly with
# a normal task co-existed.
@pytest.mark.skipif(sys.platform == "win32", reason="Time out on Windows")
def test_schedule_actor_and_normal_task(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
memory=1024**3, _system_config={"gcs_actor_scheduling_enabled": True})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=600 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return 2
@ray.remote(memory=600 * 1024**2, num_cpus=0.01)
def fun(singal1, signal_actor2):
signal_actor2.send.remote()
ray.get(singal1.wait.remote())
return 1
singal1 = SignalActor.remote()
signal2 = SignalActor.remote()
o1 = fun.remote(singal1, signal2)
# Make sure the normal task is executing.
ray.get(signal2.wait.remote())
# The normal task is blocked now.
# Try to create actor and make sure this actor is not created for the time
# being.
foo = Foo.remote()
o2 = foo.method.remote()
ready_list, remaining_list = ray.wait([o2], timeout=2)
assert len(ready_list) == 0 and len(remaining_list) == 1
# Send a signal to unblock the normal task execution.
ray.get(singal1.send.remote())
# Check the result of normal task.
assert ray.get(o1) == 1
# Make sure the actor is created.
assert ray.get(o2) == 2
# This case tests whether gcs-based actor scheduler works properly
# in a large scale.
@pytest.mark.skipif(sys.platform == "win32", reason="Time out on Windows")
def test_schedule_many_actors_and_normal_tasks(ray_start_cluster):
cluster = ray_start_cluster
node_count = 10
actor_count = 50
each_actor_task_count = 50
normal_task_count = 1000
node_memory = 2 * 1024**3
for i in range(node_count):
cluster.add_node(
memory=node_memory,
_system_config={"gcs_actor_scheduling_enabled": True}
if i == 0 else {})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return 2
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
def fun():
return 1
normal_task_object_list = [fun.remote() for _ in range(normal_task_count)]
actor_list = [Foo.remote() for _ in range(actor_count)]
actor_object_list = [
actor.method.remote() for _ in range(each_actor_task_count)
for actor in actor_list
]
for object in ray.get(actor_object_list):
assert object == 2
for object in ray.get(normal_task_object_list):
assert object == 1
# This case tests whether gcs-based actor scheduler distributes actors
# in a balanced way. By default, it uses the `SPREAD` strategy of
# gcs resource scheduler.
@pytest.mark.skipif(sys.platform == "win32", reason="Time out on Windows")
@pytest.mark.parametrize("args", [[5, 20], [5, 3]])
def test_actor_distribution_balance(ray_start_cluster, args):
cluster = ray_start_cluster
node_count = args[0]
actor_count = args[1]
for i in range(node_count):
cluster.add_node(
memory=1024**3,
_system_config={"gcs_actor_scheduling_enabled": True}
if i == 0 else {})
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=100 * 1024**2, num_cpus=0.01)
class Foo:
def method(self):
return ray.worker.global_worker.node.unique_id
actor_distribution = {}
actor_list = [Foo.remote() for _ in range(actor_count)]
for actor in actor_list:
node_id = ray.get(actor.method.remote())
if node_id not in actor_distribution.keys():
actor_distribution[node_id] = []
actor_distribution[node_id].append(actor)
if node_count >= actor_count:
assert len(actor_distribution) == actor_count
for node_id, actors in actor_distribution.items():
assert len(actors) == 1
else:
assert len(actor_distribution) == node_count
for node_id, actors in actor_distribution.items():
assert len(actors) <= int(actor_count / node_count)
# This case tests whether RequestWorkerLeaseReply carries normal task resources
# when the request is rejected (due to resource preemption by normal tasks).
@pytest.mark.skip(
reason="The period of pull based resource report (10ms) is hard-coded.")
def test_worker_lease_reply_with_resources(ray_start_cluster):
cluster = ray_start_cluster
cluster.add_node(
memory=2000 * 1024**2,
_system_config={
"raylet_report_resources_period_milliseconds": 1000000,
"gcs_actor_scheduling_enabled": True,
})
node2 = cluster.add_node(memory=1000 * 1024**2)
ray.init(address=cluster.address)
cluster.wait_for_nodes()
@ray.remote(memory=1500 * 1024**2)
def fun(signal):
signal.send.remote()
time.sleep(30)
return 0
signal = SignalActor.remote()
fun.remote(signal)
# Make sure that the `fun` is running.
ray.get(signal.wait.remote())
@ray.remote(memory=800 * 1024**2)
class Foo:
def method(self):
return ray.worker.global_worker.node.unique_id
foo1 = Foo.remote()
o1 = foo1.method.remote()
ready_list, remaining_list = ray.wait([o1], timeout=10)
# If RequestWorkerLeaseReply carries normal task resources,
# GCS will then schedule foo1 to node2. Otherwise,
# GCS would keep trying to schedule foo1 to
# node1 and getting rejected.
assert len(ready_list) == 1 and len(remaining_list) == 0
assert ray.get(o1) == node2.unique_id
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
| 30.56432 | 79 | 0.630891 |
a9bf0799fac9d5a0caf25cd752e4ab6ea674218b | 6,389 | py | Python | tests/test_wal_transfer.py | ArtemZ/wal-e | c1fc33b2f9f8e2c6d51cbb7607fdaf7eb16e87a1 | [
"BSD-3-Clause"
] | 1 | 2021-04-10T09:30:28.000Z | 2021-04-10T09:30:28.000Z | tests/test_wal_transfer.py | heroku/wal-e-archived | 0b7c363abc1a1dfafbe2defbf753893f35824318 | [
"BSD-3-Clause"
] | null | null | null | tests/test_wal_transfer.py | heroku/wal-e-archived | 0b7c363abc1a1dfafbe2defbf753893f35824318 | [
"BSD-3-Clause"
] | null | null | null | import gevent
import pytest
from fast_wait import fast_wait
from wal_e import worker
from wal_e.exception import UserCritical
assert fast_wait
class Explosion(Exception):
"""Marker type for fault injection."""
pass
class FakeWalSegment(object):
def __init__(self, seg_path, explicit=False,
upload_explosive=False,
mark_done_explosive=False):
self.explicit = explicit
self._upload_explosive = upload_explosive
self._mark_done_explosive = mark_done_explosive
self._marked = False
self._uploaded = False
def mark_done(self):
if self._mark_done_explosive:
raise self._mark_done_explosive
self._marked = True
class FakeWalUploader(object):
def __call__(self, segment):
if segment._upload_explosive:
raise segment._upload_explosive
segment._uploaded = True
return segment
def failed(seg):
"""Returns true if a segment could be a failed upload.
Or in progress, the two are not distinguished.
"""
return seg._marked is False and seg._uploaded is False
def success(seg):
"""Returns true if a segment has been successfully uploaded.
Checks that mark_done was not called if this is an 'explicit' wal
segment from Postgres.
"""
if seg.explicit:
assert seg._marked is False
return seg._uploaded
def indeterminate(seg):
"""Returns true as long as the segment is internally consistent.
Checks invariants of mark_done, depending on whether the segment
has been uploaded. This is useful in cases with tests with
failures and concurrent execution, and calls out the state of the
segment in any case to the reader.
"""
if seg._uploaded:
if seg.explicit:
assert seg._marked is False
else:
assert seg._marked is True
else:
assert seg._marked is False
return True
def prepare_multi_upload_segments():
"""Prepare a handful of fake segments for upload."""
# The first segment is special, being explicitly passed by
# Postgres.
yield FakeWalSegment('0' * 8 * 3, explicit=True)
# Additional segments are non-explicit, which means they will have
# their metadata manipulated by wal-e rather than relying on the
# Postgres archiver.
for i in xrange(1, 5):
yield FakeWalSegment(str(i) * 8 * 3, explicit=False)
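# The explicit first segment models the WAL file Postgres hands directly to
# archive_command, for which wal-e must not call .mark_done(); the remaining
# segments are extras that wal-e uploads opportunistically and marks done
# itself, matching the invariants checked by success() and indeterminate().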
def test_simple_upload():
"""Model a case where there is no concurrency while uploading."""
group = worker.WalTransferGroup(FakeWalUploader())
seg = FakeWalSegment('1' * 8 * 3, explicit=True)
group.start(seg)
group.join()
assert success(seg)
def test_multi_upload():
"""Model a case with upload concurrency."""
group = worker.WalTransferGroup(FakeWalUploader())
segments = list(prepare_multi_upload_segments())
# "Start" fake uploads
for seg in segments:
group.start(seg)
group.join()
# Check invariants on the non-explicit segments.
for seg in segments:
assert success(seg)
def test_simple_fail():
"""Model a simple failure in the non-concurrent case."""
group = worker.WalTransferGroup(FakeWalUploader())
exp = Explosion('fail')
seg = FakeWalSegment('1' * 8 * 3, explicit=True, upload_explosive=exp)
group.start(seg)
with pytest.raises(Explosion) as e:
group.join()
assert e.value is exp
assert failed(seg)
def test_multi_explicit_fail():
"""Model a failure of the explicit segment under concurrency."""
group = worker.WalTransferGroup(FakeWalUploader())
segments = list(prepare_multi_upload_segments())
exp = Explosion('fail')
segments[0]._upload_explosive = exp
for seg in segments:
group.start(seg)
with pytest.raises(Explosion) as e:
group.join()
assert e.value is exp
assert failed(segments[0])
for seg in segments[1:]:
assert success(seg)
def test_multi_pipeline_fail():
"""Model a failure of the pipelined segments under concurrency."""
group = worker.WalTransferGroup(FakeWalUploader())
segments = list(prepare_multi_upload_segments())
exp = Explosion('fail')
fail_idx = 2
segments[fail_idx]._upload_explosive = exp
for seg in segments:
group.start(seg)
with pytest.raises(Explosion) as e:
group.join()
assert e.value is exp
for i, seg in enumerate(segments):
if i == fail_idx:
assert failed(seg)
else:
# Given race conditions in conjunction with exceptions --
# which will abort waiting for other greenlets to finish
# -- one can't know very much about the final state of
# segment.
assert indeterminate(seg)
def test_finally_execution():
"""When one segment fails ensure parallel segments clean up."""
segBad = FakeWalSegment('1' * 8 * 3)
segOK = FakeWalSegment('2' * 8 * 3)
class CleanupCheckingUploader(object):
def __init__(self):
self.cleaned_up = False
def __call__(self, segment):
if segment is segOK:
try:
while True:
gevent.sleep(0.1)
finally:
self.cleaned_up = True
elif segment is segBad:
raise Explosion('fail')
else:
assert False, 'Expect only two segments'
segment._uploaded = True
return segment
uploader = CleanupCheckingUploader()
group = worker.WalTransferGroup(uploader)
group.start(segOK)
group.start(segBad)
with pytest.raises(Explosion):
group.join()
assert uploader.cleaned_up is True
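# The cleaned_up flag can only be set by the uploader's finally block, so this
# assertion passes only if the failing segment's exception causes the
# still-sleeping greenlet for segOK to be killed instead of left running.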
def test_start_after_join():
"""Break an invariant by adding transfers after .join."""
group = worker.WalTransferGroup(FakeWalUploader())
group.join()
seg = FakeWalSegment('arbitrary')
with pytest.raises(UserCritical):
group.start(seg)
def test_mark_done_fault():
"""Exercise exception handling from .mark_done()"""
group = worker.WalTransferGroup(FakeWalUploader())
exp = Explosion('boom')
seg = FakeWalSegment('arbitrary', mark_done_explosive=exp)
group.start(seg)
with pytest.raises(Explosion) as e:
group.join()
assert e.value is exp
| 26.077551 | 74 | 0.653154 |
590503421f48c179e03dc339595a1b076192234d | 5,521 | py | Python | src/unicon/plugins/sros/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 1 | 2021-02-25T19:36:56.000Z | 2021-02-25T19:36:56.000Z | src/unicon/plugins/sros/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | null | null | null | src/unicon/plugins/sros/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | null | null | null | __author__ = 'Difu Hu <pyats-support@cisco.com;pyats-support-ext@cisco.com>'
from unicon.bases.routers.services import BaseService
from unicon.core.errors import SubCommandFailure
from unicon.eal.dialogs import Dialog, Statement
from unicon.plugins.generic.service_implementation import Configure, Execute
from .statements import sros_statements
KEY_RETURN_ROOT = '\x1a'
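# '\x1a' is the Ctrl-Z control character; sending it to the SR OS CLI is used
# below to jump back to the root of the CLI hierarchy before re-matching the
# root prompt of the target state.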
class SrosServiceMixin(object):
def return_to_cli_root(self, state):
handle = self.get_handle()
state = handle.state_machine.get_state(state)
statement = Statement(pattern=state.pattern,
action=None,
args=None,
loop_continue=False,
continue_timer=False,
trim_buffer=True)
dialog = Dialog([sros_statements.discard_uncommitted, statement])
handle.spawn.send(KEY_RETURN_ROOT)
try:
dialog.process(handle.spawn)
except Exception as err:
raise SubCommandFailure('Return to cli root failed', err) from err
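    # The dialog above pairs the target state's prompt pattern with the shared
    # `discard_uncommitted` statement, so a pending "discard uncommitted
    # changes" confirmation, if the device raises one on the way out, is
    # handled before the prompt match ends the dialog.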
def log_service_call(self):
BaseService.log_service_call(self)
def pre_service(self, *args, **kwargs):
self.prompt_recovery = kwargs.get('prompt_recovery', False)
sm = self.get_sm()
con = self.connection
sm.go_to(self.start_state,
con.spawn,
prompt_recovery=self.prompt_recovery,
context=con.context)
self.return_to_cli_root(self.start_state)
def post_service(self, *args, **kwargs):
self.return_to_cli_root(self.end_state)
super().post_service(*args, **kwargs)
class SrosMdcliExecute(SrosServiceMixin, Execute):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.start_state = 'mdcli'
self.end_state = 'mdcli'
class SrosMdcliConfigure(SrosServiceMixin, Configure):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.start_state = 'mdcli'
self.end_state = 'mdcli'
self.commit_cmd = 'commit'
self.mode = connection.settings.MDCLI_CONFIGURE_DEFAULT_MODE
def call_service(self,
*args,
mode='',
**kwargs):
mode = mode or self.mode
handle = self.get_handle()
handle.spawn.sendline('configure {}'.format(mode))
super().call_service(*args, **kwargs)
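    # Illustrative use from a test script (assumes `device` is a connected
    # unicon Connection to an SR OS box; names below are placeholders):
    #
    #     device.mdcli_configure('port 1/1/1 admin-state enable',
    #                            mode='private')
    #
    # The mode argument selects the MD-CLI configuration mode (for example
    # "exclusive", "global" or "private"); when omitted it falls back to
    # MDCLI_CONFIGURE_DEFAULT_MODE from the plugin settings.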
class SrosClassiccliExecute(SrosServiceMixin, Execute):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.start_state = 'classiccli'
self.end_state = 'classiccli'
class SrosClassiccliConfigure(SrosServiceMixin, Configure):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.start_state = 'classiccli'
self.end_state = 'classiccli'
self.commit_cmd = ''
class SrosExecute(BaseService):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.execute_map = {'classiccli': 'classiccli_execute',
'mdcli': 'mdcli_execute'}
def pre_service(self, *args, **kwargs):
pass
def post_service(self, *args, **kwargs):
pass
def call_service(self, *args, **kwargs):
handle = self.get_handle()
state = handle.state_machine.current_state
execute = getattr(self.connection, self.execute_map[state])
self.result = execute(*args, **kwargs)
class SrosConfigure(BaseService):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
self.configure_map = {'classiccli': 'classiccli_configure',
'mdcli': 'mdcli_configure'}
def pre_service(self, *args, **kwargs):
pass
def post_service(self, *args, **kwargs):
pass
def call_service(self, *args, **kwargs):
handle = self.get_handle()
state = handle.state_machine.current_state
configure = getattr(self.connection, self.configure_map[state])
self.result = configure(*args, **kwargs)
class SrosSwitchCliEngine(BaseService):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
def pre_service(self, *args, **kwargs):
pass
def post_service(self, *args, **kwargs):
pass
def call_service(self, engine, *args, **kwargs):
self.prompt_recovery = kwargs.get('prompt_recovery', False)
sm = self.get_sm()
con = self.connection
sm.go_to(engine,
con.spawn,
prompt_recovery=self.prompt_recovery,
context=con.context)
self.result = True
def get_service_result(self):
return self.result
class SrosGetCliEngine(BaseService):
def __init__(self, connection, context, **kwargs):
super().__init__(connection, context, **kwargs)
def pre_service(self, *args, **kwargs):
pass
def post_service(self, *args, **kwargs):
pass
def call_service(self, *args, **kwargs):
handle = self.get_handle()
self.result = handle.state_machine.current_state
def get_service_result(self):
return self.result
| 31.548571 | 78 | 0.629958 |
abda2917553bde53f81af523c6139724dd84cd73 | 873 | py | Python | app/user/views.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | app/user/views.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | app/user/views.py | mzwamshandu/recipe-app-api | cc7da9c7e72da318ca3f36bd4a3fcf173ef1a929 | [
"MIT"
] | null | null | null | from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from user.serializers import UserSerializer, AuthTokenSerializer
class CreateUserView(generics.CreateAPIView):
# Create a new user in the system
serializer_class = UserSerializer
class CreateTokenView(ObtainAuthToken):
serializer_class = AuthTokenSerializer
renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
class ManageUserView(generics.RetrieveUpdateAPIView):
# Manage the authenticated user
serializer_class = UserSerializer
authentication_classes = (authentication.TokenAuthentication, )
    permission_classes = (permissions.IsAuthenticated, )
def get_object(self):
        # Retrieve and return the authenticated user
return self.request.user
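# Illustrative URL wiring for these views (the project's actual app/user/urls.py
# is not part of this file; the route names below are conventional, not confirmed):
#
#     urlpatterns = [
#         path('create/', CreateUserView.as_view(), name='create'),
#         path('token/', CreateTokenView.as_view(), name='token'),
#         path('me/', ManageUserView.as_view(), name='me'),
#     ]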
| 33.576923 | 67 | 0.80756 |
02b25039bcca245f303523d0bee33ddaab3db065 | 11,959 | py | Python | google/cloud/scheduler/v1beta1/scheduler-v1beta1-py/google/cloud/scheduler_v1beta1/services/cloud_scheduler/transports/base.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/scheduler/v1beta1/scheduler-v1beta1-py/google/cloud/scheduler_v1beta1/services/cloud_scheduler/transports/base.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/scheduler/v1beta1/scheduler-v1beta1-py/google/cloud/scheduler_v1beta1/services/cloud_scheduler/transports/base.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import packaging.version
import pkg_resources
import google.auth # type: ignore
import google.api_core # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.scheduler_v1beta1.types import cloudscheduler
from google.cloud.scheduler_v1beta1.types import job
from google.cloud.scheduler_v1beta1.types import job as gcs_job
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-cloud-scheduler',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
try:
# google.auth.__version__ was added in 1.26.0
_GOOGLE_AUTH_VERSION = google.auth.__version__
except AttributeError:
try: # try pkg_resources if it is available
_GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
except pkg_resources.DistributionNotFound: # pragma: NO COVER
_GOOGLE_AUTH_VERSION = None
class CloudSchedulerTransport(abc.ABC):
"""Abstract transport class for CloudScheduler."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
)
DEFAULT_HOST: str = 'cloudscheduler.googleapis.com'
def __init__(
self, *,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file,
**scopes_kwargs,
quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
# If the credentials are service account credentials, then always try to use self signed JWT.
if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
# TODO(busunkim): This method is in the base transport
# to avoid duplicating code across the transport classes. These functions
# should be deleted once the minimum required versions of google-auth is increased.
# TODO: Remove this function once google-auth >= 1.25.0 is required
@classmethod
def _get_scopes_kwargs(cls, host: str, scopes: Optional[Sequence[str]]) -> Dict[str, Optional[Sequence[str]]]:
"""Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
scopes_kwargs = {}
if _GOOGLE_AUTH_VERSION and (
packaging.version.parse(_GOOGLE_AUTH_VERSION)
>= packaging.version.parse("1.25.0")
):
scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
else:
scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
return scopes_kwargs
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.list_jobs: gapic_v1.method.wrap_method(
self.list_jobs,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.get_job: gapic_v1.method.wrap_method(
self.get_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.create_job: gapic_v1.method.wrap_method(
self.create_job,
default_timeout=600.0,
client_info=client_info,
),
self.update_job: gapic_v1.method.wrap_method(
self.update_job,
default_timeout=600.0,
client_info=client_info,
),
self.delete_job: gapic_v1.method.wrap_method(
self.delete_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.pause_job: gapic_v1.method.wrap_method(
self.pause_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.resume_job: gapic_v1.method.wrap_method(
self.resume_job,
default_retry=retries.Retry(
initial=0.1,maximum=60.0,multiplier=1.3, predicate=retries.if_exception_type(
core_exceptions.DeadlineExceeded,
core_exceptions.ServiceUnavailable,
),
deadline=600.0,
),
default_timeout=600.0,
client_info=client_info,
),
self.run_job: gapic_v1.method.wrap_method(
self.run_job,
default_timeout=600.0,
client_info=client_info,
),
}
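    # Each wrapper above attaches the default timeout, client-info metadata and,
    # where configured, a retry policy with exponential backoff on
    # DeadlineExceeded / ServiceUnavailable; the concrete transports and the
    # generated client reuse these wrapped methods when issuing RPCs.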
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def list_jobs(self) -> Callable[
[cloudscheduler.ListJobsRequest],
Union[
cloudscheduler.ListJobsResponse,
Awaitable[cloudscheduler.ListJobsResponse]
]]:
raise NotImplementedError()
@property
def get_job(self) -> Callable[
[cloudscheduler.GetJobRequest],
Union[
job.Job,
Awaitable[job.Job]
]]:
raise NotImplementedError()
@property
def create_job(self) -> Callable[
[cloudscheduler.CreateJobRequest],
Union[
gcs_job.Job,
Awaitable[gcs_job.Job]
]]:
raise NotImplementedError()
@property
def update_job(self) -> Callable[
[cloudscheduler.UpdateJobRequest],
Union[
gcs_job.Job,
Awaitable[gcs_job.Job]
]]:
raise NotImplementedError()
@property
def delete_job(self) -> Callable[
[cloudscheduler.DeleteJobRequest],
Union[
empty_pb2.Empty,
Awaitable[empty_pb2.Empty]
]]:
raise NotImplementedError()
@property
def pause_job(self) -> Callable[
[cloudscheduler.PauseJobRequest],
Union[
job.Job,
Awaitable[job.Job]
]]:
raise NotImplementedError()
@property
def resume_job(self) -> Callable[
[cloudscheduler.ResumeJobRequest],
Union[
job.Job,
Awaitable[job.Job]
]]:
raise NotImplementedError()
@property
def run_job(self) -> Callable[
[cloudscheduler.RunJobRequest],
Union[
job.Job,
Awaitable[job.Job]
]]:
raise NotImplementedError()
__all__ = (
'CloudSchedulerTransport',
)
| 38.085987 | 161 | 0.598378 |
d723d647e0eb43b64c90edfe9eb2be9ca36e7e0f | 3,422 | py | Python | lib/aging_vec.py | calico/2019_murine_cell_aging | 11ab15b82df886c4a51f73b25c061a9360862c2e | [
"Apache-2.0"
] | 1 | 2020-12-03T21:28:05.000Z | 2020-12-03T21:28:05.000Z | lib/aging_vec.py | calico/2019_murine_cell_aging | 11ab15b82df886c4a51f73b25c061a9360862c2e | [
"Apache-2.0"
] | null | null | null | lib/aging_vec.py | calico/2019_murine_cell_aging | 11ab15b82df886c4a51f73b25c061a9360862c2e | [
"Apache-2.0"
] | 1 | 2021-04-16T12:01:16.000Z | 2021-04-16T12:01:16.000Z | '''Compute vectors of difference across a contrast'''
import anndata
import numpy as np
import tqdm
def compute_displacement_vector(adata: anndata.AnnData,
contrast: str = 'age',
groupby: str = 'cell_type',
embedding: str = 'counts') -> np.ndarray:
'''
Compute the difference between mean vectors for cells in
different groups of a binary `contrast`.
Parameters
----------
adata : anndata.AnnData
[Cells, Genes] AnnData object.
contrast : str
binary variable in `adata.obs`. differences in mean
expression vectors are computed across the two groups
specified in this variable.
groupby : str
categorical variable in `adata.obs`. mean expression
vectors are computed separately for each level in this
variable.
embedding : str
specifies the embedding space to calculate mean vectors.
["counts", "pca", "umap", "nmf"]
Returns
-------
difference_vectors : np.ndarray
[n_groups, n_embedding_dim, (contrast_0, contrast_1, difference)]
mean expression vectors for each group, contrast combination
and their differences.
'''
# [Cells, Genes]
if embedding.lower() == 'counts':
if type(adata.X) != np.ndarray:
X = adata.X.toarray()
else:
X = adata.X
elif embedding.lower() == 'pca':
if 'X_pca' not in adata.obsm.keys():
raise ValueError('PCA object not present in AnnData.')
X = adata.obsm['X_pca']
elif embedding.lower() == 'umap':
if 'X_umap' not in adata.obsm.keys():
raise ValueError('UMAP object not present in AnnData.')
X = adata.obsm['X_umap']
elif embedding.lower() == 'nmf':
if 'X_nmf' not in adata.obsm.keys():
raise ValueError('NMF object not present in AnnData.')
X = adata.obsm['X_nmf']
else:
raise ValueError('invalid embedding argument')
print('%d cells and %d features.' % X.shape)
contrast_groups = np.unique(adata.obs[contrast])
if len(contrast_groups) != 2:
msg = f'`constrast` must have binary values, not {len(contrast_groups)}'
raise ValueError(msg)
contrast_bindices = []
for g in contrast_groups:
contrast_bindices.append(
adata.obs[contrast] == g
)
groupby_groups = np.unique(adata.obs[groupby])
groupby_bindices = []
for g in groupby_groups:
groupby_bindices.append(
adata.obs[groupby] == g
)
# build matrix of difference vectors
difference_vectors = np.zeros(
(len(groupby_groups), X.shape[1], len(contrast_groups)+1)
)
for i, group in tqdm.tqdm(enumerate(groupby_groups),
desc='Computing group contrasts'):
for j, contrast in enumerate(contrast_groups):
group_bidx = groupby_bindices[i]
contrast_bidx = contrast_bindices[j]
bidx = np.logical_and(group_bidx, contrast_bidx)
cells = X[bidx, :]
difference_vectors[i, :, j] = cells.mean(axis=0)
# compute difference vector
        difference_vectors[i, :, 2] = difference_vectors[i, :, 0] - difference_vectors[i, :, 1]
return difference_vectors
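# Hedged usage sketch (not part of the original module): it builds a tiny synthetic
# AnnData object purely for illustration; the 'age'/'cell_type' column names simply
# mirror the defaults above and the values are random.
if __name__ == "__main__":
    import pandas as pd
    rng = np.random.default_rng(0)
    demo = anndata.AnnData(
        X=rng.random((100, 20)),
        obs=pd.DataFrame({
            "age": rng.choice(["young", "old"], size=100),
            "cell_type": rng.choice(["B", "T"], size=100),
        }),
    )
    vecs = compute_displacement_vector(demo, contrast="age", groupby="cell_type")
    # vecs has shape (n_cell_types, n_genes, 3): the two per-age means plus their difference.
    print(vecs.shape)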
| 34.918367 | 92 | 0.591175 |
b1df5780446f1f41b5b73502680ee6bd1e2ccd30 | 3,136 | py | Python | HiProc/anneal.py | a-moin/hi-opt | 9fe9d86b8968e15e378ebf782ec6b5f971858c8b | [
"BSD-2-Clause"
] | null | null | null | HiProc/anneal.py | a-moin/hi-opt | 9fe9d86b8968e15e378ebf782ec6b5f971858c8b | [
"BSD-2-Clause"
] | null | null | null | HiProc/anneal.py | a-moin/hi-opt | 9fe9d86b8968e15e378ebf782ec6b5f971858c8b | [
"BSD-2-Clause"
] | null | null | null | #***************************************************************************#
# Copyright (c) 2016 - 2017, University of California, Berkeley. #
# #
# Author(s): Ali Moin, EECS Department, UC Berkeley. #
# Pierluigi Nuzzo, EE Department, USC. #
# #
# This file is distributed under the terms in the attached LICENSE file. #
#***************************************************************************#
import random
import numpy as np
import simanneal
# s = [n0, n1, ..., n9, rt, MAC, pw]
PDRmin = 100
PDR_FAIL = 1e6
INFEAS = 1e8
t_sim = 0
data = {}
searched = {}
with open('all_results.txt') as f:
lines = f.readlines()
for l in range(len(lines)):
s = lines[l].split('\t')
data[s[0]] = [round(float(s[1]), 1), round(float(s[2])), float(s[3])]
class HIannealer(simanneal.Annealer):
def move(self):
# choose a random entry in the matrix
i = random.randrange(len(self.state))
# flip the entry 0 <=> 1
if i == 12:
self.state[i] = (self.state[i] + 1) % 3
else:
self.state[i] = 1 - self.state[i]
def energy(self):
# evaluate the function to minimize
global t_sim
global searched
nodes = []
for j in range(10):
if self.state[j] > 0:
nodes.append(j)
if self.state[11] == 1:
mac = 'CSMA'
else:
mac = 'TDMA'
if self.state[10] == 1:
routing = 'Star'
else:
routing = 'Mesh'
if self.state[12] == 0:
pw = '-20dBm'
elif self.state[12] == 1:
pw = '-10dBm'
else:
pw = '0dBm'
dickey = str(nodes) + '|' + mac + '|' + pw + '|' + routing
if dickey in data:
if dickey not in searched:
searched[dickey] = data[dickey]
t_sim += data[dickey][2]
PDR = data[dickey][0]
if PDR >= PDRmin:
return data[dickey][1]
else:
return PDR_FAIL
else:
return INFEAS
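# Worked example (not from the original script) of how a state vector maps onto the
# lookup key built in energy(): the state [1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2]
# selects nodes [0, 2], Star routing (s[10] == 1), TDMA MAC (s[11] == 0) and 0dBm
# transmit power (s[12] == 2), giving dickey == "[0, 2]|TDMA|0dBm|Star".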
s = []
for r in range(12):
s.append(random.randint(0, 1))
s.append(random.randint(0, 2))
opt = HIannealer(s)
opt.steps = 8000
opt.Tmin = 17.0
opt.Tmax = 480000.0
print(opt.anneal())
print(t_sim/3600)
print(len(searched))
# steps = 6000
#
# for i in range(50):
# correct = True
# print('Steps: ' + str(steps))
# for j in range(10):
# s = []
# for r in range(12):
# s.append(random.randint(0,1))
# s.append(random.randint(0,2))
#
# opt = HIannealer(s)
# opt.steps = steps
# opt.Tmin = 17.0
# opt.Tmax = 480000.0
#
# ann_res = opt.anneal()
# print(ann_res)
# if ann_res[1] != 3826.0:
# correct = False
# break
# if correct is True:
# print(steps)
# break
# steps += 1000
| 27.269565 | 77 | 0.442921 |
542b4f622276259ca9900b77a4539149273ea7ad | 2,000 | py | Python | discovery-infra/update_assisted_service_cm.py | yuvalk/assisted-test-infra | 0f933a39f8ecb3dcb38de5ea289d44f3daef69fd | [
"Apache-2.0"
] | null | null | null | discovery-infra/update_assisted_service_cm.py | yuvalk/assisted-test-infra | 0f933a39f8ecb3dcb38de5ea289d44f3daef69fd | [
"Apache-2.0"
] | null | null | null | discovery-infra/update_assisted_service_cm.py | yuvalk/assisted-test-infra | 0f933a39f8ecb3dcb38de5ea289d44f3daef69fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# Idea is to pass os environments to assisted-service config map, to make an easy way to configure assisted-service
#
# Note: defaulting an env var to "" in Makefile, will result in an empty string value in the configmap.
# E.g.
# Makefile:
# MY_VAR := $(or $(MY_VAR), "")
# configmap:
# MY_VAR: ""
#
# Hence, in order to support unset env vars, avoid override in Makefile.
import os
import yaml
CM_PATH = "assisted-service/deploy/assisted-service-configmap.yaml"
ENVS = [
("HW_VALIDATOR_MIN_CPU_CORES", "2"),
("HW_VALIDATOR_MIN_CPU_CORES_WORKER", "2"),
("HW_VALIDATOR_MIN_CPU_CORES_MASTER", "4"),
("HW_VALIDATOR_MIN_RAM_GIB", "3"),
("HW_VALIDATOR_MIN_RAM_GIB_WORKER", "3"),
("HW_VALIDATOR_MIN_RAM_GIB_MASTER", "8"),
("HW_VALIDATOR_MIN_DISK_SIZE_GIB", "10"),
("INSTALLER_IMAGE", ""),
("CONTROLLER_IMAGE", ""),
("SERVICE_BASE_URL", ""),
("AGENT_DOCKER_IMAGE", ""),
("KUBECONFIG_GENERATE_IMAGE", ""),
("BASE_DNS_DOMAINS", ""),
("IMAGE_BUILDER", ""),
("CONNECTIVITY_CHECK_IMAGE", ""),
("HARDWARE_INFO_IMAGE", ""),
("INVENTORY_IMAGE", ""),
("OCM_BASE_URL", ""),
]
def _read_yaml():
if not os.path.exists(CM_PATH):
return
    with open(CM_PATH, "r") as cm_file:
        # Use the safe loader; recent PyYAML versions require an explicit Loader for yaml.load.
        return yaml.safe_load(cm_file)
def _get_relevant_envs():
data = {}
for env in ENVS:
evn_data = os.getenv(env[0], env[1])
# Set value as empty if variable is an empty string (e.g. defaulted in Makefile)
if evn_data == '""':
data[env[0]] = ""
elif evn_data:
data[env[0]] = evn_data
return data
def set_envs_to_service_cm():
cm_data = _read_yaml()
if not cm_data:
raise Exception("%s must exists before setting envs to it" % CM_PATH)
cm_data["data"].update(_get_relevant_envs())
with open(CM_PATH, "w") as cm_file:
yaml.dump(cm_data, cm_file)
if __name__ == "__main__":
set_envs_to_service_cm()
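# Illustrative only (file shape and values are hypothetical): the configmap at CM_PATH
# is expected to look roughly like
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: assisted-service-config
#   data:
#     HW_VALIDATOR_MIN_CPU_CORES: "2"
#     SERVICE_BASE_URL: "http://assisted.example:8090"
# so running e.g. `SERVICE_BASE_URL=http://assisted.example:8090 python update_assisted_service_cm.py`
# merges that variable into the `data:` section.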
| 27.777778 | 115 | 0.634 |
7db32ab35063bba437a27c010de6f77d99c55432 | 1,970 | py | Python | designate/sqlalchemy/expressions.py | melodous/designate | c0da0c464c07d34a9855ab704302d7662beb7c1d | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:13.000Z | 2015-02-26T03:23:13.000Z | designate/sqlalchemy/expressions.py | NeCTAR-RC/designate | 99874f5c608954df9c988740e3cbf0d3b6b7a269 | [
"Apache-2.0"
] | null | null | null | designate/sqlalchemy/expressions.py | NeCTAR-RC/designate | 99874f5c608954df9c988740e3cbf0d3b6b7a269 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import Executable, ClauseElement
class InsertFromSelect(Executable, ClauseElement):
execution_options = \
Executable._execution_options.union({'autocommit': True})
def __init__(self, table, select, columns=None):
self.table = table
self.select = select
self.columns = columns
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
# NOTE(kiall): SQLA 0.8.3+ has an InsertFromSelect built in:
# sqlalchemy.sql.expression.Insert.from_select
# This code can be removed once we require 0.8.3+
table = compiler.process(element.table, asfrom=True)
select = compiler.process(element.select)
if element.columns is not None:
columns = [compiler.preparer.format_column(c) for c in element.columns]
columns = ", ".join(columns)
return "INSERT INTO %s (%s) %s" % (
table,
columns,
select
)
else:
return "INSERT INTO %s %s" % (
table,
select
)
# # Dialect specific compilation example, should it be needed.
# @compiles(InsertFromSelect, 'postgresql')
# def visit_insert_from_select(element, compiler, **kw):
# ...
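# Hedged usage sketch (not from the original module; the table names are invented).
# Given SQLAlchemy Table objects `archive` and `records` with matching columns:
#   stmt = InsertFromSelect(archive, records.select())
#   conn.execute(stmt)
#   # compiles to roughly: INSERT INTO archive SELECT ... FROM records
# Passing e.g. `columns=[archive.c.id, archive.c.data]` adds an explicit column list.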
| 33.389831 | 79 | 0.680711 |
7e413909c7c7279f314a8ac90901f77f55c6d5bc | 6,873 | py | Python | tst/schedulers/test_hyperband_sychronous.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 97 | 2021-11-18T17:14:30.000Z | 2022-03-29T00:33:12.000Z | tst/schedulers/test_hyperband_sychronous.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 54 | 2021-11-18T17:14:12.000Z | 2022-03-22T08:11:48.000Z | tst/schedulers/test_hyperband_sychronous.py | awslabs/syne-tune | 1dd8e157477b86db01047a9a7821780ea04389bc | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2021-11-29T11:47:32.000Z | 2022-02-24T15:28:11.000Z | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import List, Tuple
import numpy as np
from collections import Counter
from syne_tune.optimizer.schedulers.synchronous.hyperband_bracket import (
SynchronousHyperbandBracket,
SlotInRung,
)
from syne_tune.optimizer.schedulers.synchronous.hyperband_bracket_manager import (
SynchronousHyperbandBracketManager,
)
from syne_tune.optimizer.schedulers.synchronous.hyperband_rung_system import (
SynchronousHyperbandRungSystem,
)
def _trial_ids(lst):
return [x[0] for x in lst]
def _ask_for_slots(
bracket: SynchronousHyperbandBracket,
rung_index: int,
level: int,
slot_index: int,
trial_ids: list,
) -> (List[SlotInRung], int):
slots = []
for trial_id in trial_ids:
slot_in_rung = bracket.next_free_slot()
assert slot_in_rung is not None
should_be = SlotInRung(
rung_index=rung_index,
level=level,
slot_index=slot_index,
trial_id=trial_id,
metric_val=None,
)
assert slot_in_rung == should_be, (slot_in_rung, should_be)
slots.append(slot_in_rung)
slot_index += 1
return slots, slot_index
def _send_results(
bracket: SynchronousHyperbandBracket,
slots: List[SlotInRung],
all_results: List[Tuple[int, float]],
):
for slot_in_rung in slots:
trial_id, metric_val = all_results[slot_in_rung.slot_index]
result = SlotInRung(
rung_index=slot_in_rung.rung_index,
level=slot_in_rung.level,
slot_index=slot_in_rung.slot_index,
trial_id=trial_id,
metric_val=metric_val,
)
bracket.on_result(result)
def test_hyperband_bracket():
rungs = [(9, 1), (4, 3), (1, 9)]
results = [
[
(0, 3.0),
(1, 5.0),
(2, 1.0),
(3, 4.0),
(4, 9.0),
(5, 6.0),
(6, 2.0),
(7, 7.0),
(8, 8.0),
],
[(2, 3.1), (6, 3.0), (0, 2.9), (3, 3.0)],
[(0, 1.0)],
]
bracket = SynchronousHyperbandBracket(rungs, mode="min")
# Rung index 0
rung_index = 0
level = rungs[rung_index][1]
slot_index = 0
# Ask for some and return before asking for more
num_jobs = 3
slots, slot_index = _ask_for_slots(
bracket, rung_index, level, slot_index, trial_ids=[None] * num_jobs
)
assert bracket.num_pending_slots() == num_jobs
_send_results(bracket, slots, results[rung_index])
assert bracket.num_pending_slots() == 0
# Ask for some, but do not return all for now
num_jobs = 3
slots_remaining = []
for i in range(2):
slots, slot_index = _ask_for_slots(
bracket, rung_index, level, slot_index, trial_ids=[None] * num_jobs
)
assert bracket.num_pending_slots() == num_jobs + i
slots_remaining.append(slots[0])
slots = slots[1:]
_send_results(bracket, slots, results[rung_index])
assert bracket.num_pending_slots() == i + 1
# At this point, there are no free slots, but some are pending
for slot in slots_remaining:
assert bracket.next_free_slot() is None
_send_results(bracket, [slot], results[rung_index])
# The first rung must be fully occupied now
assert bracket.num_pending_slots() == 0
# Other rungs
for rung_index, all_results in enumerate(results[1:], start=1):
num_jobs, level = rungs[rung_index]
slot_index = 0
trial_ids = [x[0] for x in all_results]
slots, slot_index = _ask_for_slots(
bracket, rung_index, level, slot_index, trial_ids=trial_ids
)
assert bracket.num_pending_slots() == num_jobs
assert bracket.next_free_slot() is None
_send_results(bracket, slots, all_results)
assert bracket.num_pending_slots() == 0
# Now, the bracket must be complete
assert bracket.is_bracket_complete()
def _send_result(
bracket_manager: SynchronousHyperbandBracketManager,
slots: List[Tuple[int, SlotInRung]],
next_trial_id: int,
random_state: np.random.RandomState,
) -> int:
bracket_id, slot_in_rung = slots.pop(0)
if slot_in_rung.trial_id is None:
slot_in_rung.trial_id = next_trial_id
next_trial_id += 1
slot_in_rung.metric_val = random_state.random()
bracket_manager.on_result((bracket_id, slot_in_rung))
return next_trial_id
# def test_hyperband_bracket_manager_create_bracket():
# HIER
# Runs Hyperband for some number of iterations, checking that no assertions
# are raised
def test_hyperband_bracket_manager_running():
random_seed = 31415927
random_state = np.random.RandomState(random_seed)
bracket_rungs = SynchronousHyperbandRungSystem.geometric(
min_resource=2, max_resource=200, reduction_factor=3, num_brackets=6
)
bracket_manager = SynchronousHyperbandBracketManager(bracket_rungs, mode="min")
num_jobs = 4
num_return = 3
num_steps = 5000
next_trial_id = 0
pending_slots = []
for step in range(num_steps):
for _ in range(num_jobs):
pending_slots.append(bracket_manager.next_job())
# Report results for some, but not all
for _ in range(num_return):
next_trial_id = _send_result(
bracket_manager, pending_slots, next_trial_id, random_state
)
# Test whether number of pending are correct
histogram = Counter([x[0] for x in pending_slots])
for bracket_id, num_pending in histogram.items():
assert (
bracket_manager._brackets[bracket_id].num_pending_slots() == num_pending
)
if len(pending_slots) >= 200:
# Clear all pending slots in random ordering
for pos in random_state.permutation(len(pending_slots)):
next_trial_id = _send_result(
bracket_manager, [pending_slots[pos]], next_trial_id, random_state
)
pending_slots = []
# Nothing should be pending anymore
for bracket_id in range(
bracket_manager._primary_bracket_id, bracket_manager._next_bracket_id
):
assert bracket_manager._brackets[bracket_id].num_pending_slots() == 0
| 34.365 | 88 | 0.654299 |
69101bb89750086af43a08346a175a1694a65524 | 1,735 | py | Python | demos/component.py | nickderobertis/nick-derobertis-site | 386061dc258921eed41f2d3965ef69e02adde7ba | [
"MIT"
] | 1 | 2022-03-31T10:55:40.000Z | 2022-03-31T10:55:40.000Z | demos/component.py | nickderobertis/nick-derobertis-site | 386061dc258921eed41f2d3965ef69e02adde7ba | [
"MIT"
] | 8 | 2020-08-28T11:44:37.000Z | 2020-08-31T09:19:19.000Z | demos/component.py | nickderobertis/nick-derobertis-site | 386061dc258921eed41f2d3965ef69e02adde7ba | [
"MIT"
] | null | null | null | import pathlib
import sys
ROOT_PATH = pathlib.Path(__file__).parent.parent
sys.path.insert(0, str(ROOT_PATH))
import param
from nick_derobertis_site.common.component import HTMLComponent
import panel as pn
sub_component_template = """
<p>subcomponent begin</p>
<div id="should-contain-the-subcomponent-items" style="background-color: blue">
<p>some static content in the div before dynamic content</p>
{{ embed(model.inp) }}
{{ embed(model.button) }}
<p>some static content in the div after dynamic content</p>
</div>
<p>subcomponent end</p>
"""
component_template = """
<p>it works begin</p>
<div id="contains-subcomponents">
{% for comp in model.sub_components %}
{{ embed(comp, css_classes=['subcomponent-parent']) }}
{% endfor %}
</div>
<p>it works end</p>
"""
class MySubModel(param.Parameterized):
button = param.ClassSelector(class_=pn.widgets.Button)
inp = param.ClassSelector(class_=pn.widgets.TextInput)
class MySubComp(HTMLComponent):
model = param.ClassSelector(class_=MySubModel)
template_str = sub_component_template
class MyModel(param.Parameterized):
sub_components = param.List(class_=MySubComp)
class MyComp(HTMLComponent):
model = param.ClassSelector(class_=MyModel)
template_str = component_template
sub_components = []
for i in range(3):
button = pn.widgets.Button(name=f"yeah {i}")
inp = pn.widgets.TextInput(value=button.name)
inp.link(button, value="name")
model = MySubModel(button=button, inp=inp)
sub_comp = MySubComp(model=model, css_classes=['parent'], child_css_classes=['child'])
sub_components.append(sub_comp)
model = MyModel(sub_components=sub_components)
view = MyComp(model=model)
view.servable()
| 25.895522 | 90 | 0.724496 |
f8ee9da58964daf11a08fb45da024ef255c6af05 | 378 | py | Python | Guanabara/aula12.py | manuellaAlvesVarella/python | eedb8362f0ebc8074f87d15c9e629e319ff29394 | [
"MIT"
] | 1 | 2022-03-25T20:42:20.000Z | 2022-03-25T20:42:20.000Z | Guanabara/aula12.py | manuellaAlvesVarella/python | eedb8362f0ebc8074f87d15c9e629e319ff29394 | [
"MIT"
] | null | null | null | Guanabara/aula12.py | manuellaAlvesVarella/python | eedb8362f0ebc8074f87d15c9e629e319ff29394 | [
"MIT"
] | null | null | null | nome = str(input('Qual é o seu nome ?')).strip()
if nome == 'Gustavo':
print ('Que nome bonito!')
elif nome == 'Pedro' or nome == 'Maria' or nome == 'Paulo':
print ('Seu nome é bem popular no Brasil')
elif nome in 'Ana Claudia Jéssica Juliana':
print ('Belo nome Feminino')
else:
print ('Seu nome é normal.')
print ('Tenha um bom dia, {}!'.format(nome)) | 37.8 | 60 | 0.616402 |
b4939590391a64e8659c12151c0a43a6c3965f8a | 1,110 | py | Python | install/app_store/tk-desktop/v2.4.2/python/tk_desktop/wait_screen.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-desktop/v2.4.2/python/tk_desktop/wait_screen.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | null | null | null | install/app_store/tk-desktop/v2.4.2/python/tk_desktop/wait_screen.py | JoanAzpeitia/lp_sg | e0ee79555e419dd2ae3a5f31e5515b3f40b22a62 | [
"MIT"
] | 1 | 2020-02-15T10:42:56.000Z | 2020-02-15T10:42:56.000Z | # Copyright (c) 2013 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from sgtk.platform.qt import QtGui
from sgtk.platform.qt import QtCore
from .ui import wait_screen
class WaitScreen(QtGui.QDialog):
""" Simple wait dialog """
def __init__(self, header="", subheader="", parent=None):
QtGui.QDialog.__init__(self, parent)
self.setWindowFlags(QtCore.Qt.Popup)
# setup the GUI
self.ui = wait_screen.Ui_WaitScreen()
self.ui.setupUi(self)
self.ui.header.setText(header)
self.ui.subheader.setText(subheader)
def set_header(self, header):
self.ui.header.setText(header)
def set_subheader(self, subheader):
self.ui.subheader.setText(subheader)
| 32.647059 | 75 | 0.721622 |
6cf9f8c943d4d8850785276788d78a90a7ddaa62 | 6,389 | py | Python | utils/models.py | NitishPuri/MLNDCapstone | f96e3f304fcce93559826a8c554e5d4410c0585b | [
"MIT"
] | null | null | null | utils/models.py | NitishPuri/MLNDCapstone | f96e3f304fcce93559826a8c554e5d4410c0585b | [
"MIT"
] | null | null | null | utils/models.py | NitishPuri/MLNDCapstone | f96e3f304fcce93559826a8c554e5d4410c0585b | [
"MIT"
] | null | null | null | from keras.layers import (Activation, BatchNormalization, Conv2D, Input,
MaxPooling2D, UpSampling2D, concatenate)
from keras.layers import MaxPool2D, GlobalAveragePooling2D, Dense
from keras.optimizers import Adam, RMSprop
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
from utils.params import *
from utils.losses import *
def get_manufacturer_model():
"""
Get model to predict car manufacturer given an input image.
"""
manufacturer_model = Sequential()
manufacturer_model.add(Conv2D(16, kernel_size= (3, 3), activation="relu",
input_shape = (INPUT_SIZE, INPUT_SIZE, 3)))
manufacturer_model.add(MaxPool2D())
manufacturer_model.add(Conv2D(32, kernel_size = (3, 3), activation = "relu"))
manufacturer_model.add(MaxPool2D())
manufacturer_model.add(Conv2D(64, kernel_size = (3, 3), activation = "relu"))
manufacturer_model.add(MaxPool2D())
manufacturer_model.add(Conv2D(128, kernel_size = (3, 3), activation = "relu"))
manufacturer_model.add(MaxPool2D())
# manufacturer_model.add(Conv2D(256, kernel_size = (3, 3), activation = "relu"))
# manufacturer_model.add(MaxPool2D())
# manufacturer_model.add(Conv2D(512, kernel_size = (3, 3), activation = "relu"))
# manufacturer_model.add(MaxPool2D())
manufacturer_model.add(GlobalAveragePooling2D())
    manufacturer_model.add(Dense(36, activation = 'softmax'))  # 36 = number of car manufacturers in the dataset
manufacturer_model.compile(optimizer = Adam(), loss = 'categorical_crossentropy', metrics = ['accuracy'])
return manufacturer_model
def get_baseline_model(input_shape=(128, 128, 3)):
"""
Get a simple 3 layer CNN model to compute mask given an input image.
"""
baseline_model = Sequential()
    baseline_model.add(Conv2D(16, kernel_size=(3, 3), activation='relu', padding='same', input_shape=input_shape))
    baseline_model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', padding='same'))
    baseline_model.add(Conv2D(1, kernel_size=(5, 5), activation='sigmoid', padding='same'))
# baseline_model.summary()
# SVG(model_to_dot(baseline_model).create(prog='dot', format='svg'))
baseline_model.compile(Adam(lr=1e-3), bce_dice_loss, metrics=['accuracy', dice_coeff])
return baseline_model
def get_unet_128(input_shape=(128, 128, 3),
num_classes=1):
"""
Get a Unet based CNN model to compute mask given an input image if size(128X128).
"""
inputs = Input(shape=input_shape)
# 128
down1 = Conv2D(64, (3, 3), padding='same')(inputs)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1 = Conv2D(64, (3, 3), padding='same')(down1)
down1 = BatchNormalization()(down1)
down1 = Activation('relu')(down1)
down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
# 64
down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2 = Conv2D(128, (3, 3), padding='same')(down2)
down2 = BatchNormalization()(down2)
down2 = Activation('relu')(down2)
down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
# 32
down3 = Conv2D(256, (3, 3), padding='same')(down2_pool)
down3 = BatchNormalization()(down3)
down3 = Activation('relu')(down3)
down3 = Conv2D(256, (3, 3), padding='same')(down3)
down3 = BatchNormalization()(down3)
down3 = Activation('relu')(down3)
down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
# 16
# down4 = Conv2D(512, (3, 3), padding='same')(down3_pool)
# down4 = BatchNormalization()(down4)
# down4 = Activation('relu')(down4)
# down4 = Conv2D(512, (3, 3), padding='same')(down4)
# down4 = BatchNormalization()(down4)
# down4 = Activation('relu')(down4)
# down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
# 8
center = Conv2D(512, (3, 3), padding='same')(down3_pool)
center = BatchNormalization()(center)
center = Activation('relu')(center)
center = Conv2D(512, (3, 3), padding='same')(center)
center = BatchNormalization()(center)
center = Activation('relu')(center)
# center
# up4 = UpSampling2D((2, 2))(center)
# up4 = concatenate([down4, up4], axis=3)
# up4 = Conv2D(512, (3, 3), padding='same')(up4)
# up4 = BatchNormalization()(up4)
# up4 = Activation('relu')(up4)
# up4 = Conv2D(512, (3, 3), padding='same')(up4)
# up4 = BatchNormalization()(up4)
# up4 = Activation('relu')(up4)
# up4 = Conv2D(512, (3, 3), padding='same')(up4)
# up4 = BatchNormalization()(up4)
# up4 = Activation('relu')(up4)
# 16
up3 = UpSampling2D((2, 2))(center)
up3 = concatenate([down3, up3], axis=3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
up3 = Conv2D(256, (3, 3), padding='same')(up3)
up3 = BatchNormalization()(up3)
up3 = Activation('relu')(up3)
# 32
up2 = UpSampling2D((2, 2))(up3)
up2 = concatenate([down2, up2], axis=3)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
up2 = Conv2D(128, (3, 3), padding='same')(up2)
up2 = BatchNormalization()(up2)
up2 = Activation('relu')(up2)
# 64
up1 = UpSampling2D((2, 2))(up2)
up1 = concatenate([down1, up1], axis=3)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
up1 = Conv2D(64, (3, 3), padding='same')(up1)
up1 = BatchNormalization()(up1)
up1 = Activation('relu')(up1)
# 128
classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up1)
model = Model(inputs=inputs, outputs=classify)
model.compile(optimizer=RMSprop(lr=0.0001), loss=bce_dice_loss, metrics=[dice_coeff])
return model
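# Hedged usage sketch (not part of the original module): build the 128x128 U-Net and
# confirm it emits a per-pixel mask of the same spatial size.
if __name__ == "__main__":
    unet = get_unet_128(input_shape=(128, 128, 3), num_classes=1)
    unet.summary()
    # The final Conv2D(num_classes, (1, 1), activation='sigmoid') layer yields shape (None, 128, 128, 1).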
| 38.721212 | 133 | 0.637345 |
4740b7f79d18c7490290b025477dacf128a94124 | 20,674 | py | Python | examples/language-modeling/run_mlm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 2 | 2021-04-18T07:58:07.000Z | 2021-07-14T01:50:45.000Z | examples/language-modeling/run_mlm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 2 | 2021-06-22T23:35:09.000Z | 2022-02-22T21:40:11.000Z | examples/language-modeling/run_mlm.py | JadeMaveric/transformers | fb2b89840bf2ab9f74702bf83af8ddf92b61efb3 | [
"Apache-2.0"
] | 1 | 2020-11-02T06:37:04.000Z | 2020-11-02T06:37:04.000Z | #!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=masked-lm
"""
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForLanguageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.5.0.dev0")
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
)
line_by_line: bool = field(
default=False,
metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_val_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of validation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub
#
# For CSV/JSON files, this script will use the column called 'text' or the first column. You can easily tweak this
# behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config_kwargs = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
elif model_args.model_name_or_path:
config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
tokenizer_kwargs = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.model_name_or_path:
model = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForMaskedLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warn(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --max_seq_length xxx."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warn(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if data_args.line_by_line:
# When using line_by_line, we just tokenize each nonempty line.
padding = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(examples):
# Remove empty lines
examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()]
return tokenizer(
examples["text"],
padding=padding,
truncation=True,
max_length=max_seq_length,
# We use this option because DataCollatorForLanguageModeling (see below) is more efficient when it
# receives the `special_tokens_mask`.
return_special_tokens_mask=True,
)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[text_column_name],
load_from_cache_file=not data_args.overwrite_cache,
)
else:
# Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts.
# We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more
# efficient when it receives the `special_tokens_mask`.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_special_tokens_mask=True)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
# max_seq_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
total_length = (total_length // max_seq_length) * max_seq_length
# Split by chunks of max_len.
result = {
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
for k, t in concatenated_examples.items()
}
return result
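    # Illustration (not in the original script): with max_seq_length = 4, a batch
    #   {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]]}
    # is concatenated to [1, 2, 3, 4, 5, 6, 7, 8] and re-chunked into
    #   {"input_ids": [[1, 2, 3, 4], [5, 6, 7, 8]]}
    # with any trailing remainder shorter than max_seq_length dropped.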
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_train:
if "train" not in tokenized_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = tokenized_datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in tokenized_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = tokenized_datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
# Data collator
# This one will take care of randomly masking the tokens.
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability)
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
perplexity = math.exp(metrics["eval_loss"])
metrics["perplexity"] = perplexity
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 43.070833 | 119 | 0.672536 |
5b52ac519855ddc461ea4706864ee41c786a3ffc | 834 | py | Python | Recursion ana Comninatorial Problems/school_teams .py | borislavstoychev/Algorithms | 629f9d8df00399af85acf8893db1599e46db6013 | [
"MIT"
] | null | null | null | Recursion ana Comninatorial Problems/school_teams .py | borislavstoychev/Algorithms | 629f9d8df00399af85acf8893db1599e46db6013 | [
"MIT"
] | null | null | null | Recursion ana Comninatorial Problems/school_teams .py | borislavstoychev/Algorithms | 629f9d8df00399af85acf8893db1599e46db6013 | [
"MIT"
] | null | null | null | class Combinations:
def __init__(self, elements, num):
self.elements = elements
self.slots = elements[:num]
self.all = []
def solve(self, index=0, start=0):
if index >= len(self.slots):
self.all.append(self.slots[::])
else:
for i in range(start, len(self.elements)):
self.slots[index] = self.elements[i]
self.solve(index + 1, i + 1)
def main():
all_girls = input().split(", ")
all_boys = input().split(", ")
girls = Combinations(all_girls, 3)
boys = Combinations(all_boys, 2)
girls.solve()
boys.solve()
for comb_g in girls.all:
for comb_b in boys.all:
print(*comb_g + comb_b, sep=", ")
if __name__ == "__main__":
main()
# Lisa, Yoana, Marta, Rachel
# Georgi, Garry, Bob
| 23.828571 | 54 | 0.553957 |
701dffb3c6bae19815ddef6a3b981dc13bad0a3c | 113 | py | Python | to-doApp/todo/tasks/admin.py | patrickikhidero/todo-app | 58df1abbd7195685606c7261f828807cee565091 | [
"MIT"
] | null | null | null | to-doApp/todo/tasks/admin.py | patrickikhidero/todo-app | 58df1abbd7195685606c7261f828807cee565091 | [
"MIT"
] | null | null | null | to-doApp/todo/tasks/admin.py | patrickikhidero/todo-app | 58df1abbd7195685606c7261f828807cee565091 | [
"MIT"
] | null | null | null | from django.contrib import admin
# Register your models here.
from .models import *
admin.site.register(Task) | 16.142857 | 32 | 0.769912 |
7a58c3f40015bab1f5a653a0698e7e6e740fb2ce | 650 | py | Python | assignments/A3/sampleXMLReader.py | jajayongjia/366 | 6389723638cef07adbe28a9c7116bc4f71b0e29c | [
"MIT"
] | null | null | null | assignments/A3/sampleXMLReader.py | jajayongjia/366 | 6389723638cef07adbe28a9c7116bc4f71b0e29c | [
"MIT"
] | null | null | null | assignments/A3/sampleXMLReader.py | jajayongjia/366 | 6389723638cef07adbe28a9c7116bc4f71b0e29c | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
tree = ET.parse('items.xml')
root = tree.getroot()
# one specific item attribute
print('Item #2 attribute:')
print(root[0][1].attrib)
# all item attributes
print('\nAll attributes:')
for elem in root:
for subelem in elem:
print(subelem.attrib)
# one specific item's data
print('\nItem #2 data:')
print(root[0][1].text)
# all items data
print('\nAll item data:')
for elem in root:
for subelem in elem:
print(subelem.text)
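# Hypothetical items.xml matching the indexing above (root[0][1] is the second
# <item> inside the root's first child); the real file is not shown here:
#   <data>
#       <items>
#           <item name="item1">item1abc</item>
#           <item name="item2">item2abc</item>
#       </items>
#   </data>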
# Sample console output:
# $ python treeparser.py
# Item #2 attribute:
# item2
# All attributes:
# item1
# item2
# Item #2 data:
# item2abc
# All item data:
# item1abc
# item2abc | 17.105263 | 36 | 0.667692 |
418c4e7f29294a840c55f6cc5b6ee071d770b1c1 | 160 | py | Python | packs/kubernetes/actions/lib/action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 164 | 2015-01-17T16:08:33.000Z | 2021-08-03T02:34:07.000Z | packs/kubernetes/actions/lib/action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 442 | 2015-01-01T11:19:01.000Z | 2017-09-06T23:26:17.000Z | packs/kubernetes/actions/lib/action.py | userlocalhost2000/st2contrib | 1a5f759e76401743ed9023d298a3d767e3885db1 | [
"Apache-2.0"
] | 202 | 2015-01-13T00:37:40.000Z | 2020-11-07T11:30:10.000Z | from st2actions.runners.pythonrunner import Action
class BaseAction(Action):
def __init__(self, config):
super(BaseAction, self).__init__(config)
| 22.857143 | 50 | 0.75 |
1155efef9a424b4d8df87af86b1be2e2fe20197b | 2,332 | py | Python | nicos_mlz/sans1/setups/sample_table_2.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/sans1/setups/sample_table_2.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_mlz/sans1/setups/sample_table_2.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | description = 'top sample table devices'
includes = ['sample_table_1']
group = 'optional'
tango_base = 'tango://sans1hw.sans1.frm2:10000/sans1/table/'
devices = dict(
st2_z = device('nicos.devices.generic.Axis',
description = 'table 2 z axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-10, 10),
precision = 0.01,
motor = 'st2_zmot',
coder = 'st2_zenc',
),
st2_zmot = device('nicos.devices.tango.Motor',
description = 'sample table 2 z motor',
tangodevice = tango_base + 'st2_zmot',
fmtstr = '%.2f',
abslimits = (-10, 10),
lowlevel = True,
),
st2_zenc = device('nicos.devices.tango.Sensor',
description = 'sample table 2 z encoder',
tangodevice = tango_base + 'st2_zenc',
fmtstr = '%.2f',
lowlevel = True,
),
st2_y = device('nicos.devices.generic.Axis',
description = 'table 2 y axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-25, 25),
precision = 0.01,
motor = 'st2_ymot',
coder = 'st2_yenc',
),
st2_ymot = device('nicos.devices.tango.Motor',
description = 'sample table 2 y motor',
tangodevice = tango_base + 'st2_ymot',
fmtstr = '%.2f',
abslimits = (-25, 25),
lowlevel = True,
),
st2_yenc = device('nicos.devices.tango.Sensor',
        description = 'sample table 2 y encoder',
tangodevice = tango_base + 'st2_yenc',
fmtstr = '%.2f',
lowlevel = True,
),
st2_x = device('nicos.devices.generic.Axis',
description = 'table 2 x axis',
pollinterval = 15,
maxage = 60,
fmtstr = '%.2f',
abslimits = (-25, 25),
precision = 0.01,
motor = 'st2_xmot',
coder = 'st2_xenc',
),
st2_xmot = device('nicos.devices.tango.Motor',
description = 'sample table 2 x motor',
tangodevice = tango_base + 'st2_xmot',
fmtstr = '%.2f',
abslimits = (-25, 25),
lowlevel = True,
),
st2_xenc = device('nicos.devices.tango.Sensor',
description = 'sample table 2 x encoder',
tangodevice = tango_base + 'st2_xenc',
fmtstr = '%.2f',
lowlevel = True,
),
)
| 29.15 | 60 | 0.540309 |
3cea607055b39d04c3c65d4bf19911316b73029e | 219 | py | Python | pipenv/__version__.py | gzzo/pipenv | 5ba7d7d168af3be99b14adeaac5b488bbacc7b68 | [
"MIT"
] | null | null | null | pipenv/__version__.py | gzzo/pipenv | 5ba7d7d168af3be99b14adeaac5b488bbacc7b68 | [
"MIT"
] | null | null | null | pipenv/__version__.py | gzzo/pipenv | 5ba7d7d168af3be99b14adeaac5b488bbacc7b68 | [
"MIT"
] | null | null | null |
# ___ ( ) ___ ___ __
# // ) ) / / // ) ) //___) ) // ) ) || / /
# //___/ / / / //___/ / // // / / || / /
# // / / // ((____ // / / ||/ /
__version__ = '11.0.2'
| 27.375 | 51 | 0.178082 |
fead937c561fee138cb13156a3e0c81a2343b9c8 | 10,251 | py | Python | ee/clickhouse/views/test/test_clickhouse_retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | ee/clickhouse/views/test/test_clickhouse_retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | ee/clickhouse/views/test/test_clickhouse_retention.py | lharress/posthog | 73809d54b14ffc1b6ad6f600e0e4f06ab3090cb1 | [
"MIT"
] | null | null | null | from dataclasses import asdict, dataclass
from typing import List, Literal, Optional, TypedDict, Union
from django.test import TestCase
from django.test.client import Client
from ee.clickhouse.test.test_journeys import _create_all_events, update_or_create_person
from ee.clickhouse.util import ClickhouseTestMixin
from ee.clickhouse.views.test.funnel.util import EventPattern
from posthog.api.test.test_organization import create_organization
from posthog.api.test.test_team import create_team
from posthog.api.test.test_user import create_user
from posthog.test.base import test_with_materialized_columns
from posthog.utils import encode_get_request_params
class RetentionTests(TestCase, ClickhouseTestMixin):
def test_can_get_retention_cohort_breakdown(self):
organization = create_organization(name="test")
team = create_team(organization=organization)
user = create_user(email="test@posthog.com", password="1234", organization=organization)
self.client.force_login(user)
update_or_create_person(distinct_ids=["person 1"], team_id=team.pk)
update_or_create_person(distinct_ids=["person 2"], team_id=team.pk)
update_or_create_person(distinct_ids=["person 3"], team_id=team.pk)
setup_user_activity_by_day(
daily_activity={
"2020-01-01": {"person 1": [{"event": "target event"}], "person 2": [{"event": "target event"}]},
"2020-01-02": {"person 1": [{"event": "target event"}], "person 3": [{"event": "target event"}]},
"2020-01-03": {"person 1": [{"event": "target event"}], "person 3": [{"event": "target event"}]},
},
team=team,
)
retention = get_retention_ok(
client=self.client,
team_id=team.pk,
request=RetentionRequest(
target_entity={"id": "target event", "type": "events"},
returning_entity={"id": "target event", "type": "events"},
date_from="2020-01-01",
total_intervals=2,
date_to="2020-01-02",
period="Day",
retention_type="retention_first_time",
),
)
retention_by_cohort_by_period = get_by_cohort_by_period_from_response(response=retention)
self.assertEqual(
retention_by_cohort_by_period,
{
"Day 0": {"1": 2, "2": 1,}, # ["person 1", "person 2"] # ["person 1"]
"Day 1": {"1": 1}, # ["person 3"]
},
)
@test_with_materialized_columns(person_properties=["os"])
def test_can_specify_breakdown_person_property(self):
"""
By default, we group users together by the first time they perform the
`target_event`. However, we should also be able to specify, e.g. the
users OS to be able to compare retention between the OSs.
"""
organization = create_organization(name="test")
team = create_team(organization=organization)
user = create_user(email="test@posthog.com", password="1234", organization=organization)
self.client.force_login(user)
update_or_create_person(distinct_ids=["person 1"], team_id=team.pk, properties={"os": "Chrome"})
update_or_create_person(distinct_ids=["person 2"], team_id=team.pk, properties={"os": "Safari"})
setup_user_activity_by_day(
daily_activity={
"2020-01-01": {"person 1": [{"event": "target event"}]},
"2020-01-02": {"person 1": [{"event": "target event"}], "person 2": [{"event": "target event"}]},
# IMPORTANT: we include data past the end of the requested
# window, as we want to ensure that we pick up all retention
# periods for a user. e.g. for "person 2" we do not want to miss
# the count from 2020-01-03 e.g. the second period, otherwise we
# will skew results for users that didn't perform their target
# event right at the beginning of the requested range.
"2020-01-03": {"person 1": [{"event": "target event"}], "person 2": [{"event": "target event"}]},
},
team=team,
)
retention = get_retention_ok(
client=self.client,
team_id=team.pk,
request=RetentionRequest(
target_entity={"id": "target event", "type": "events"},
returning_entity={"id": "target event", "type": "events"},
date_from="2020-01-01",
total_intervals=2,
date_to="2020-01-02",
period="Day",
retention_type="retention_first_time",
breakdowns=[Breakdown(type="person", property="os")],
# NOTE: we need to specify breakdown_type as well, as the
# breakdown logic currently does not support multiple differing
# types
breakdown_type="person",
),
)
retention_by_cohort_by_period = get_by_cohort_by_period_from_response(response=retention)
self.assertEqual(
retention_by_cohort_by_period,
{
"Chrome": {"1": 1, "2": 1},
"Safari": {"1": 1, "2": 1}, # IMPORTANT: the "2" value is from past the requested `date_to`
},
)
@test_with_materialized_columns(event_properties=["os"])
def test_can_specify_breakdown_event_property(self):
"""
By default, we group users together by the first time they perform the
`target_event`. However, we should also be able to specify, e.g. the
users OS to be able to compare retention between the OSs.
"""
organization = create_organization(name="test")
team = create_team(organization=organization)
user = create_user(email="test@posthog.com", password="1234", organization=organization)
self.client.force_login(user)
update_or_create_person(distinct_ids=["person 1"], team_id=team.pk)
update_or_create_person(distinct_ids=["person 2"], team_id=team.pk)
setup_user_activity_by_day(
daily_activity={
"2020-01-01": {"person 1": [{"event": "target event", "properties": {"os": "Chrome"}}]},
"2020-01-02": {
"person 1": [{"event": "target event"}],
"person 2": [{"event": "target event", "properties": {"os": "Safari"}}],
},
# IMPORTANT: we include data past the end of the requested
# window, as we want to ensure that we pick up all retention
# periods for a user. e.g. for "person 2" we do not want to miss
# the count from 2020-01-03 e.g. the second period, otherwise we
# will skew results for users that didn't perform their target
# event right at the beginning of the requested range.
"2020-01-03": {"person 1": [{"event": "target event"}], "person 2": [{"event": "target event"}]},
},
team=team,
)
retention = get_retention_ok(
client=self.client,
team_id=team.pk,
request=RetentionRequest(
target_entity={"id": "target event", "type": "events"},
returning_entity={"id": "target event", "type": "events"},
date_from="2020-01-01",
total_intervals=2,
date_to="2020-01-02",
period="Day",
retention_type="retention_first_time",
breakdowns=[Breakdown(type="event", property="os")],
# NOTE: we need to specify breakdown_type as well, as the
# breakdown logic currently does not support multiple differing
# types
breakdown_type="event",
),
)
retention_by_cohort_by_period = get_by_cohort_by_period_from_response(response=retention)
self.assertEqual(
retention_by_cohort_by_period,
{
"Chrome": {"1": 1, "2": 1},
"Safari": {"1": 1, "2": 1}, # IMPORTANT: the "2" value is from past the requested `date_to`
},
)
def setup_user_activity_by_day(daily_activity, team):
_create_all_events(
[
{"distinct_id": person_id, "team": team, "timestamp": timestamp, **event}
for timestamp, people in daily_activity.items()
for person_id, events in people.items()
for event in events
]
)
@dataclass
class Breakdown:
type: str
property: str
@dataclass
class RetentionRequest:
date_from: str # From what I can tell, this doesn't do anything, rather `total_intervals` is used
total_intervals: int
date_to: str
target_entity: EventPattern
returning_entity: EventPattern
period: Union[Literal["Hour"], Literal["Day"], Literal["Week"], Literal["Month"]]
retention_type: Literal["retention_first_time"] # probably not an exhaustive list
breakdowns: Optional[List[Breakdown]] = None
breakdown_type: Optional[Literal["person", "event"]] = None
class Value(TypedDict):
count: int
class Cohort(TypedDict):
values: List[Value]
date: str
label: str
class RetentionResponse(TypedDict):
result: List[Cohort]
def get_retention_ok(client: Client, team_id: int, request: RetentionRequest) -> RetentionResponse:
response = get_retention(client=client, team_id=team_id, request=request)
assert response.status_code == 200, response.content
return response.json()
def get_retention(client: Client, team_id: int, request: RetentionRequest):
return client.get(
f"/api/projects/{team_id}/insights/retention/",
# NOTE: for get requests we need to JSON encode non-scalars
data=encode_get_request_params(asdict(request)),
)
def get_by_cohort_by_period_from_response(response: RetentionResponse):
return {
cohort["label"]: {f"{period + 1}": value["count"] for period, value in enumerate(cohort["values"])}
for cohort in response["result"]
}
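# A small worked example of the helper above (the values are made up, not taken
# from the tests): a response containing a single cohort labelled "Chrome" with
# counts [2, 1] is flattened to {"Chrome": {"1": 2, "2": 1}} - i.e. the periods
# are re-numbered starting from "1" rather than 0.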
| 41.004 | 113 | 0.605697 |
f27a7d996908d4ec208b981a0efb74158f38928f | 2,226 | py | Python | autotest/gdrivers/lan.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | [
"MIT"
] | null | null | null | autotest/gdrivers/lan.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | [
"MIT"
] | null | null | null | autotest/gdrivers/lan.py | rcoup/gdal | 31240deb7b71d990a2abbad1bebedd0918989ca0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test LAN driver
# Author: Even Rouault, <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append('../pymod')
import gdaltest
###############################################################################
# Test reading a - fake - LAN 8 bit dataset
def lan_1():
tst = gdaltest.GDALTest('LAN', 'fakelan.lan', 1, 10)
return tst.testOpen()
###############################################################################
# Test reading a - fake - LAN 4 bit dataset
def lan_2():
tst = gdaltest.GDALTest('LAN', 'fakelan4bit.lan', 1, 10)
return tst.testOpen()
gdaltest_list = [
lan_1,
lan_2]
if __name__ == '__main__':
gdaltest.setup_run('lan')
gdaltest.run_tests(gdaltest_list)
sys.exit(gdaltest.summarize())
| 33.223881 | 79 | 0.60018 |
48bfe580870846c9a1c53b048d23c10f1821335b | 251 | py | Python | example/demo/scripts/__init__.py | iwwxiong/flask_restapi | 57fca3bf07d913b31b6b7ef877328b0e07056c39 | [
"MIT"
] | 6 | 2019-04-23T02:18:55.000Z | 2019-12-10T13:16:21.000Z | example/demo/scripts/__init__.py | dracarysX/flask_scaffold | 57fca3bf07d913b31b6b7ef877328b0e07056c39 | [
"MIT"
] | null | null | null | example/demo/scripts/__init__.py | dracarysX/flask_scaffold | 57fca3bf07d913b31b6b7ef877328b0e07056c39 | [
"MIT"
] | 3 | 2019-05-22T06:00:17.000Z | 2020-01-14T17:02:35.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from demo.author.models import Author
from demo.book.models import Book
def create_tables():
"""
    Create the database tables.
:return:
"""
Author.create_table()
Book.create_table()
return
| 13.210526 | 37 | 0.625498 |
300205b96b83908b58c5852c6cc22a259bddcd03 | 2,333 | py | Python | tests/configuration.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2 | 2018-03-07T12:25:05.000Z | 2018-03-19T01:00:10.000Z | tests/configuration.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | tests/configuration.py | KarthikKothareddy/AirFlow | faaf0b8b4467bcf5bff4a5b49086a9e02cb9c112 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 3 | 2020-11-25T15:01:28.000Z | 2021-04-12T04:09:41.000Z | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import six
from airflow import configuration
from airflow.configuration import conf
class ConfTest(unittest.TestCase):
    def setUp(self):
configuration.load_test_config()
def test_env_var_config(self):
opt = conf.get('testsection', 'testkey')
self.assertEqual(opt, 'testvalue')
def test_conf_as_dict(self):
cfg_dict = conf.as_dict()
# test that configs are picked up
self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True')
# test env vars
self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >')
# test display_source
cfg_dict = conf.as_dict(display_source=True)
self.assertEqual(
cfg_dict['core']['load_examples'][1], 'airflow config')
self.assertEqual(
cfg_dict['testsection']['testkey'], ('< hidden >', 'env var'))
# test display_sensitive
cfg_dict = conf.as_dict(display_sensitive=True)
self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
# test display_source and display_sensitive
cfg_dict = conf.as_dict(display_sensitive=True, display_source=True)
self.assertEqual(
cfg_dict['testsection']['testkey'], ('testvalue', 'env var'))
def test_broker_transport_options(self):
section_dict = conf.getsection("celery_broker_transport_options")
self.assertTrue(isinstance(section_dict['visibility_timeout'], int))
self.assertTrue(isinstance(section_dict['_test_only_bool'], bool))
self.assertTrue(isinstance(section_dict['_test_only_float'], float))
self.assertTrue(isinstance(section_dict['_test_only_string'], six.string_types))
| 34.820896 | 88 | 0.696528 |
7cbfda823aed130382fc862d7d4dd28f4f00d6bd | 465 | py | Python | ARFshock_label/config.py | MLD3/MLHC2019_Relaxed_Parameter_Sharing | 152fe3e5069eee91b88a144912eb31ac2fd809cd | [
"MIT"
] | 1 | 2021-05-05T02:06:08.000Z | 2021-05-05T02:06:08.000Z | ARFshock_label/config.py | MLD3/MLHC2019_Relaxed_Parameter_Sharing | 152fe3e5069eee91b88a144912eb31ac2fd809cd | [
"MIT"
] | null | null | null | ARFshock_label/config.py | MLD3/MLHC2019_Relaxed_Parameter_Sharing | 152fe3e5069eee91b88a144912eb31ac2fd809cd | [
"MIT"
] | null | null | null | import os, yaml
with open(os.path.join(os.path.dirname(__file__), 'config.yaml')) as f:
config = yaml.full_load(f)
data_path = os.path.join(os.path.dirname(__file__), config['data_path'])
mimic3_path = os.path.join(os.path.dirname(__file__), config['mimic3_path'])
ID_col = config['column_names']['ID']
t_col = config['column_names']['t']
var_col = config['column_names']['var_name']
val_col = config['column_names']['var_value']
parallel = True
n_jobs = 72
| 31 | 76 | 0.72043 |
fcaefa092df2280fcdb3f11a5301bc1cf8473809 | 1,496 | py | Python | setup.py | JosueDLA/ShapeDetection | caf79ddc97888f69d829d84d2ee0a6599a7a5f2c | [
"MIT"
] | null | null | null | setup.py | JosueDLA/ShapeDetection | caf79ddc97888f69d829d84d2ee0a6599a7a5f2c | [
"MIT"
] | null | null | null | setup.py | JosueDLA/ShapeDetection | caf79ddc97888f69d829d84d2ee0a6599a7a5f2c | [
"MIT"
] | null | null | null | """
# Shape Detection
A module for detecting shapes using OpenCV.
Installation
```bash
pip install shape-detection-jdla
```
# Overview
Square detection rules:
- There must be four corners
- All four lines must be the same length
- All four corners must be 90°
- Lines AB and CD must be horizontal lines
- Lines AC and BD must be vertical lines
- The contour must be concave
# Example
```python
from shape_detection.square import Square
import numpy as np
try:
contour = np.array([[[368, 160]], [[391, 163]],
[[384, 200]], [[361, 194]]])
square = Square.is_square(contour)
print(square)
except Exception as e:
print(e)
```
# Links
* [github](https://github.com/JosueDLA/ShapeDetection)
"""
from setuptools import find_packages
from setuptools import setup
setup(
name="shape-detection-jdla",
version="0.0.3",
py_modules=["shape_detection"],
description="Shape detection using OpenCV",
long_description_content_type="text/markdown",
long_description=__doc__,
author="Josué de León",
author_email="josuedlavs@gmail.com",
url="https://github.com/JosueDLA/ShapeDetection",
license='MIT',
packages=find_packages(),
install_requires=[
"numpy==1.22.3",
"opencv-python==4.5.5.64",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.10',
)
| 20.493151 | 54 | 0.659091 |
62499a8380249a4f60cc4ffa77aafbe26530f172 | 548 | py | Python | newbeginning/opensource/correction/correctedstate.py | arnavkapoor/fsmresults | 96daf7e86ed58fea2d7cbbe9364a866c7a548a3e | [
"Apache-2.0"
] | null | null | null | newbeginning/opensource/correction/correctedstate.py | arnavkapoor/fsmresults | 96daf7e86ed58fea2d7cbbe9364a866c7a548a3e | [
"Apache-2.0"
] | null | null | null | newbeginning/opensource/correction/correctedstate.py | arnavkapoor/fsmresults | 96daf7e86ed58fea2d7cbbe9364a866c7a548a3e | [
"Apache-2.0"
] | null | null | null | import os
path = '/home/arnav/fsmresults/newbeginning/opensource/opensrcexpanded'
for folder, sub_folders, files in os.walk(path):
for special_file in files:
file_path = os.path.join(folder, special_file)
filename=file_path.rsplit('/')[-1]
with open(file_path, 'r+') as read_file:
states = 0
for line in read_file:
line = line.strip().split()
if(len(line) == 4):
states = max(states,int(line[1]),int(line[2]))
print(states,filename) | 42.153846 | 71 | 0.583942 |
a3d34ef5ee377ed29607508d27ceceeb63edbe9c | 3,888 | py | Python | utils/utils.py | Jintao-Huang/EfficientNet_PyTorch | 4682c49b60d77f104b021b9c555e804bac8a5bbf | [
"Apache-2.0"
] | 8 | 2020-10-26T03:31:05.000Z | 2021-11-12T07:02:59.000Z | utils/utils.py | Jintao-Huang/efficientnet_pytorch | 4682c49b60d77f104b021b9c555e804bac8a5bbf | [
"Apache-2.0"
] | 2 | 2020-06-01T07:38:10.000Z | 2021-04-11T15:39:16.000Z | utils/utils.py | Jintao-Huang/efficientnet_pytorch | 4682c49b60d77f104b021b9c555e804bac8a5bbf | [
"Apache-2.0"
] | 2 | 2020-11-15T14:38:56.000Z | 2021-05-29T08:29:20.000Z | # Author: Jintao Huang
# Time: 2020-5-24
import pickle
import hashlib
import torch
import numpy as np
from torch.backends import cudnn
import os
def save_to_pickle(data, filepath):
"""$"""
with open(filepath, "wb") as f:
pickle.dump(data, f)
def load_from_pickle(filepath):
"""$"""
with open(filepath, "rb") as f:
obj = pickle.load(f)
return obj
def calculate_hash(filepath):
sha256 = hashlib.sha256()
with open(filepath, "rb") as f:
while True:
buffer = f.read(4096)
if not buffer:
break
sha256.update(buffer)
digest = sha256.hexdigest()
return digest[:8]
def set_seed(seed=0):
"""网络重现"""
torch.manual_seed(seed)
np.random.seed(seed)
    # disable the nondeterminism introduced by cudnn's reduced-precision speedups
cudnn.deterministic = True
# cudnn.benchmark = True # if benchmark == True, deterministic will be False
def save_params(model, filepath):
torch.save(model.state_dict(), filepath)
def load_params(model, filepath, prefix="", drop_layers=(), strict=True):
"""
    :param model: updated in place
    :param filepath: str
    :param prefix: prefix added to every key of the loaded state_dict.
    :param drop_layers: keys (after prefixing) containing any of these substrings are dropped.
    :param strict: bool
"""
load_state_dict = torch.load(filepath)
    # 1. add the prefix
if prefix:
for key in list(load_state_dict.keys()):
load_state_dict[prefix + key] = load_state_dict.pop(key)
# 2. drop
for key in list(load_state_dict.keys()):
for layer in drop_layers:
if layer in key:
load_state_dict.pop(key)
break
return model.load_state_dict(load_state_dict, strict)
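# Minimal usage sketch for load_params(); the checkpoint path, prefix and layer
# names below are placeholders rather than files shipped with this repo:
#   model = EfficientNet(...)                                   # any nn.Module with matching keys
#   load_params(model, "pretrained.pth", prefix="backbone.",    # prepend "backbone." to every key
#               drop_layers=("fc",), strict=False)              # then drop keys containing "fc"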
def load_params_by_order(model, filepath, strict=True):
"""The parameter name of the pre-training model is different from the parameter name of the model"""
load_state_dict = torch.load(filepath)
    # --------------------- algorithm
load_keys = list(load_state_dict.keys())
model_keys = list(model.state_dict().keys())
assert len(load_keys) == len(model_keys)
# by order
for load_key, model_key in zip(load_keys, model_keys):
load_state_dict[model_key] = load_state_dict.pop(load_key)
return model.load_state_dict(load_state_dict, strict)
# select the execution device
def select_device(device, batch_size=None):
"""copy from yolov5. https://github.com/ultralytics/yolov5"""
# device = 'cpu' or '0' or '0,1,2,3'
s = f'EfficientNet torch {torch.__version__} ' # string
cpu = device.lower() == 'cpu'
if cpu:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False
elif device: # non-cpu device requested
os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable
assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability
cuda = not cpu and torch.cuda.is_available()
if cuda:
n = torch.cuda.device_count()
if n > 1 and batch_size: # check that batch_size is compatible with device_count
assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
space = ' ' * len(s)
for i, d in enumerate(device.split(',') if device else range(n)):
p = torch.cuda.get_device_properties(i)
s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB
else:
s += 'CPU\n'
print(s) # emoji-safe
return torch.device('cuda:0' if cuda else 'cpu')
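# Usage sketch (the device strings are examples): select_device('0,1', batch_size=32)
# pins CUDA_VISIBLE_DEVICES to GPUs 0 and 1 and checks that the batch size divides
# the GPU count, while select_device('cpu') forces CPU execution.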
def processing(x, target, transform=None):
"""
:param x: numpy, shape[H, W, C]. RGB
    :return: Tensor, shape[C, H, W]. RGB. 0-255
"""
if transform is not None:
x, target = transform(x, target)
x = x[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to (C, H, W)
x = np.ascontiguousarray(x)
x = torch.from_numpy(x)
return x, target
| 30.375 | 118 | 0.627058 |
83468c5f5e0147717f0d0e1cdda84661be58b8f2 | 5,920 | py | Python | minemeld/flask/config.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 147 | 2016-07-22T18:15:49.000Z | 2022-03-26T23:32:44.000Z | minemeld/flask/config.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 167 | 2016-07-27T07:02:25.000Z | 2021-12-16T16:26:52.000Z | minemeld/flask/config.py | zul126/minemeld-core | 2eb9b9bfd7654aee57aabd5fb280d4e89a438daf | [
"Apache-2.0"
] | 112 | 2016-07-22T07:14:29.000Z | 2022-03-24T18:43:12.000Z | # Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import gevent
import yaml
import filelock
import passlib.apache
from . import utils
from .logger import LOG
CONFIG = {}
API_CONFIG_PATH = None
API_CONFIG_LOCK = None
CONFIG_FILES_RE = '^(?:(?:[0-9]+.*\.yml)|(?:.*\.htpasswd))$'
# if you change things here change also backup/import API
_AUTH_DBS = {
'USERS_DB': 'wsgi.htpasswd',
'FEEDS_USERS_DB': 'feeds.htpasswd'
}
def get(key, default=None):
try:
result = CONFIG[key]
except KeyError:
pass
else:
return result
try:
result = os.environ[key]
except KeyError:
pass
else:
if result == 'False':
result = False
if result == 'True':
result = True
return result
return default
def store(file, value):
with API_CONFIG_LOCK.acquire():
with open(os.path.join(API_CONFIG_PATH, file), 'w+') as f:
yaml.safe_dump(value, stream=f)
def lock():
return API_CONFIG_LOCK.acquire()
class APIConfigDict(object):
def __init__(self, attribute, level=50):
self.attribute = attribute
self.filename = '%d-%s.yml' % (level, attribute.lower().replace('_', '-'))
def set(self, key, value):
curvalues = get(self.attribute, {})
curvalues[key] = value
store(self.filename, {self.attribute: curvalues})
def delete(self, key):
curvalues = get(self.attribute, {})
curvalues.pop(key, None)
store(self.filename, {self.attribute: curvalues})
def value(self):
return get(self.attribute, {})
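# Rough usage sketch for APIConfigDict (the attribute and key names are invented
# for illustration): with the default level of 50 the instance below persists its
# entries to "50-feeds-attrs.yml" under the api/ config directory; value() returns
# the attribute as currently loaded into CONFIG (refreshed by the config monitor).
#   feeds = APIConfigDict('FEEDS_ATTRS')
#   feeds.set('my-feed', {'interval': 60})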
def _load_config(config_path):
global CONFIG
new_config = {}
# comptaibilty early releases where all the config
# was store in a single file
old_config_file = os.path.join(config_path, 'wsgi.yml')
if os.path.exists(old_config_file):
try:
with open(old_config_file, 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except OSError:
pass
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
if os.path.exists(api_config_path):
config_files = sorted(os.listdir(api_config_path))
for cf in config_files:
if not cf.endswith('.yml'):
continue
try:
with open(os.path.join(api_config_path, cf), 'r') as f:
add_config = yaml.safe_load(f)
if add_config is not None:
new_config.update(add_config)
except (OSError, IOError, ValueError):
LOG.exception('Error loading config file %s' % cf)
CONFIG = new_config
LOG.info('Config loaded: %r', new_config)
def _load_auth_dbs(config_path):
with API_CONFIG_LOCK.acquire():
api_config_path = os.path.join(config_path, 'api')
for env, default in _AUTH_DBS.iteritems():
dbname = get(env, default)
new_db = False
dbpath = os.path.join(
api_config_path,
dbname
)
# for compatibility with old releases
if not os.path.exists(dbpath):
old_dbpath = os.path.join(
config_path,
dbname
)
if os.path.exists(old_dbpath):
dbpath = old_dbpath
else:
new_db = True
CONFIG[env] = passlib.apache.HtpasswdFile(
path=dbpath,
new=new_db
)
LOG.info('%s loaded from %s', env, dbpath)
def _config_monitor(config_path):
api_config_path = os.path.join(config_path, 'api')
dirsnapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
while True:
try:
with API_CONFIG_LOCK.acquire(timeout=600):
new_snapshot = utils.DirSnapshot(api_config_path, CONFIG_FILES_RE)
if new_snapshot != dirsnapshot:
try:
_load_config(config_path)
_load_auth_dbs(config_path)
except gevent.GreenletExit:
break
except:
LOG.exception('Error loading config')
dirsnapshot = new_snapshot
except filelock.Timeout:
LOG.error('Timeout locking config in config monitor')
gevent.sleep(1)
# initialization
def init():
global API_CONFIG_PATH
global API_CONFIG_LOCK
config_path = os.environ.get('MM_CONFIG', None)
if config_path is None:
LOG.critical('MM_CONFIG environment variable not set')
raise RuntimeError('MM_CONFIG environment variable not set')
if not os.path.isdir(config_path):
config_path = os.path.dirname(config_path)
# init global vars
API_CONFIG_PATH = os.path.join(config_path, 'api')
API_CONFIG_LOCK = filelock.FileLock(
os.environ.get('API_CONFIG_LOCK', '/var/run/minemeld/api-config.lock')
)
_load_config(config_path)
_load_auth_dbs(config_path)
if config_path is not None:
gevent.spawn(_config_monitor, config_path)
| 27.663551 | 82 | 0.595439 |
3ca11c5b9dea1d2a3b7f3f26776e22f1642f2849 | 1,191 | py | Python | examples/sitemapindex_and_generator.py | isabella232/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2016-11-30T18:08:02.000Z | 2016-11-30T18:08:02.000Z | examples/sitemapindex_and_generator.py | EHRI/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2021-06-22T08:24:40.000Z | 2021-06-22T08:24:40.000Z | examples/sitemapindex_and_generator.py | isabella232/resync | 6e9ddfa83087a0c122f72a6cc375c490f758b016 | [
"Apache-2.0"
] | 1 | 2021-06-22T08:22:25.000Z | 2021-06-22T08:22:25.000Z | #!/usr/bin/env python
from resync.resource import Resource
from resync.resource_list import ResourceList
from resync.list_base_with_index import ListBaseIndexError
import sys
try:
max_sitemap_entries = int(sys.argv[1])
except:
max_sitemap_entries = 2
print("### max_sitemap_entries=%d" % max_sitemap_entries)
basename = 'http://example.com/resourcelist.xml'
my_resources = [ Resource('a'), Resource('b'), Resource('c') ]
def my_resource_list():
"""Simulate the generator used by simulator"""
rl = ResourceList( resources=iter(my_resources), count=len(my_resources) )
rl.max_sitemap_entries = max_sitemap_entries
return(rl)
print("\n### ResourceListHandler")
rl = my_resource_list()
if (rl.requires_multifile()):
    print("Sitemapindex:")
else:
    print("Single sitemap:")
print(rl.as_xml( allow_multifile=True, basename=basename ))
print("\n### ResourceListPartHandler")
rl = my_resource_list()
num_parts=rl.requires_multifile()
if (num_parts):
for part_number in range(0,num_parts):
rl = my_resource_list()
print("Part %d:" % (part_number))
        print(rl.as_xml_part( basename=basename, part_number=part_number ))
else:
print("404 - no parts")
| 28.357143 | 78 | 0.72628 |
d64a8bebf5d8656a86b858cc847e82484a662966 | 914 | py | Python | sd_range_slider/__init__.py | mjclawar/sd-range-slider | 9c6f28d58c0d136826b8c91644b3759948a67af0 | [
"MIT"
] | 2 | 2018-11-22T08:14:17.000Z | 2020-10-21T22:34:19.000Z | sd_range_slider/__init__.py | mjclawar/sd-range-slider | 9c6f28d58c0d136826b8c91644b3759948a67af0 | [
"MIT"
] | 6 | 2017-12-04T14:47:19.000Z | 2019-07-12T14:56:49.000Z | sd_range_slider/__init__.py | mjclawar/sd-range-slider | 9c6f28d58c0d136826b8c91644b3759948a67af0 | [
"MIT"
] | 1 | 2020-10-21T22:34:30.000Z | 2020-10-21T22:34:30.000Z | import os as _os
import dash as _dash
import sys as _sys
from .version import __version__
_current_path = _os.path.dirname(_os.path.abspath(__file__))
_components = _dash.development.component_loader.load_components(
_os.path.join(_current_path, 'metadata.json'),
'sd_range_slider'
)
_this_module = _sys.modules[__name__]
_js_dist = [
{
"relative_package_path": "bundle.js",
"external_url": (
"https://unpkg.com/sd-range-slider@{}"
"/sd_range_slider/bundle.js"
).format(__version__),
"namespace": "sd_range_slider"
}
]
_css_dist = [
{
"external_url": [
"https://unpkg.com/rc-slider@6.1.2/assets/index.css",
],
}
]
for _component in _components:
setattr(_this_module, _component.__name__, _component)
setattr(_component, '_js_dist', _js_dist)
setattr(_component, '_css_dist', _css_dist)
| 22.85 | 65 | 0.657549 |
22cc1857f2aa0aad436de60e6a9dcea9365abce9 | 3,268 | py | Python | x_tef/flux_qe_salt_budget.py | parkermac/LiveOcean | bef3e1e729ada1069853dd4f57f79f452b54f4fa | [
"MIT"
] | 4 | 2015-06-09T18:53:11.000Z | 2021-08-19T01:39:38.000Z | x_tef/flux_qe_salt_budget.py | parkermac/LiveOcean | bef3e1e729ada1069853dd4f57f79f452b54f4fa | [
"MIT"
] | null | null | null | x_tef/flux_qe_salt_budget.py | parkermac/LiveOcean | bef3e1e729ada1069853dd4f57f79f452b54f4fa | [
"MIT"
] | 1 | 2017-03-07T01:28:49.000Z | 2017-03-07T01:28:49.000Z | """
Calculates an exchange-flow oriented salt budget from TEF terms, and
explores dynamical scaling:
does Qe behave as expected relative to dSbar_dx and K?
"""
# imports
import matplotlib.pyplot as plt
import numpy as np
import pickle
import pandas as pd
from datetime import datetime, timedelta
import os; import sys
sys.path.append(os.path.abspath('../alpha'))
import Lfun
import zrfun
import zfun
import tef_fun
import flux_fun
from importlib import reload
reload(flux_fun)
from time import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-g', '--gridname', type=str, default='cas6')
parser.add_argument('-t', '--tag', type=str, default='v3')
parser.add_argument('-x', '--ex_name', type=str, default='lo8b')
parser.add_argument('-y', '--year', type=int, default=2017)
parser.add_argument('-v', '--volume', type=str, default='Puget Sound')
args = parser.parse_args()
which_vol = args.volume
year_str = str(args.year)
# Get Ldir
Ldir = Lfun.Lstart(args.gridname, args.tag)
gtagex = args.gridname + '_' + args.tag + '_' + args.ex_name
# select input/output location
run_name = gtagex+'_'+year_str+'.01.01_'+year_str+'.12.31'
indir00 = Ldir['LOo'] + 'tef/'
indir0 = indir00 + run_name + '/'
indir = indir0 + 'flux/'
# load low passed segment volume and net salt DataFrames
v_lp_df = pd.read_pickle(indir + 'daily_segment_volume.p')
sv_lp_df = pd.read_pickle(indir + 'daily_segment_net_salt.p')
# get volumes
voldir = indir00 + 'volumes_' + Ldir['gridname'] + '/'
v_df = pd.read_pickle(voldir + 'volumes.p')
# get section definitions
sect_df = tef_fun.get_sect_df()
if which_vol == 'Salish Sea':
seg_list = list(v_lp_df.columns)
sect_sign_dict = {'jdf1':1, 'sog5':-1}
elif which_vol == 'Puget Sound':
seg_list = (flux_fun.ssA + flux_fun.ssM + flux_fun.ssT
+ flux_fun.ssS + flux_fun.ssW + flux_fun.ssH)
sect_sign_dict = {'ai1':1, 'dp':1}
elif which_vol == 'Hood Canal':
seg_list = flux_fun.ssH
sect_sign_dict = {'hc1':1}
elif which_vol == 'South Sound':
seg_list = flux_fun.ssT + flux_fun.ssS
sect_sign_dict = {'tn1':1}
v_lp_df = v_lp_df[seg_list]
sv_lp_df = sv_lp_df[seg_list]
river_list = []
for seg_name in seg_list:
seg = flux_fun.segs[seg_name]
river_list = river_list + seg['R']
riv_df = pd.read_pickle(Ldir['LOo'] + 'river/'
+ Ldir['gtag'] + '_'+year_str+'.01.01_'+year_str+'.12.31.p')
riv_df.index += timedelta(days=0.5)
riv_df = riv_df[river_list]
tef_df_dict = {}
for sn in sect_sign_dict.keys():
in_sign = sect_sign_dict[sn]
tef_df_dict[sn] = flux_fun.get_fluxes(indir0, sn, in_sign=in_sign)
vol_df, salt_df, vol_rel_err, salt_rel_err, salt_rel_err_qe = flux_fun.get_budgets(
sv_lp_df, v_lp_df, riv_df, tef_df_dict, seg_list)
plt.close('all')
fig = plt.figure(figsize=(15,10))
ax = fig.add_subplot(111)
salt_df[['dSnet_dt','-QrSbar','QeDS','Error']].plot(ax=ax, grid=True, legend=False)
ax.set_title('%s Salt Budget $(g\ kg^{-1}\ m^{3}s^{-1})$' % (which_vol))
ax.set_xlim(salt_df.index[0], salt_df.index[-1])
legh = ax.legend(labels=['$d S_{net} / dt$', '$Q_{net} S_{bar}$', '$Q_e \Delta S$', 'Error'])
ax.text(.05,.9, 'Mean Error / Mean QeDS = %0.2f%%' % (salt_rel_err_qe*100), transform=ax.transAxes, fontsize=14)
plt.show()
| 30.830189 | 112 | 0.69492 |
8adff3eaf2e607b45a4a743bdd891f7efa583ac3 | 1,430 | py | Python | gviewer/controller.py | chhsiao90/gviewer | ae34b13236635356b7a7c68448a3c2529e700c8c | [
"MIT"
] | 11 | 2018-12-20T12:00:25.000Z | 2021-09-22T16:14:45.000Z | gviewer/controller.py | chhsiao90/gviewer | ae34b13236635356b7a7c68448a3c2529e700c8c | [
"MIT"
] | 39 | 2016-06-28T14:40:09.000Z | 2016-09-13T09:20:25.000Z | gviewer/controller.py | chhsiao90/gviewer | ae34b13236635356b7a7c68448a3c2529e700c8c | [
"MIT"
] | 1 | 2018-09-07T11:37:20.000Z | 2018-09-07T11:37:20.000Z | class Controller(object):
"""Controller provide UI interaction API
Attributes:
parent: ParentFrame instance
"""
def __init__(self, parent):
self.parent = parent
def open_view(self, widget, push_prev=True):
"""Open view
Args:
widget: a urwid Widget implementation
push_prev: bool defined that should push previous widget
into history
"""
self.parent.open_view(widget, push_prev=push_prev)
def open_view_by_context(self, context):
"""Open view by defined context
Args:
context: DisplayerContext
"""
self.parent.open_view_by_context(context)
def notify(self, message):
"""Notify a message"""
self.parent.notify(message)
def open_edit(self, widget):
"""Open edit box in footer"""
self.parent.open_edit(widget)
def close_edit(self):
"""Close edit box"""
self.parent.close_edit()
def open_error(self):
"""Open error stacktrace view"""
self.parent.open_error()
def back(self):
"""Back to previous view"""
self.parent.back()
def _update_info(self, widget, info):
self.parent.update_info(widget, info)
def _focus_body(self):
self.parent.focus_position = "body"
def _run_before_keypress(self):
self.parent.run_before_keypress()
| 25.535714 | 68 | 0.605594 |
32ea684c8ee8af2713776693d7f8a51b2a94127f | 3,831 | py | Python | experiments/compare_epsilon.py | sio13/turtleNet | 02195eb33a28d238a769a3f34d6f1b24255ab40f | [
"MIT"
] | 1 | 2021-06-10T08:31:35.000Z | 2021-06-10T08:31:35.000Z | experiments/compare_epsilon.py | sio13/turtleNet | 02195eb33a28d238a769a3f34d6f1b24255ab40f | [
"MIT"
] | 2 | 2020-05-15T11:14:14.000Z | 2020-05-15T11:14:46.000Z | experiments/compare_epsilon.py | sio13/turtleNet | 02195eb33a28d238a769a3f34d6f1b24255ab40f | [
"MIT"
] | null | null | null | import config
from attacks import attack
from defences.train import TurtleNet
from keras.datasets import mnist, cifar10
from keras.models import load_model
from keras.utils import to_categorical
from cleverhans.attacks import *
import cleverhans
import numpy as np
import time
from utils import get_keras_dataset, save_image_and_collage, print_evaluation, load_or_train_model
from defences.filters import threshold_data
from evaluation import eval_models
from architectures.target_model_mnist import CNNModelMnist as MnistNetwork
from architectures.target_model_cifar_10_better import CNNCifar10Model as CifarNetwork
from keras import backend
sess = backend.get_session()
def compare_epsilon(dataset_name: str,
dataset: tuple,
compiled_model,
epsilons: list,
clip_min: float,
clip_max: float,
attack_type: cleverhans.attacks,
epochs: int = 5,
need_train: bool = False,
result_picture_image_dir: str = 'results/compare_epsilon',
sample_image_index: int = 2):
x_train, y_train, x_test, y_test = dataset
model = load_or_train_model(compiled_model=compiled_model,
dataset_name=dataset_name,
epochs=epochs,
models_dir_name='models',
model_type='compare_epsilon',
need_train=need_train
)
rows = 3
columns = 3
save_image_and_collage(dir_path=result_picture_image_dir,
image_name=dataset_name,
array=x_test[:rows * columns],
image_type='natural',
rows=rows,
columns=columns,
sample_image_index=sample_image_index)
for epsilon in epsilons:
adv_attack = attack.Attack(attack_type, epsilon, clip_min, clip_max)
start_time_attack = time.time()
adv_samples = adv_attack.generate_perturbations(np.array(x_test[:rows * columns]), model, 1)
end_time_attack = time.time()
save_image_and_collage(dir_path=result_picture_image_dir,
image_name=dataset_name,
array=adv_samples,
image_type=f'adversarial_epsilon{epsilon}',
rows=rows,
columns=columns,
sample_image_index=sample_image_index)
print(f"Attacks on {dataset_name} with epsilon {epsilon} lasted {end_time_attack - start_time_attack}")
print(f"Using {attack_type}")
if __name__ == '__main__':
cifar_model = CifarNetwork()
mnist_model = MnistNetwork()
compare_epsilon(dataset_name='mnist',
dataset=get_keras_dataset(mnist.load_data()),
compiled_model=mnist_model,
epsilons=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 1, 1.2, 1.5, 2, 3],
clip_min=None,
clip_max=None,
epochs=5,
attack_type=ProjectedGradientDescent,
need_train=False)
compare_epsilon(dataset_name='cifar10',
dataset=get_keras_dataset(cifar10.load_data(), input_shape=(-1, 32, 32, 3)),
compiled_model=cifar_model,
epsilons=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 1, 1.2, 1.5, 2, 3],
clip_min=None,
clip_max=None,
epochs=10,
attack_type=ProjectedGradientDescent,
need_train=True)
| 38.31 | 111 | 0.564866 |
d858e38473bda9d2b6d7064fcc3f09826040db4f | 238 | py | Python | tests/eth2/hash-utils/test_hash.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | tests/eth2/hash-utils/test_hash.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | tests/eth2/hash-utils/test_hash.py | Gauddel/trinity | 0b12943ac36f4090abc22fc965e9e9a4f42c6f35 | [
"MIT"
] | null | null | null | from eth_hash.auto import keccak
from eth2._utils.hash import hash_eth2
def test_hash():
output = hash_eth2(b'helloworld')
assert len(output) == 32
def test_hash_is_keccak256():
assert hash_eth2(b'foo') == keccak(b'foo')
| 18.307692 | 46 | 0.714286 |
9d954b83e27eb24661194ecdcf153cdebfb45a3f | 2,419 | py | Python | deepchem/models/tests/test_lcnn.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/models/tests/test_lcnn.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/models/tests/test_lcnn.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | import pytest
import tempfile
from os import path
import numpy as np
from deepchem.utils import load_dataset_from_disk, download_url, untargz_file
from deepchem.metrics import Metric, mae_score
try:
import dgl
import torch
from deepchem.models import LCNNModel
has_pytorch_and_dgl = True
except:
has_pytorch_and_dgl = False
URL = "https://deepchemdata.s3-us-west-1.amazonaws.com/featurized_datasets/lcnn_data_feature.tar.gz"
@pytest.mark.torch
def test_lcnn_regression():
current_dir = tempfile.mkdtemp()
download_url(url=URL, dest_dir=current_dir)
untargz_file(path.join(current_dir, 'lcnn_data_feature.tar.gz'), current_dir)
tasks, datasets, transformers = load_dataset_from_disk(
path.join(current_dir, 'lcnn_data'))
train, valid, test = datasets
model = LCNNModel(mode='regression', batch_size=8, learning_rate=0.001)
model.fit(train, nb_epoch=10)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (65, 1)
test_preds = model.predict(test)
assert test_preds.shape == (65, 1)
# check overfit
regression_metric = Metric(mae_score)
scores = model.evaluate(test, [regression_metric], transformers)
assert scores[regression_metric.name] < 0.6
@pytest.mark.torch
def test_lcnn_reload():
# needs change
current_dir = tempfile.mkdtemp()
download_url(url=URL, dest_dir=current_dir)
untargz_file(path.join(current_dir, 'lcnn_data_feature.tar.gz'), current_dir)
tasks, datasets, transformers = load_dataset_from_disk(
path.join(current_dir, 'lcnn_data'))
train, valid, test = datasets
model_dir = tempfile.mkdtemp()
model = LCNNModel(
mode='regression', batch_size=8, learning_rate=0.001, model_dir=model_dir)
model.fit(train, nb_epoch=10)
# check predict shape
valid_preds = model.predict_on_batch(valid.X)
assert valid_preds.shape == (65, 1)
test_preds = model.predict(test)
assert test_preds.shape == (65, 1)
# check overfit
regression_metric = Metric(mae_score)
scores = model.evaluate(test, [regression_metric], transformers)
assert scores[regression_metric.name] < 0.6
# reload
reloaded_model = LCNNModel(
mode='regression', batch_size=8, learning_rate=0.001, model_dir=model_dir)
reloaded_model.restore()
original_pred = model.predict(test)
reload_pred = reloaded_model.predict(test)
assert np.all(np.abs(original_pred - reload_pred) < 0.0000001)
| 31.828947 | 100 | 0.757751 |
eda51225a5ea5ebba8e46eb42733c9c403774810 | 1,939 | py | Python | scripts/csv_to_parquet.py | ostrokach/uniparc_xml_parser | eb5b4863aaae2a754f67cf60b19dbb5d4f2fedac | [
"Apache-2.0"
] | null | null | null | scripts/csv_to_parquet.py | ostrokach/uniparc_xml_parser | eb5b4863aaae2a754f67cf60b19dbb5d4f2fedac | [
"Apache-2.0"
] | null | null | null | scripts/csv_to_parquet.py | ostrokach/uniparc_xml_parser | eb5b4863aaae2a754f67cf60b19dbb5d4f2fedac | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from typing import List
import pyarrow as pa
import pyarrow.parquet as pq
from pyarrow import csv
from tqdm import tqdm
def csv_to_parquet(
csv_file: Path,
parquet_file: Path,
*,
delimiter: str,
column_names: List[str],
quiet: bool = False,
) -> None:
block_size = 1 << 24 # 16 MB
read_options = csv.ReadOptions(column_names=column_names, block_size=block_size)
parse_options = csv.ParseOptions(delimiter=delimiter)
writer = None
with csv.open_csv(
csv_file, read_options=read_options, parse_options=parse_options
) as csv_reader:
for batch in tqdm(csv_reader, disable=quiet):
if writer is None:
writer = pq.ParquetWriter(parquet_file, csv_reader.schema, compression="zstd")
table = pa.Table.from_batches([batch])
writer.write_table(table)
if writer is not None:
writer.close()
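# Example invocation of the CLI below (the file and column names are placeholders):
#   python csv_to_parquet.py -f uniparc.tsv -c uniparc_id,sequence -o uniparc.parquet
# The delimiter defaults to a tab.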
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--input-file", help="input CSV file")
parser.add_argument("-c", "--column-names", help="names of columns")
parser.add_argument("-d", "--delimiter", default="\t", help="delimiter used by the CSV file")
parser.add_argument("-o", "--output-file", default=None, help="output Parquer file")
parser.add_argument("-q", "--quiet", action="store_true", help="disable progressbar")
args = parser.parse_args()
csv_file = Path(args.input_file).resolve(strict=True)
parquet_file = (
        Path(args.output_file).resolve()  # the output file need not exist yet
if args.output_file is not None
else csv_file.with_suffix(".parquet")
)
delimiter = args.delimiter
column_names = args.column_names.split(",")
csv_to_parquet(
csv_file,
parquet_file,
column_names=column_names,
delimiter=args.delimiter,
quiet=args.quiet,
)
| 31.786885 | 97 | 0.665807 |
ecdf80045cc08682bbb0364d2220f519410b498c | 1,332 | py | Python | tests/unit/omnibot/services/slack/bot_test.py | isabella232/omnibot | b62ccec1bf77b16a498ce5cd5f16c1812102f75b | [
"Apache-2.0"
] | null | null | null | tests/unit/omnibot/services/slack/bot_test.py | isabella232/omnibot | b62ccec1bf77b16a498ce5cd5f16c1812102f75b | [
"Apache-2.0"
] | 1 | 2021-02-23T23:16:34.000Z | 2021-02-23T23:16:34.000Z | tests/unit/omnibot/services/slack/bot_test.py | mogofinancial/omnibot | b62ccec1bf77b16a498ce5cd5f16c1812102f75b | [
"Apache-2.0"
] | null | null | null | import pytest
from omnibot.services.slack.team import Team
from omnibot.services.slack.bot import Bot, BotInitializationError
def test_team():
_team = Team.get_team_by_name('testteam')
_bot = Bot.get_bot_by_name(_team, 'echobot')
assert _bot.name == 'echobot'
assert _bot.bot_id == 'A12345678'
assert _bot.team == _team
assert _bot.oauth_token == '1234'
assert _bot.oauth_bot_token == '1234'
assert _bot.verification_token == '1234'
_team = Team.get_team_by_id(team_id='TABCDEF12')
_bot = Bot.get_bot_by_bot_id(_team, 'A98765432')
assert _bot.name == 'echobot'
assert _bot.bot_id == 'A98765432'
assert _bot.team == _team
assert _bot.oauth_token == '1234'
assert _bot.oauth_bot_token == ''
assert _bot.verification_token == '1234'
_team = Team.get_team_by_name('testteam')
_bot = Bot.get_bot_by_verification_token('5555')
assert _bot.name == 'pingbot'
assert _bot.bot_id == 'AABCDEF12'
assert _bot.team == _team
assert _bot.oauth_token == '5555'
assert _bot.oauth_bot_token == '5555'
assert _bot.verification_token == '5555'
with pytest.raises(BotInitializationError):
_bot = Bot.get_bot_by_name(_team, 'fakebot')
with pytest.raises(BotInitializationError):
_bot = Bot.get_bot_by_bot_id(_team, 'BADBOTID')
| 33.3 | 66 | 0.70045 |
ce2be5c67dfeb07cf091f48ae969cb768a225031 | 218 | py | Python | src/alvarium/hash/mock.py | welo10/alvarium-sdk-python | a62aeafc62bd6bc4e11538c357e422e7a51cf825 | [
"Apache-2.0"
] | null | null | null | src/alvarium/hash/mock.py | welo10/alvarium-sdk-python | a62aeafc62bd6bc4e11538c357e422e7a51cf825 | [
"Apache-2.0"
] | null | null | null | src/alvarium/hash/mock.py | welo10/alvarium-sdk-python | a62aeafc62bd6bc4e11538c357e422e7a51cf825 | [
"Apache-2.0"
] | null | null | null | from .interfaces import HashProvider
class NoneHashProvider(HashProvider):
"""A mock implementation for the HashProvider interface"""
def derive(self, data: bytes) -> str:
return data.decode("utf-8") | 31.142857 | 62 | 0.715596 |
341782573a5734103bacee0d54cc3b62e9baad9c | 6,195 | py | Python | mmdet/core/bbox/transforms.py | YLyeliang/mmdetection_notes | e5702b3c15290b825f8d9b92bfa79fcf7a4a6888 | [
"Apache-2.0"
] | null | null | null | mmdet/core/bbox/transforms.py | YLyeliang/mmdetection_notes | e5702b3c15290b825f8d9b92bfa79fcf7a4a6888 | [
"Apache-2.0"
] | null | null | null | mmdet/core/bbox/transforms.py | YLyeliang/mmdetection_notes | e5702b3c15290b825f8d9b92bfa79fcf7a4a6888 | [
"Apache-2.0"
] | null | null | null | import mmcv
import numpy as np
import torch
def bbox2delta(proposals, gt, means=[0, 0, 0, 0], stds=[1, 1, 1, 1]):
assert proposals.size() == gt.size()
proposals = proposals.float()
gt = gt.float()
px = (proposals[..., 0] + proposals[..., 2]) * 0.5
py = (proposals[..., 1] + proposals[..., 3]) * 0.5
pw = proposals[..., 2] - proposals[..., 0] + 1.0
ph = proposals[..., 3] - proposals[..., 1] + 1.0
gx = (gt[..., 0] + gt[..., 2]) * 0.5
gy = (gt[..., 1] + gt[..., 3]) * 0.5
gw = gt[..., 2] - gt[..., 0] + 1.0
gh = gt[..., 3] - gt[..., 1] + 1.0
dx = (gx - px) / pw
dy = (gy - py) / ph
dw = torch.log(gw / pw)
dh = torch.log(gh / ph)
deltas = torch.stack([dx, dy, dw, dh], dim=-1)
means = deltas.new_tensor(means).unsqueeze(0)
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
return deltas
def delta2bbox(rois,
deltas,
means=[0, 0, 0, 0],
stds=[1, 1, 1, 1],
max_shape=None,
wh_ratio_clip=16 / 1000):
"""
:param rois:
:param deltas: size(-1,4)
:param means:
:param stds:
:param max_shape:
:param wh_ratio_clip:
"""
means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) # size (1,1)
stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) # size (1,1)
denorm_deltas = deltas * stds + means # (delta-u)/std = norm_delta => denorm_delta = delta*std+mean
dx = denorm_deltas[:, 0::4] # dx size (-1,1)
dy = denorm_deltas[:, 1::4]
dw = denorm_deltas[:, 2::4]
dh = denorm_deltas[:, 3::4]
max_ratio = np.abs(np.log(wh_ratio_clip))
dw = dw.clamp(min=-max_ratio, max=max_ratio)
dh = dh.clamp(min=-max_ratio, max=max_ratio)
px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) # anchor center x ,same shape as dx
py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)
pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)
ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)
gw = pw * dw.exp() # ground_w = anchor_w * exp(delta_w)
gh = ph * dh.exp()
gx = torch.addcmul(px, 1, pw, dx) # gx = px + pw * dx
gy = torch.addcmul(py, 1, ph, dy) # gy = py + ph * dy
x1 = gx - gw * 0.5 + 0.5
y1 = gy - gh * 0.5 + 0.5
x2 = gx + gw * 0.5 - 0.5
y2 = gy + gh * 0.5 - 0.5
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
return bboxes
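# Sanity-check sketch (the tensors are illustrative): encoding a ground-truth box
# against a proposal with bbox2delta() and decoding with delta2bbox() is the
# identity up to floating-point error:
#   p = torch.tensor([[0., 0., 9., 9.]])     # 10x10 proposal
#   g = torch.tensor([[2., 2., 13., 13.]])   # 12x12 ground truth
#   d = bbox2delta(p, g)                     # -> (dx, dy, dw, dh) = (0.3, 0.3, log 1.2, log 1.2)
#   delta2bbox(p, d)                         # -> approximately [[2., 2., 13., 13.]]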
def bbox_flip(bboxes, img_shape):
"""Flip bboxes horizontally.
Args:
bboxes(Tensor or ndarray): Shape (..., 4*k)
img_shape(tuple): Image shape.
Returns:
Same type as `bboxes`: Flipped bboxes.
"""
if isinstance(bboxes, torch.Tensor):
assert bboxes.shape[-1] % 4 == 0
flipped = bboxes.clone()
flipped[:, 0::4] = img_shape[1] - bboxes[:, 2::4] - 1
flipped[:, 2::4] = img_shape[1] - bboxes[:, 0::4] - 1
return flipped
elif isinstance(bboxes, np.ndarray):
return mmcv.bbox_flip(bboxes, img_shape)
def bbox_mapping(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from the original image scale to testing scale"""
new_bboxes = bboxes * scale_factor
if flip:
new_bboxes = bbox_flip(new_bboxes, img_shape)
return new_bboxes
def bbox_mapping_back(bboxes, img_shape, scale_factor, flip):
"""Map bboxes from testing scale to original image scale"""
new_bboxes = bbox_flip(bboxes, img_shape) if flip else bboxes
new_bboxes = new_bboxes / scale_factor
return new_bboxes
def bbox2roi(bbox_list):
"""Convert a list of bboxes to roi format.
Args:
bbox_list (list[Tensor]): a list of bboxes corresponding to a batch
of images.
Returns:
Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
"""
rois_list = []
for img_id, bboxes in enumerate(bbox_list):
if bboxes.size(0) > 0:
img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
else:
rois = bboxes.new_zeros((0, 5))
rois_list.append(rois)
rois = torch.cat(rois_list, 0)
return rois
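# Illustrative example (shapes only): for a batch of two images with 2 and 1
# boxes respectively, bbox2roi() returns a (3, 5) tensor whose first column holds
# the image indices [0, 0, 1] and whose remaining columns are (x1, y1, x2, y2).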
def roi2bbox(rois):
bbox_list = []
img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
for img_id in img_ids:
inds = (rois[:, 0] == img_id.item())
bbox = rois[inds, 1:]
bbox_list.append(bbox)
return bbox_list
def bbox2result(bboxes, labels, num_classes):
"""Convert detection results to a list of numpy arrays.
Args:
bboxes (Tensor): shape (n, 5)
labels (Tensor): shape (n, )
num_classes (int): class number, including background class
Returns:
list(ndarray): bbox results of each class
"""
if bboxes.shape[0] == 0:
return [
np.zeros((0, 5), dtype=np.float32) for i in range(num_classes - 1)
]
else:
bboxes = bboxes.cpu().numpy()
labels = labels.cpu().numpy()
return [bboxes[labels == i, :] for i in range(num_classes - 1)]
def distance2bbox(points, distance, max_shape=None):
"""Decode distance prediction to bounding box.
Args:
points (Tensor): Shape (n, 2), [x, y].
distance (Tensor): Distance from the given point to 4
boundaries (left, top, right, bottom).
max_shape (tuple): Shape of the image.
Returns:
Tensor: Decoded bboxes.
"""
x1 = points[:, 0] - distance[:, 0]
y1 = points[:, 1] - distance[:, 1]
x2 = points[:, 0] + distance[:, 2]
y2 = points[:, 1] + distance[:, 3]
if max_shape is not None:
x1 = x1.clamp(min=0, max=max_shape[1] - 1)
y1 = y1.clamp(min=0, max=max_shape[0] - 1)
x2 = x2.clamp(min=0, max=max_shape[1] - 1)
y2 = y2.clamp(min=0, max=max_shape[0] - 1)
return torch.stack([x1, y1, x2, y2], -1)
| 32.605263 | 109 | 0.560452 |
9cd7da42c85197a9810d82d72cba886ca392840a | 8,913 | py | Python | sdk/python/pulumi_azure_nextgen/network/v20170601/get_inbound_nat_rule.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_nextgen/network/v20170601/get_inbound_nat_rule.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_nextgen/network/v20170601/get_inbound_nat_rule.py | pulumi/pulumi-azure-nextgen | 452736b0a1cf584c2d4c04666e017af6e9b2c15c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetInboundNatRuleResult',
'AwaitableGetInboundNatRuleResult',
'get_inbound_nat_rule',
]
@pulumi.output_type
class GetInboundNatRuleResult:
"""
Inbound NAT rule of the load balancer.
"""
def __init__(__self__, backend_ip_configuration=None, backend_port=None, enable_floating_ip=None, etag=None, frontend_ip_configuration=None, frontend_port=None, id=None, idle_timeout_in_minutes=None, name=None, protocol=None, provisioning_state=None):
if backend_ip_configuration and not isinstance(backend_ip_configuration, dict):
raise TypeError("Expected argument 'backend_ip_configuration' to be a dict")
pulumi.set(__self__, "backend_ip_configuration", backend_ip_configuration)
if backend_port and not isinstance(backend_port, int):
raise TypeError("Expected argument 'backend_port' to be a int")
pulumi.set(__self__, "backend_port", backend_port)
if enable_floating_ip and not isinstance(enable_floating_ip, bool):
raise TypeError("Expected argument 'enable_floating_ip' to be a bool")
pulumi.set(__self__, "enable_floating_ip", enable_floating_ip)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if frontend_ip_configuration and not isinstance(frontend_ip_configuration, dict):
raise TypeError("Expected argument 'frontend_ip_configuration' to be a dict")
pulumi.set(__self__, "frontend_ip_configuration", frontend_ip_configuration)
if frontend_port and not isinstance(frontend_port, int):
raise TypeError("Expected argument 'frontend_port' to be a int")
pulumi.set(__self__, "frontend_port", frontend_port)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if protocol and not isinstance(protocol, str):
raise TypeError("Expected argument 'protocol' to be a str")
pulumi.set(__self__, "protocol", protocol)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
@property
@pulumi.getter(name="backendIPConfiguration")
def backend_ip_configuration(self) -> 'outputs.NetworkInterfaceIPConfigurationResponse':
"""
A reference to a private IP address defined on a network interface of a VM. Traffic sent to the frontend port of each of the frontend IP configurations is forwarded to the backend IP.
"""
return pulumi.get(self, "backend_ip_configuration")
@property
@pulumi.getter(name="backendPort")
def backend_port(self) -> Optional[int]:
"""
The port used for the internal endpoint. Acceptable values range from 1 to 65535.
"""
return pulumi.get(self, "backend_port")
@property
@pulumi.getter(name="enableFloatingIP")
def enable_floating_ip(self) -> Optional[bool]:
"""
Configures a virtual machine's endpoint for the floating IP capability required to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
"""
return pulumi.get(self, "enable_floating_ip")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfiguration")
def frontend_ip_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
A reference to frontend IP addresses.
"""
return pulumi.get(self, "frontend_ip_configuration")
@property
@pulumi.getter(name="frontendPort")
def frontend_port(self) -> Optional[int]:
"""
The port for the external endpoint. Port numbers for each rule must be unique within the Load Balancer. Acceptable values range from 1 to 65534.
"""
return pulumi.get(self, "frontend_port")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The timeout for the TCP idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets name of the resource that is unique within a resource group. This name can be used to access the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
The transport protocol for the endpoint. Possible values are: 'Udp' or 'Tcp'
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Gets the provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
class AwaitableGetInboundNatRuleResult(GetInboundNatRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInboundNatRuleResult(
backend_ip_configuration=self.backend_ip_configuration,
backend_port=self.backend_port,
enable_floating_ip=self.enable_floating_ip,
etag=self.etag,
frontend_ip_configuration=self.frontend_ip_configuration,
frontend_port=self.frontend_port,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
name=self.name,
protocol=self.protocol,
provisioning_state=self.provisioning_state)
def get_inbound_nat_rule(expand: Optional[str] = None,
inbound_nat_rule_name: Optional[str] = None,
load_balancer_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInboundNatRuleResult:
"""
Inbound NAT rule of the load balancer.
:param str expand: Expands referenced resources.
:param str inbound_nat_rule_name: The name of the inbound nat rule.
:param str load_balancer_name: The name of the load balancer.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['inboundNatRuleName'] = inbound_nat_rule_name
__args__['loadBalancerName'] = load_balancer_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20170601:getInboundNatRule', __args__, opts=opts, typ=GetInboundNatRuleResult).value
return AwaitableGetInboundNatRuleResult(
backend_ip_configuration=__ret__.backend_ip_configuration,
backend_port=__ret__.backend_port,
enable_floating_ip=__ret__.enable_floating_ip,
etag=__ret__.etag,
frontend_ip_configuration=__ret__.frontend_ip_configuration,
frontend_port=__ret__.frontend_port,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
name=__ret__.name,
protocol=__ret__.protocol,
provisioning_state=__ret__.provisioning_state)
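# Minimal usage sketch (the resource names are placeholders, not real Azure resources):
#   rule = get_inbound_nat_rule(resource_group_name="my-rg",
#                               load_balancer_name="my-lb",
#                               inbound_nat_rule_name="ssh-nat-rule")
#   rule.frontend_port, rule.backend_port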
| 43.906404 | 284 | 0.686525 |
ffcb2e77cc6122fc7d011df491ee8d7acd1950fb | 2,863 | py | Python | lib-src/lv2/suil/waflib/extras/lv2.py | joshrose/audacity | e2b1a2be6b92661628bbb054f915bc50b211c020 | [
"CC-BY-3.0"
] | 7,892 | 2015-03-31T09:24:05.000Z | 2022-03-31T12:30:32.000Z | lib-src/lv2/suil/waflib/extras/lv2.py | joshrose/audacity | e2b1a2be6b92661628bbb054f915bc50b211c020 | [
"CC-BY-3.0"
] | 2,050 | 2015-04-03T13:27:52.000Z | 2022-03-31T19:14:10.000Z | lib-src/lv2/suil/waflib/extras/lv2.py | joshrose/audacity | e2b1a2be6b92661628bbb054f915bc50b211c020 | [
"CC-BY-3.0"
] | 2,613 | 2015-03-26T11:28:10.000Z | 2022-03-30T13:17:03.000Z | import os
import sys
from waflib import Logs
from waflib import Options
def options(opt):
conf_opts = opt.get_option_group('Configuration options')
conf_opts.add_option('--lv2-user', action='store_true', default=False, dest='lv2_user',
help='install LV2 bundles to user location')
conf_opts.add_option('--lv2dir', type='string',
help='LV2 bundles [Default: LIBDIR/lv2]')
def register_lv2_path(conf, path):
"""Return the default LV2_PATH to use for this system"""
if 'LV2_PATH' not in conf.run_env and 'LV2_PATH' not in os.environ:
conf.run_env['LV2_PATH'] = [conf.env['LV2DIR']]
conf.run_env.append_unique('LV2_PATH', path)
def default_lv2_path(conf):
"""Return the default LV2_PATH for the build target as a list"""
if conf.env.DEST_OS == 'darwin':
return ['~/Library/Audio/Plug-Ins/LV2',
'~/.lv2',
'/usr/local/lib/lv2',
'/usr/lib/lv2',
'/Library/Audio/Plug-Ins/LV2']
elif conf.env.DEST_OS == 'haiku':
return ['~/.lv2',
'/boot/common/add-ons/lv2']
elif conf.env.DEST_OS == 'win32':
return ['%APPDATA%\\\\LV2',
'%COMMONPROGRAMFILES%\\\\LV2']
else:
libdirname = os.path.basename(conf.env.LIBDIR)
return ['~/.lv2',
'/usr/%s/lv2' % libdirname,
'/usr/local/%s/lv2' % libdirname]
def configure(conf):
def env_path(parent_dir_var, name):
parent = os.getenv(parent_dir_var)
if parent:
return os.path.join(parent, name)
else:
Logs.warn('Environment variable %s unset, using LIBDIR\n' % parent_dir_var)
return os.path.join(conf.env['LIBDIR'], name)
def normpath(path):
if sys.platform == 'win32':
return os.path.normpath(path).replace('\\', '/')
else:
return os.path.normpath(path)
if Options.options.lv2dir:
conf.env['LV2DIR'] = Options.options.lv2dir
elif Options.options.lv2_user:
if conf.env.DEST_OS == 'darwin':
conf.env['LV2DIR'] = env_path('HOME', 'Library/Audio/Plug-Ins/LV2')
elif conf.env.DEST_OS == 'win32':
conf.env['LV2DIR'] = env_path('APPDATA', 'LV2')
else:
conf.env['LV2DIR'] = env_path('HOME', '.lv2')
else:
if conf.env.DEST_OS == 'darwin':
conf.env['LV2DIR'] = '/Library/Audio/Plug-Ins/LV2'
elif conf.env.DEST_OS == 'win32':
conf.env['LV2DIR'] = env_path('COMMONPROGRAMFILES', 'LV2')
else:
conf.env['LV2DIR'] = os.path.join(conf.env['LIBDIR'], 'lv2')
# Add default LV2_PATH to runtime environment for tests that use plugins
if 'LV2_PATH' not in os.environ:
conf.run_env['LV2_PATH'] = default_lv2_path(conf)
| 37.671053 | 91 | 0.582606 |
f8ed21fdc7506d832a420a6a38b5411f7b12a288 | 8,824 | py | Python | segmentation_models/models/unet.py | cersar/segmentation_models | 3a4b8e415388965a183fa6e28ee98a82ab497cd9 | [
"MIT"
] | null | null | null | segmentation_models/models/unet.py | cersar/segmentation_models | 3a4b8e415388965a183fa6e28ee98a82ab497cd9 | [
"MIT"
] | null | null | null | segmentation_models/models/unet.py | cersar/segmentation_models | 3a4b8e415388965a183fa6e28ee98a82ab497cd9 | [
"MIT"
] | null | null | null | from keras_applications import get_submodules_from_kwargs
from ._common_blocks import Conv2dBn
from ._utils import freeze_model, filter_keras_submodules
from ..backbones.backbones_factory import Backbones
backend = None
layers = None
models = None
keras_utils = None
# ---------------------------------------------------------------------
# Utility functions
# ---------------------------------------------------------------------
def get_submodules():
return {
'backend': backend,
'models': models,
'layers': layers,
'utils': keras_utils,
}
# ---------------------------------------------------------------------
# Blocks
# ---------------------------------------------------------------------
def Conv3x3BnReLU(filters, use_batchnorm, name=None):
kwargs = get_submodules()
def wrapper(input_tensor):
return Conv2dBn(
filters,
kernel_size=3,
activation='relu',
kernel_initializer='he_uniform',
padding='same',
use_batchnorm=use_batchnorm,
name=name,
**kwargs
)(input_tensor)
return wrapper
def DecoderUpsamplingX2Block(filters, stage, use_batchnorm=False):
up_name = 'decoder_stage{}_upsampling'.format(stage)
conv1_name = 'decoder_stage{}a'.format(stage)
conv2_name = 'decoder_stage{}b'.format(stage)
concat_name = 'decoder_stage{}_concat'.format(stage)
concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def wrapper(input_tensor, skip=None):
x = layers.UpSampling2D(size=2, name=up_name)(input_tensor)
if skip is not None:
x = layers.Concatenate(axis=concat_axis, name=concat_name)([x, skip])
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv1_name)(x)
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv2_name)(x)
return x
return wrapper
def DecoderTransposeX2Block(filters, stage, use_batchnorm=False):
transp_name = 'decoder_stage{}a_transpose'.format(stage)
bn_name = 'decoder_stage{}a_bn'.format(stage)
relu_name = 'decoder_stage{}a_relu'.format(stage)
conv_block_name = 'decoder_stage{}b'.format(stage)
concat_name = 'decoder_stage{}_concat'.format(stage)
concat_axis = bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
def layer(input_tensor, skip=None):
x = layers.Conv2DTranspose(
filters,
kernel_size=(4, 4),
strides=(2, 2),
padding='same',
name=transp_name,
use_bias=not use_batchnorm,
)(input_tensor)
if use_batchnorm:
x = layers.BatchNormalization(axis=bn_axis, name=bn_name)(x)
x = layers.Activation('relu', name=relu_name)(x)
if skip is not None:
x = layers.Concatenate(axis=concat_axis, name=concat_name)([x, skip])
x = Conv3x3BnReLU(filters, use_batchnorm, name=conv_block_name)(x)
return x
return layer
# ---------------------------------------------------------------------
# Unet Decoder
# ---------------------------------------------------------------------
def build_unet(
backbone,
decoder_block,
skip_connection_layers,
decoder_filters=(256, 128, 64, 32, 16),
n_upsample_blocks=5,
classes=1,
activation='sigmoid',
use_batchnorm=True,
global_feature=False
):
input_ = backbone.input
x = backbone.output
if global_feature:
concat_axis = 3 if backend.image_data_format() == 'channels_last' else 1
gf = layers.GlobalAveragePooling2D()(x)
gf = layers.Reshape((gf.shape[0],1,1,gf.shape[1]))(gf)
gf = layers.UpSampling2D()(gf)
x = layers.Concatenate(axis=concat_axis)([x, gf])
# extract skip connections
skips = ([backbone.get_layer(name=i).output if isinstance(i, str)
else backbone.get_layer(index=i).output for i in skip_connection_layers])
# add center block if previous operation was maxpooling (for vgg models)
if isinstance(backbone.layers[-1], layers.MaxPooling2D):
x = Conv3x3BnReLU(512, use_batchnorm, name='center_block1')(x)
x = Conv3x3BnReLU(512, use_batchnorm, name='center_block2')(x)
# building decoder blocks
for i in range(n_upsample_blocks):
if i < len(skips):
skip = skips[i]
else:
skip = None
x = decoder_block(decoder_filters[i], stage=i, use_batchnorm=use_batchnorm)(x, skip)
# model head (define number of output classes)
x = layers.Conv2D(
filters=classes,
kernel_size=(3, 3),
padding='same',
use_bias=True,
kernel_initializer='glorot_uniform',
name='final_conv',
)(x)
x = layers.Activation(activation, name=activation)(x)
# create keras model instance
model = models.Model(input_, x)
return model
# ---------------------------------------------------------------------
# Unet Model
# ---------------------------------------------------------------------
def Unet(
backbone_name='vgg16',
input_shape=(None, None, 3),
classes=1,
activation='sigmoid',
weights=None,
encoder_weights='imagenet',
encoder_freeze=False,
encoder_features='default',
decoder_block_type='upsampling',
decoder_filters=(256, 128, 64, 32, 16),
decoder_use_batchnorm=True,
global_feature=False,
**kwargs
):
""" Unet_ is a fully convolution neural network for image semantic segmentation
Args:
backbone_name: name of classification model (without last dense layers) used as feature
extractor to build segmentation model.
input_shape: shape of input data/image ``(H, W, C)``, in general
case you do not need to set ``H`` and ``W`` shapes, just pass ``(None, None, C)`` to make your model be
            able to process images of any size, but ``H`` and ``W`` of input images should be divisible by factor ``32``.
classes: a number of classes for output (output shape - ``(h, w, classes)``).
activation: name of one of ``keras.activations`` for last model layer
(e.g. ``sigmoid``, ``softmax``, ``linear``).
weights: optional, path to model weights.
encoder_weights: one of ``None`` (random initialization), ``imagenet`` (pre-training on ImageNet).
encoder_freeze: if ``True`` set all layers of encoder (backbone model) as non-trainable.
encoder_features: a list of layer numbers or names starting from top of the model.
Each of these layers will be concatenated with corresponding decoder block. If ``default`` is used
layer names are taken from ``DEFAULT_SKIP_CONNECTIONS``.
decoder_block_type: one of blocks with following layers structure:
- `upsampling`: ``UpSampling2D`` -> ``Conv2D`` -> ``Conv2D``
- `transpose`: ``Transpose2D`` -> ``Conv2D``
decoder_filters: list of numbers of ``Conv2D`` layer filters in decoder blocks
decoder_use_batchnorm: if ``True``, ``BatchNormalisation`` layer between ``Conv2D`` and ``Activation`` layers
is used.
Returns:
``keras.models.Model``: **Unet**
.. _Unet:
https://arxiv.org/pdf/1505.04597
"""
global backend, layers, models, keras_utils
submodule_args = filter_keras_submodules(kwargs)
backend, layers, models, keras_utils = get_submodules_from_kwargs(submodule_args)
if decoder_block_type == 'upsampling':
decoder_block = DecoderUpsamplingX2Block
elif decoder_block_type == 'transpose':
decoder_block = DecoderTransposeX2Block
else:
raise ValueError('Decoder block type should be in ("upsampling", "transpose"). '
'Got: {}'.format(decoder_block_type))
backbone = Backbones.get_backbone(
backbone_name,
input_shape=input_shape,
weights=encoder_weights,
include_top=False,
**kwargs,
)
if encoder_features == 'default':
encoder_features = Backbones.get_feature_layers(backbone_name, n=4)
model = build_unet(
backbone=backbone,
decoder_block=decoder_block,
skip_connection_layers=encoder_features,
decoder_filters=decoder_filters,
classes=classes,
activation=activation,
n_upsample_blocks=len(decoder_filters),
use_batchnorm=decoder_use_batchnorm,
global_feature=global_feature
)
# lock encoder weights for fine-tuning
if encoder_freeze:
freeze_model(backbone, **kwargs)
# loading model weights
if weights is not None:
model.load_weights(weights)
return model
| 33.551331 | 121 | 0.603921 |
8411bbd9f23296a127cef1d0809d10a413ce3554 | 1,929 | py | Python | src/ssp/spark/udf/spacy_ner_udf.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 10 | 2020-03-12T11:51:46.000Z | 2022-03-24T04:56:05.000Z | src/ssp/spark/udf/spacy_ner_udf.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 12 | 2020-04-23T07:28:14.000Z | 2022-03-12T00:20:24.000Z | src/ssp/spark/udf/spacy_ner_udf.py | gyan42/spark-streaming-playground | 147ef9cbc31b7aed242663dee36143ebf0e8043f | [
"Apache-2.0"
] | 1 | 2020-04-20T14:48:38.000Z | 2020-04-20T14:48:38.000Z | from pyspark.sql.functions import udf
import requests
import ast
from pyspark.sql.types import *
# Interesting use case : What if the end point gone for toss and
# Streaming has to recomputed for failed cases ?
from ssp.logger.pretty_print import print_info
from ssp.logger.pretty_print import print_error
def get_ner(text, url):
data = {'text': text}
tags = []
try:
r = requests.post(url=url, json=data)
r.raise_for_status()
data = r.json()["res"]
data = eval(data)
for key, value in zip(data.keys(), data.values()):
tags.append((str(key), str(value)))
except requests.exceptions.HTTPError as err:
print(err)
tags = ["URL_ERROR"]
return tags
schema = ArrayType(StructType([
StructField("ner", StringType(), False),
StructField("word", StringType(), False)
]))
# TODO fixed for now, expose/configure the URL through gin config
def get_ner_udf(is_docker):
if is_docker:
url = "http://host.docker.internal:30123/text/ner/spacy"
return udf(lambda x: get_ner(text=x, url=url), schema)
else:
url = "http://127.0.0.1:30123/text/ner/spacy"
return udf(lambda x: get_ner(text=x, url=url), schema)
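# Illustrative usage sketch (assumes a Spark DataFrame `df` with a string column "text"):
#   df = df.withColumn("ner_tags", get_ner_udf(is_docker=False)(df["text"]))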
if __name__ == "__main__":
try:
URL = "http://host.docker.internal:30123/text/ner/spacy"
print_info(f"Trying URL : {URL} ")
# sending get request and saving the response as response object
data = get_ner(text="Wow! this is Wednesday night now and here the lazy Mageswaran coding me", url=URL)
print(data)
except:
print_error("Failed!")
URL = "http://127.0.0.1:30123/text/ner/spacy"
print_info(f"Trying URL : {URL} ")
# sending get request and saving the response as response object
data = get_ner(text="Wow! this is Wednesday night now and here the lazy Mageswaran coding me", url=URL)
print(data)
| 32.15 | 111 | 0.648523 |
9b20660a9a8d73ad3ffecc9cc9b69a20b25caf2b | 397 | py | Python | Web_django/Web_django/wsgi.py | LikeFrost/homework | b59ef80cccd70579609cc9656a5b0dcf5a742fa1 | [
"Unlicense"
] | null | null | null | Web_django/Web_django/wsgi.py | LikeFrost/homework | b59ef80cccd70579609cc9656a5b0dcf5a742fa1 | [
"Unlicense"
] | null | null | null | Web_django/Web_django/wsgi.py | LikeFrost/homework | b59ef80cccd70579609cc9656a5b0dcf5a742fa1 | [
"Unlicense"
] | null | null | null | """
WSGI config for Web_django project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Web_django.settings')
application = get_wsgi_application()
| 23.352941 | 78 | 0.788413 |
8ea11ded97fd1ec6171f4f04cb262a725fab7c9d | 41,586 | py | Python | src/python/turicreate/toolkits/image_classifier/image_classifier.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | null | null | null | src/python/turicreate/toolkits/image_classifier/image_classifier.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | 3 | 2022-02-15T04:42:24.000Z | 2022-03-12T01:05:15.000Z | src/python/turicreate/toolkits/image_classifier/image_classifier.py | sirahd/turicreate | 386efa4eb5033573ee9120704a8c88a9a6151133 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Class definition and utilities for the image classification toolkit.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _tc
import time as _time
from turicreate.toolkits._model import CustomModel as _CustomModel
import turicreate.toolkits._internal_utils as _tkutl
import turicreate.toolkits._private_utils as _pvtutl
from turicreate.toolkits import _coreml_utils
from turicreate.toolkits._main import ToolkitError as _ToolkitError
from turicreate.toolkits._model import PythonProxy as _PythonProxy
from turicreate import config as _tc_config
from .._internal_utils import _mac_ver
from .. import _pre_trained_models
from .. import _image_feature_extractor
from ._evaluation import Evaluation as _Evaluation
from turicreate.toolkits._internal_utils import (_raise_error_if_not_sframe,
_numeric_param_check_range)
_DEFAULT_SOLVER_OPTIONS = {
'convergence_threshold': 1e-2,
'step_size': 1.0,
'lbfgs_memory_level': 11,
'max_iterations': 10}
from six.moves import reduce as _reduce
def create(dataset, target, feature=None, model = 'resnet-50',
l2_penalty=0.01,
l1_penalty=0.0,
solver='auto', feature_rescaling=True,
convergence_threshold = _DEFAULT_SOLVER_OPTIONS['convergence_threshold'],
step_size = _DEFAULT_SOLVER_OPTIONS['step_size'],
lbfgs_memory_level = _DEFAULT_SOLVER_OPTIONS['lbfgs_memory_level'],
max_iterations = _DEFAULT_SOLVER_OPTIONS['max_iterations'],
class_weights = None,
validation_set = 'auto',
verbose=True,
seed=None,
batch_size=64):
"""
Create a :class:`ImageClassifier` model.
Parameters
----------
dataset : SFrame
Input data. The column named by the 'feature' parameter will be
extracted for modeling.
target : string, or int
Name of the column containing the target variable. The values in this
column must be of string or integer type. String target variables are
automatically mapped to integers in the order in which they are provided.
For example, a target variable with 'cat' and 'dog' as possible
values is mapped to 0 and 1 respectively with 0 being the base class
and 1 being the reference class. Use `model.classes` to retrieve
the order in which the classes are mapped.
feature : string, optional
indicates that the SFrame has only column of Image type and that will
Name of the column containing the input images. 'None' (the default)
indicates the only image column in `dataset` should be used as the
feature.
l2_penalty : float, optional
Weight on l2 regularization of the model. The larger this weight, the
more the model coefficients shrink toward 0. This introduces bias into
the model but decreases variance, potentially leading to better
predictions. The default value is 0.01; setting this parameter to 0
corresponds to unregularized logistic regression. See the ridge
regression reference for more detail.
l1_penalty : float, optional
Weight on l1 regularization of the model. Like the l2 penalty, the
higher the l1 penalty, the more the estimated coefficients shrink toward
0. The l1 penalty, however, completely zeros out sufficiently small
coefficients, automatically indicating features that are not useful
for the model. The default weight of 0 prevents any features from
being discarded. See the LASSO regression reference for more detail.
solver : string, optional
Name of the solver to be used to solve the regression. See the
references for more detail on each solver. Available solvers are:
- *auto (default)*: automatically chooses the best solver for the data
and model parameters.
- *newton*: Newton-Raphson
- *lbfgs*: limited memory BFGS
- *fista*: accelerated gradient descent
For this model, the Newton-Raphson method is equivalent to the
iteratively re-weighted least squares algorithm. If the l1_penalty is
greater than 0, use the 'fista' solver.
The model is trained using a carefully engineered collection of methods
that are automatically picked based on the input data. The ``newton``
method works best for datasets with plenty of examples and few features
(long datasets). Limited memory BFGS (``lbfgs``) is a robust solver for
wide datasets (i.e datasets with many coefficients). ``fista`` is the
default solver for l1-regularized linear regression. The solvers are all
automatically tuned and the default options should function well. See
the solver options guide for setting additional parameters for each of
the solvers.
See the user guide for additional details on how the solver is chosen.
(see `here
<https://apple.github.io/turicreate/docs/userguide/supervised-learning/linear-regression.html>`_)
feature_rescaling : boolean, optional
Feature rescaling is an important pre-processing step that ensures that
all features are on the same scale. An l2-norm rescaling is performed
to make sure that all features are of the same norm. Categorical
features are also rescaled by rescaling the dummy variables that are
used to represent them. The coefficients are returned in original scale
of the problem. This process is particularly useful when features
vary widely in their ranges.
convergence_threshold : float, optional
Convergence is tested using variation in the training objective. The
variation in the training objective is calculated using the difference
between the objective values between two steps. Consider reducing this
below the default value (0.01) for a more accurately trained model.
Beware of overfitting (i.e a model that works well only on the training
data) if this parameter is set to a very low value.
lbfgs_memory_level : float, optional
The L-BFGS algorithm keeps track of gradient information from the
previous ``lbfgs_memory_level`` iterations. The storage requirement for
each of these gradients is the ``num_coefficients`` in the problem.
        Increasing the ``lbfgs_memory_level`` can help improve the quality of
the model trained. Setting this to more than ``max_iterations`` has the
same effect as setting it to ``max_iterations``.
model : string optional
Uses a pretrained model to bootstrap an image classifier:
- "resnet-50" : Uses a pretrained resnet model.
Exported Core ML model will be ~90M.
- "squeezenet_v1.1" : Uses a pretrained squeezenet model.
Exported Core ML model will be ~4.7M.
- "VisionFeaturePrint_Scene": Uses an OS internal feature extractor.
Only on available on iOS 12.0+,
macOS 10.14+ and tvOS 12.0+.
Exported Core ML model will be ~41K.
Models are downloaded from the internet if not available locally. Once
downloaded, the models are cached for future use.
step_size : float, optional
The starting step size to use for the ``fista`` solver. The default is
set to 1.0, this is an aggressive setting. If the first iteration takes
a considerable amount of time, reducing this parameter may speed up
model training.
class_weights : {dict, `auto`}, optional
Weights the examples in the training data according to the given class
weights. If set to `None`, all classes are supposed to have weight one. The
`auto` mode set the class weight to be inversely proportional to number of
examples in the training data with the given class.
validation_set : SFrame, optional
A dataset for monitoring the model's generalization performance.
The format of this SFrame must be the same as the training set.
By default this argument is set to 'auto' and a validation set is
automatically sampled and used for progress printing. If
validation_set is set to None, then no additional metrics
are computed. The default value is 'auto'.
max_iterations : int, optional
The maximum number of allowed passes through the data. More passes over
the data can result in a more accurately trained model. Consider
increasing this (the default value is 10) if the training accuracy is
low and the *Grad-Norm* in the display is large.
verbose : bool, optional
If True, prints progress updates and model details.
seed : int, optional
Seed for random number generation. Set this value to ensure that the
same model is created every time.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : ImageClassifier
A trained :class:`ImageClassifier` model.
Examples
--------
.. sourcecode:: python
>>> model = turicreate.image_classifier.create(data, target='is_expensive')
# Make predictions (in various forms)
>>> predictions = model.predict(data) # predictions
>>> predictions = model.classify(data) # predictions with confidence
>>> predictions = model.predict_topk(data) # Top-5 predictions (multiclass)
# Evaluate the model with ground truth data
>>> results = model.evaluate(data)
See Also
--------
ImageClassifier
"""
start_time = _time.time()
if not isinstance(dataset, _tc.SFrame):
raise TypeError('"dataset" must be of type SFrame.')
# Check model parameter
allowed_models = list(_pre_trained_models.MODELS.keys())
if _mac_ver() >= (10,14):
allowed_models.append('VisionFeaturePrint_Scene')
# Also, to make sure existing code doesn't break, replace incorrect name
# with the correct name version
if model == "VisionFeaturePrint_Screen":
print("WARNING: Correct spelling of model name is VisionFeaturePrint_Scene; VisionFeaturePrint_Screen will be removed in subsequent versions.")
model = "VisionFeaturePrint_Scene"
_tkutl._check_categorical_option_type('model', model, allowed_models)
# Check dataset parameter
if not isinstance(dataset, _tc.SFrame):
raise TypeError("Unrecognized type for 'dataset'. An SFrame is expected.")
if len(dataset) == 0:
raise _ToolkitError('Unable to train on empty dataset')
if (feature is not None) and (feature not in dataset.column_names()):
raise _ToolkitError("Image feature column '%s' does not exist" % feature)
if target not in dataset.column_names():
raise _ToolkitError("Target column '%s' does not exist" % target)
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
if not (isinstance(validation_set, _tc.SFrame) or validation_set == 'auto' or validation_set is None):
raise TypeError("Unrecognized value for 'validation_set'.")
if feature is None:
feature = _tkutl._find_only_image_column(dataset)
_tkutl._handle_missing_values(dataset, feature, 'training_dataset')
feature_extractor = _image_feature_extractor._create_feature_extractor(model)
# Extract features
extracted_features = _tc.SFrame({
target: dataset[target],
'__image_features__': feature_extractor.extract_features(dataset, feature, verbose=verbose, batch_size=batch_size),
})
if isinstance(validation_set, _tc.SFrame):
        _tkutl._handle_missing_values(validation_set, feature, 'validation_set')
extracted_features_validation = _tc.SFrame({
target: validation_set[target],
'__image_features__': feature_extractor.extract_features(validation_set, feature, verbose=verbose, batch_size=batch_size),
})
else:
extracted_features_validation = validation_set
# Train a classifier using the extracted features
extracted_features[target] = dataset[target]
lr_model = _tc.logistic_classifier.create(extracted_features,
features=['__image_features__'],
target=target,
max_iterations=max_iterations,
validation_set=extracted_features_validation,
seed=seed,
verbose=verbose, l2_penalty=l2_penalty, l1_penalty=l1_penalty,
solver=solver, feature_rescaling=feature_rescaling,
convergence_threshold=convergence_threshold,
step_size=step_size,
lbfgs_memory_level=lbfgs_memory_level,
class_weights=class_weights)
# set input image shape
if model in _pre_trained_models.MODELS:
input_image_shape = _pre_trained_models.MODELS[model].input_image_shape
else: # model == VisionFeaturePrint_Scene
input_image_shape = (3, 299, 299)
# Save the model
state = {
'classifier': lr_model,
'model': model,
'max_iterations': max_iterations,
'feature_extractor': feature_extractor,
'input_image_shape': input_image_shape,
'target': target,
'feature': feature,
'num_features': 1,
'num_classes': lr_model.num_classes,
'classes': lr_model.classes,
'num_examples': lr_model.num_examples,
'training_time': _time.time() - start_time,
'training_loss': lr_model.training_loss,
}
return ImageClassifier(state)
class ImageClassifier(_CustomModel):
"""
A trained model that is ready to use for classification or export to CoreML.
This model should not be constructed directly.
"""
_PYTHON_IMAGE_CLASSIFIER_VERSION = 1
def __init__(self, state):
self.__proxy__ = _PythonProxy(state)
@classmethod
def _native_name(cls):
return "image_classifier"
def _get_version(self):
return self._PYTHON_IMAGE_CLASSIFIER_VERSION
def _get_native_state(self):
"""
Save the model as a dictionary, which can be loaded with the
:py:func:`~turicreate.load_model` method.
"""
state = self.__proxy__.get_state()
state['classifier'] = state['classifier'].__proxy__
del state['feature_extractor']
del state['classes']
return state
@classmethod
def _load_version(cls, state, version):
"""
A function to load a previously saved ImageClassifier
instance.
"""
_tkutl._model_version_check(version, cls._PYTHON_IMAGE_CLASSIFIER_VERSION)
from turicreate.toolkits.classifier.logistic_classifier import LogisticClassifier
state['classifier'] = LogisticClassifier(state['classifier'])
state['classes'] = state['classifier'].classes
# Correct models saved with a previous typo
if state['model'] == "VisionFeaturePrint_Screen":
state['model'] = "VisionFeaturePrint_Scene"
# Load pre-trained model & feature extractor
model_name = state['model']
if model_name == "VisionFeaturePrint_Scene" and _mac_ver() < (10,14):
            raise _ToolkitError("Cannot load model on this operating system. This model uses VisionFeaturePrint_Scene, "
"which is only supported on macOS 10.14 and higher.")
state['feature_extractor'] = _image_feature_extractor._create_feature_extractor(model_name)
state['input_image_shape'] = tuple([int(i) for i in state['input_image_shape']])
return ImageClassifier(state)
def __str__(self):
"""
Return a string description of the model to the ``print`` method.
Returns
-------
out : string
A description of the ImageClassifier.
"""
return self.__repr__()
def __repr__(self):
"""
Print a string description of the model when the model name is entered
in the terminal.
"""
width = 40
sections, section_titles = self._get_summary_struct()
out = _tkutl._toolkit_repr_print(self, sections, section_titles,
width=width)
return out
def _get_summary_struct(self):
"""
Returns a structured description of the model, including (where
relevant) the schema of the training data, description of the training
data, training statistics, and model hyperparameters.
Returns
-------
sections : list (of list of tuples)
A list of summary sections.
Each section is a list.
Each item in a section list is a tuple of the form:
('<label>','<field>')
section_titles: list
A list of section titles.
The order matches that of the 'sections' object.
"""
model_fields = [
('Number of classes', 'num_classes'),
('Number of feature columns', 'num_features'),
('Input image shape', 'input_image_shape'),
]
training_fields = [
('Number of examples', 'num_examples'),
("Training loss", 'training_loss'),
("Training time (sec)", 'training_time'),
]
section_titles = ['Schema', 'Training summary']
return([model_fields, training_fields], section_titles)
def _canonize_input(self, dataset):
"""
Takes input and returns tuple of the input in canonical form (SFrame)
along with an unpack callback function that can be applied to
prediction results to "undo" the canonization.
"""
unpack = lambda x: x
if isinstance(dataset, _tc.SArray):
dataset = _tc.SFrame({self.feature: dataset})
elif isinstance(dataset, _tc.Image):
dataset = _tc.SFrame({self.feature: [dataset]})
unpack = lambda x: x[0]
return dataset, unpack
def predict(self, dataset, output_type='class', batch_size=64):
"""
Return predictions for ``dataset``, using the trained logistic
regression model. Predictions can be generated as class labels,
probabilities that the target value is True, or margins (i.e. the
distance of the observations from the hyperplane separating the
classes). `probability_vector` returns a vector of probabilities by
each class.
For each new example in ``dataset``, the margin---also known as the
linear predictor---is the inner product of the example and the model
coefficients. The probability is obtained by passing the margin through
the logistic function. Predicted classes are obtained by thresholding
the predicted probabilities at 0.5. If you would like to threshold
predictions at a different probability level, you can use the
Turi Create evaluation toolkit.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
The images to be classified.
If dataset is an SFrame, it must have columns with the same names as
the features used for model training, but does not require a target
column. Additional columns are ignored.
output_type : {'probability', 'margin', 'class', 'probability_vector'}, optional
Form of the predictions which are one of:
- 'probability': Prediction probability associated with the True
class (not applicable for multi-class classification)
- 'probability_vector': Prediction probability associated with each
class as a vector. The probability of the first class (sorted
alphanumerically by name of the class in the training set) is in
position 0 of the vector, the second in position 1 and so on.
- 'class': Class prediction. For multi-class classification, this
returns the class with maximum probability.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SArray
An SArray with model predictions. If `dataset` is a single image, the
return value will be a single prediction.
See Also
----------
create, evaluate, classify
Examples
----------
>>> probability_predictions = model.predict(data, output_type='probability')
>>> margin_predictions = model.predict(data, output_type='margin')
>>> class_predictions = model.predict(data, output_type='class')
"""
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, unpack = self._canonize_input(dataset)
extracted_features = self._extract_features(dataset, batch_size=batch_size)
return unpack(self.classifier.predict(extracted_features, output_type=output_type))
def classify(self, dataset, batch_size=64):
"""
Return a classification, for each example in the ``dataset``, using the
trained logistic regression model. The output SFrame contains predictions
as both class labels (0 or 1) as well as probabilities that the predicted
value is the associated label.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
Images to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : SFrame
An SFrame with model predictions i.e class labels and
probabilities. If `dataset` is a single image, the return will be a
single row (dict).
See Also
----------
create, evaluate, predict
Examples
----------
>>> classes = model.classify(data)
"""
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, unpack = self._canonize_input(dataset)
extracted_features = self._extract_features(dataset, batch_size=batch_size)
return unpack(self.classifier.classify(extracted_features))
def predict_topk(self, dataset, output_type="probability", k=3, batch_size=64):
"""
Return top-k predictions for the ``dataset``, using the trained model.
Predictions are returned as an SFrame with three columns: `id`,
`class`, and `probability`, `margin`, or `rank`, depending on the ``output_type``
parameter. Input dataset size must be the same as for training of the model.
Parameters
----------
dataset : SFrame | SArray | turicreate.Image
Images to be classified.
If dataset is an SFrame, it must include columns with the same
names as the features used for model training, but does not require
a target column. Additional columns are ignored.
output_type : {'probability', 'rank', 'margin'}, optional
Choose the return type of the prediction:
- `probability`: Probability associated with each label in the prediction.
- `rank` : Rank associated with each label in the prediction.
- `margin` : Margin associated with each label in the prediction.
k : int, optional
Number of classes to return for each input example.
Returns
-------
out : SFrame
An SFrame with model predictions.
See Also
--------
predict, classify, evaluate
Examples
--------
>>> pred = m.predict_topk(validation_data, k=3)
>>> pred
+----+-------+-------------------+
| id | class | probability |
+----+-------+-------------------+
| 0 | 4 | 0.995623886585 |
| 0 | 9 | 0.0038311756216 |
| 0 | 7 | 0.000301006948575 |
| 1 | 1 | 0.928708016872 |
| 1 | 3 | 0.0440889261663 |
| 1 | 2 | 0.0176190119237 |
| 2 | 3 | 0.996967732906 |
| 2 | 2 | 0.00151345680933 |
| 2 | 7 | 0.000637513934635 |
| 3 | 1 | 0.998070061207 |
| .. | ... | ... |
+----+-------+-------------------+
[35688 rows x 3 columns]
"""
if not isinstance(dataset, (_tc.SFrame, _tc.SArray, _tc.Image)):
raise TypeError('dataset must be either an SFrame, SArray or turicreate.Image')
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
dataset, _ = self._canonize_input(dataset)
        extracted_features = self._extract_features(dataset, batch_size=batch_size)
return self.classifier.predict_topk(extracted_features, output_type = output_type, k = k)
def evaluate(self, dataset, metric='auto', verbose=True, batch_size=64):
"""
Evaluate the model by making predictions of target values and comparing
these to actual values.
Parameters
----------
dataset : SFrame
Dataset of new observations. Must include columns with the same
names as the target and features used for model training. Additional
columns are ignored.
metric : str, optional
Name of the evaluation metric. Possible values are:
- 'auto' : Returns all available metrics.
- 'accuracy' : Classification accuracy (micro average).
- 'auc' : Area under the ROC curve (macro average)
- 'precision' : Precision score (macro average)
- 'recall' : Recall score (macro average)
- 'f1_score' : F1 score (macro average)
- 'log_loss' : Log loss
- 'confusion_matrix' : An SFrame with counts of possible prediction/true label combinations.
- 'roc_curve' : An SFrame containing information needed for an ROC curve
For more flexibility in calculating evaluation metrics, use the
:class:`~turicreate.evaluation` module.
verbose : bool, optional
If True, prints progress updates and model details.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve performance.
Returns
-------
out : dict
Dictionary of evaluation results where the key is the name of the
evaluation metric (e.g. `accuracy`) and the value is the evaluation
score.
See Also
----------
create, predict, classify
Examples
----------
.. sourcecode:: python
>>> results = model.evaluate(data)
            >>> print(results['accuracy'])
"""
import os, json, math
if(batch_size < 1):
raise ValueError("'batch_size' must be greater than or equal to 1")
if self.target not in dataset.column_names():
raise _ToolkitError("Target column '%s' does not exist" % self.target)
extracted_features = self._extract_features(dataset, verbose=verbose, batch_size=batch_size)
extracted_features[self.target] = dataset[self.target]
metrics = self.classifier.evaluate(extracted_features, metric=metric, with_predictions=True)
predictions = metrics["predictions"]["probs"]
state = self.__proxy__.get_state()
labels = state["classes"]
from .._evaluate_utils import (
entropy,
confidence,
relative_confidence,
get_confusion_matrix,
hclusterSort,
l2Dist
)
evaluation_result = {k: metrics[k] for k in ['accuracy', 'f1_score', 'log_loss', 'precision',
'recall', 'auc', 'roc_curve', 'confusion_matrix']}
evaluation_result['num_test_examples'] = len(dataset)
for k in ['num_classes', 'num_features', 'input_image_shape', 'num_examples', 'training_loss', 'training_time', 'model', 'max_iterations']:
evaluation_result[k] = getattr(self, k)
# Extend the given test data
extended_test = dataset.add_column(predictions, 'probs')
extended_test['label'] = dataset[self.target]
extended_test = extended_test.add_columns( [extended_test.apply(lambda d: labels[d['probs'].index(confidence(d['probs']))]),
extended_test.apply(lambda d: entropy(d['probs'])),
extended_test.apply(lambda d: confidence(d['probs'])),
extended_test.apply(lambda d: relative_confidence(d['probs']))],
['predicted_label', 'entropy', 'confidence', 'relative_confidence'])
extended_test = extended_test.add_column(extended_test.apply(lambda d: d['label'] == d['predicted_label']), 'correct')
evaluation_result['model_name'] = state['model']
# Calculate the confusion matrix
sf_conf_mat = get_confusion_matrix(extended_test, labels)
confidence_threshold = 0.5
hesitant_threshold = 0.2
evaluation_result['confidence_threshold'] = confidence_threshold
evaluation_result['hesitant_threshold'] = hesitant_threshold
evaluation_result['confidence_metric_for_threshold'] = 'relative_confidence'
evaluation_result['conf_mat'] = list(sf_conf_mat)
# Get sorted labels (sorted by hCluster)
vectors = map(lambda l: {'name': l, 'pos':list(sf_conf_mat[sf_conf_mat['target_label']==l].sort('predicted_label')['norm_prob'])},
labels)
evaluation_result['sorted_labels'] = hclusterSort(vectors, l2Dist)[0]['name'].split("|")
# Get recall and precision per label
per_l = extended_test.groupby(['label'], {'count': _tc.aggregate.COUNT, 'correct_count': _tc.aggregate.SUM('correct') })
per_l['recall'] = per_l.apply(lambda l: l['correct_count']*1.0 / l['count'])
per_pl = extended_test.groupby(['predicted_label'], {'predicted_count': _tc.aggregate.COUNT, 'correct_count': _tc.aggregate.SUM('correct') })
per_pl['precision'] = per_pl.apply(lambda l: l['correct_count']*1.0 / l['predicted_count'])
per_pl = per_pl.rename({'predicted_label': 'label'})
evaluation_result['label_metrics'] = list(per_l.join(per_pl, on='label', how='outer').select_columns(['label', 'count', 'correct_count', 'predicted_count', 'recall', 'precision']))
evaluation_result['labels'] = labels
extended_test = extended_test.add_row_number('__idx').rename({'label': 'target_label'})
evaluation_result['test_data'] = extended_test
evaluation_result['feature'] = self.feature
return _Evaluation(evaluation_result)
def _extract_features(self, dataset, verbose=False, batch_size=64):
return _tc.SFrame({
'__image_features__': self.feature_extractor.extract_features(dataset, self.feature, verbose=verbose, batch_size=batch_size)
})
def export_coreml(self, filename):
"""
Save the model in Core ML format.
See Also
--------
save
Examples
--------
>>> model.export_coreml('myModel.mlmodel')
"""
import coremltools
# First define three internal helper functions
# Internal helper function
def _create_vision_feature_print_scene():
prob_name = self.target + 'Probability'
#
# Setup the top level (pipeline classifier) spec
#
top_spec = coremltools.proto.Model_pb2.Model()
top_spec.specificationVersion = 3
desc = top_spec.description
desc.output.add().name = prob_name
desc.output.add().name = self.target
desc.predictedFeatureName = self.target
desc.predictedProbabilitiesName = prob_name
input = desc.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')
input.type.imageType.colorSpace = BGR_VALUE
#
# VisionFeaturePrint extractor
#
pipelineClassifier = top_spec.pipelineClassifier
scene_print = pipelineClassifier.pipeline.models.add()
scene_print.specificationVersion = 3
scene_print.visionFeaturePrint.scene.version = 1
input = scene_print.description.input.add()
input.name = self.feature
input.type.imageType.width = 299
input.type.imageType.height = 299
input.type.imageType.colorSpace = BGR_VALUE
output = scene_print.description.output.add()
output.name = "output_name"
DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')
output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
output.type.multiArrayType.shape.append(2048)
#
# Neural Network Classifier, which is just logistic regression, in order to use GPUs
#
temp = top_spec.pipelineClassifier.pipeline.models.add()
temp.specificationVersion = 3
# Empty inner product layer
nn_spec = temp.neuralNetworkClassifier
feature_layer = nn_spec.layers.add()
feature_layer.name = "feature_layer"
feature_layer.input.append("output_name")
feature_layer.output.append("softmax_input")
fc_layer_params = feature_layer.innerProduct
fc_layer_params.inputChannels = 2048
# Softmax layer
softmax = nn_spec.layers.add()
softmax.name = "softmax"
softmax.softmax.MergeFromString(b'')
softmax.input.append("softmax_input")
softmax.output.append(prob_name)
input = temp.description.input.add()
input.name = "output_name"
input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE
input.type.multiArrayType.shape.append(2048)
# Set outputs
desc = temp.description
prob_output = desc.output.add()
prob_output.name = prob_name
label_output = desc.output.add()
label_output.name = self.target
if type(self.classifier.classes[0]) == int:
prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')
label_output.type.int64Type.MergeFromString(b'')
else:
prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')
label_output.type.stringType.MergeFromString(b'')
temp.description.predictedFeatureName = self.target
temp.description.predictedProbabilitiesName = prob_name
return top_spec
# Internal helper function
def _update_last_two_layers(nn_spec):
            # Update the last inner product (fully connected) layer with the classifier's coefficients
num_classes = self.num_classes
fc_layer = nn_spec.layers[-2]
fc_layer_params = fc_layer.innerProduct
fc_layer_params.outputChannels = self.classifier.num_classes
inputChannels = fc_layer_params.inputChannels
fc_layer_params.hasBias = True
coefs = self.classifier.coefficients
weights = fc_layer_params.weights
bias = fc_layer_params.bias
del weights.floatValue[:]
del bias.floatValue[:]
import numpy as np
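            # Turi Create's logistic classifier stores coefficients for only
            # num_classes - 1 classes (the first class is the reference class),
            # so a zero column / zero bias entry is prepended for that class below.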
W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(
inputChannels, num_classes - 1, order = 'F')
b = coefs[coefs['index'] == None]['value']
Wa = np.hstack((np.zeros((inputChannels, 1)), W))
weights.floatValue.extend(Wa.flatten(order = 'F'))
bias.floatValue.extend([0.0] + list(b))
# Internal helper function
def _set_inputs_outputs_and_metadata(spec, nn_spec):
# Replace the classifier with the new classes
class_labels = self.classifier.classes
probOutput = spec.description.output[0]
classLabel = spec.description.output[1]
probOutput.type.dictionaryType.MergeFromString(b'')
if type(class_labels[0]) == int:
nn_spec.ClearField('int64ClassLabels')
probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')
classLabel.type.int64Type.MergeFromString(b'')
del nn_spec.int64ClassLabels.vector[:]
for c in class_labels:
nn_spec.int64ClassLabels.vector.append(c)
else:
nn_spec.ClearField('stringClassLabels')
probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')
classLabel.type.stringType.MergeFromString(b'')
del nn_spec.stringClassLabels.vector[:]
for c in class_labels:
nn_spec.stringClassLabels.vector.append(c)
prob_name = self.target + 'Probability'
label_name = self.target
old_output_name = nn_spec.layers[-1].name
coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)
coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)
if nn_spec.layers[-1].name == old_output_name:
nn_spec.layers[-1].name = prob_name
if nn_spec.labelProbabilityLayerName == old_output_name:
nn_spec.labelProbabilityLayerName = prob_name
coremltools.models.utils.rename_feature(spec, 'data', self.feature)
if len(nn_spec.preprocessing) > 0:
nn_spec.preprocessing[0].featureName = self.feature
mlmodel = coremltools.models.MLModel(spec)
model_type = 'image classifier (%s)' % self.model
mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)
mlmodel.input_description[self.feature] = u'Input image'
mlmodel.output_description[prob_name] = 'Prediction probabilities'
mlmodel.output_description[label_name] = 'Class label of top prediction'
_coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {
'model': self.model,
'target': self.target,
'features': self.feature,
'max_iterations': str(self.max_iterations),
}, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)
return mlmodel
# main part of the export_coreml function
if self.model in _pre_trained_models.MODELS:
ptModel = _pre_trained_models.MODELS[self.model]()
feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)
coreml_model = feature_extractor.get_coreml_model()
spec = coreml_model.get_spec()
nn_spec = spec.neuralNetworkClassifier
else: # model == VisionFeaturePrint_Scene
spec = _create_vision_feature_print_scene()
nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier
_update_last_two_layers(nn_spec)
mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)
mlmodel.save(filename)
| 43.913411 | 188 | 0.636777 |
758394b9040e1103b63da7d75b8bf7e738ab7f28 | 8,410 | py | Python | tools/train.py | zhangtingyu11/mmdetection3d | 6e8285b73e42884aff20af123e3499c44e31cfe1 | [
"Apache-2.0"
] | null | null | null | tools/train.py | zhangtingyu11/mmdetection3d | 6e8285b73e42884aff20af123e3499c44e31cfe1 | [
"Apache-2.0"
] | null | null | null | tools/train.py | zhangtingyu11/mmdetection3d | 6e8285b73e42884aff20af123e3499c44e31cfe1 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import argparse
import copy
import mmcv
import os
import time
import torch
import warnings
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from os import path as osp
from mmdet import __version__ as mmdet_version
from mmdet3d import __version__ as mmdet3d_version
from mmdet3d.apis import train_model
from mmdet3d.datasets import build_dataset
from mmdet3d.models import build_model
from mmdet3d.utils import collect_env, get_root_logger
from mmdet.apis import set_random_seed
from mmseg import __version__ as mmseg_version
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=0, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--autoscale-lr',
action='store_true',
help='automatically scale lr with the number of gpus')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both specified, '
'--options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
if args.autoscale_lr:
# apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
# specify logger name, if we still use 'mmdet', the output info will be
# filtered and won't be saved in the log_file
# TODO: ugly workaround to judge whether we are training det or seg model
if cfg.model.type in ['EncoderDecoder3D']:
logger_name = 'mmseg'
else:
logger_name = 'mmdet'
logger = get_root_logger(
log_file=log_file, log_level=cfg.log_level, name=logger_name)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_model(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
logger.info(f'Model:\n{model}')
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
# in case we use a dataset wrapper
if 'dataset' in cfg.data.train:
val_dataset.pipeline = cfg.data.train.dataset.pipeline
else:
val_dataset.pipeline = cfg.data.train.pipeline
# set test_mode=False here in deep copied config
# which do not affect AP/AR calculation later
# refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa
val_dataset.test_mode = False
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=mmdet_version,
mmseg_version=mmseg_version,
mmdet3d_version=mmdet3d_version,
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE # for segmentors
if hasattr(datasets[0], 'PALETTE') else None)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_model(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| 37.713004 | 125 | 0.654102 |
9775ba52cdb1a57f2b98beb88d450e980d0a9d8b | 529 | py | Python | python3/sherlock-and-anagrams.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | python3/sherlock-and-anagrams.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | python3/sherlock-and-anagrams.py | ahavrylyuk/hackerrank | a8be83c8166a05f6f91bdd86cca3d4c544428b4b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from collections import defaultdict
from collections import Counter
def count_anagrams(s):
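    # For each substring length, build character-count signatures (Counter) of all
    # substrings and count pairs with equal signatures, i.e. anagram pairs.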
len_s = len(s)
substrs = defaultdict(list)
count = 0
for length in range(1, len_s):
same_len = substrs[length]
for i in range(len_s - length + 1):
substr = Counter(s[i:i + length])
count += sum(1 for _ in same_len if _ == substr)
same_len.append(substr)
return count
t = int(input())
for _ in range(t):
print(count_anagrams(input()))
| 23 | 60 | 0.618147 |
827433061e3c24a318f1d8921a3727683f8834ed | 1,918 | py | Python | fake_sudir/app/website/app.py | IlyaVkBot/moscow_deg2021_local_exec | ba6272a5141d83d6a2d2a1e6639b8670c154fea6 | [
"BSD-3-Clause"
] | null | null | null | fake_sudir/app/website/app.py | IlyaVkBot/moscow_deg2021_local_exec | ba6272a5141d83d6a2d2a1e6639b8670c154fea6 | [
"BSD-3-Clause"
] | null | null | null | fake_sudir/app/website/app.py | IlyaVkBot/moscow_deg2021_local_exec | ba6272a5141d83d6a2d2a1e6639b8670c154fea6 | [
"BSD-3-Clause"
] | 1 | 2022-01-24T00:29:19.000Z | 2022-01-24T00:29:19.000Z | import os
import time
from flask import Flask
from .models import db, User, OAuth2Client
from .oauth2 import config_oauth
from .routes import bp
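# Seed the database with a default admin user and a pre-registered OAuth2 client (runs only once, on an empty user table).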
def create_db_elements():
if User.query.count() > 0:
return
admin_user = User(
username='admin',
first_name='admin_first',
last_name='admin_last',
middle_name='admin_middle',
mail='admin_mail',
mobile='admin_mobile'
)
db.session.add(admin_user)
db.session.commit()
client = OAuth2Client(
client_id='deg_client_id',
client_secret='deg_client_secret',
client_id_issued_at=int(time.time()),
user_id=admin_user.id,
)
client.set_client_metadata({
"client_name": 'deg_client',
"client_uri": 'http://deg_client_uri',
"grant_types": ['authorization_code'],
"redirect_uris": ['http://localhost/fake_redirect_uri'],
"response_types": ['code'],
"scope": 'openid profile contacts',
"token_endpoint_auth_method": 'client_secret_post',
})
db.session.add(client)
db.session.commit()
def create_app(config=None):
app = Flask(__name__)
# load default configuration
app.config.from_object('website.settings')
# load environment configuration
if 'WEBSITE_CONF' in os.environ:
app.config.from_envvar('WEBSITE_CONF')
# load app specified configuration
if config is not None:
if isinstance(config, dict):
app.config.update(config)
elif config.endswith('.py'):
app.config.from_pyfile(config)
setup_app(app)
return app
def setup_app(app):
# Create tables if they do not exist already
@app.before_first_request
def create_tables():
db.create_all()
create_db_elements()
db.init_app(app)
config_oauth(app)
app.register_blueprint(bp, url_prefix='')
| 26.638889 | 64 | 0.639208 |
9e3b89155d9f1ae640e99f3fd8e71890d2b8f8b6 | 3,253 | py | Python | pinkfish/fetch.py | jimmyhzuk/pinkfish | d9bba22ba19a548b5ac65da63bfee40623c77cda | [
"MIT"
] | 1 | 2020-03-31T04:02:19.000Z | 2020-03-31T04:02:19.000Z | pinkfish/fetch.py | jimmyhzuk/pinkfish | d9bba22ba19a548b5ac65da63bfee40623c77cda | [
"MIT"
] | null | null | null | pinkfish/fetch.py | jimmyhzuk/pinkfish | d9bba22ba19a548b5ac65da63bfee40623c77cda | [
"MIT"
] | null | null | null | """
fetch
---------
Retrieve time series data
"""
# Use future imports for python 3.0 forward compatibility
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Other imports
import sys
import pandas as pd
import pandas_datareader.data as pdr
import datetime
import os
import pinkfish as pf
def _get_cache_dir(dir_name):
""" returns the path to the cache_dir """
base_dir = ''
try:
conf = pf.read_config()
base_dir = conf['base_dir']
except:
pass
finally:
dir_name = os.path.join(base_dir, dir_name)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
return dir_name
def _adj_column_names(ts):
"""
ta-lib expects columns to be lower case; for consistency,
also rename the date index to 'date'
"""
ts.columns = [col.lower().replace(' ','_') for col in ts.columns]
ts.index.names = ['date']
return ts
def fetch_timeseries(symbol, dir_name='data', use_cache=True, from_year=None):
"""
Read time series data. Use cached version if it exists and
use_cache is True, otherwise retrieve, cache, then read.
"""
if from_year is None:
from_year = 1900 if not sys.platform.startswith('win') else 1971
symbol = symbol.upper()
timeseries_cache = os.path.join(_get_cache_dir(dir_name), symbol + '.csv')
if os.path.isfile(timeseries_cache) and use_cache:
pass
else:
ts = pdr.DataReader(symbol, 'yahoo', start=datetime.datetime(from_year, 1, 1))
ts.to_csv(timeseries_cache, encoding='utf-8')
ts = pd.read_csv(timeseries_cache, index_col='Date', parse_dates=True)
ts = _adj_column_names(ts)
return ts
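# Example usage (a sketch; the cache directory is resolved from the pinkfish config):
#   ts = fetch_timeseries('SPY')
#   ts = select_tradeperiod(ts, datetime.datetime(2018, 1, 1),
#                           datetime.datetime(2020, 1, 1), use_adj=True)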
def clear_timeseries(symbols=None, dir_name='data'):
"""
Remove cached timeseries for list of symbols.
If symbols is None, remove all timeseries.
"""
cache_dir = _get_cache_dir(dir_name)
if symbols:
# in case user forgot to put single symbol in list
if not isinstance(symbols, list):
symbols = [symbols]
filenames = [symbol.upper() + '.csv' for symbol in symbols]
else:
filenames = [f for f in os.listdir(cache_dir) if f.endswith('.csv')]
for f in filenames:
filepath = os.path.join(cache_dir, f)
if os.path.exists(filepath):
os.remove(filepath)
def _adj_prices(ts):
""" Back adjust prices relative to adj_close for dividends and splits """
ts['open'] = ts['open'] * ts['adj_close'] / ts['close']
ts['high'] = ts['high'] * ts['adj_close'] / ts['close']
ts['low'] = ts['low'] * ts['adj_close'] / ts['close']
ts['close'] = ts['close'] * ts['adj_close'] / ts['close']
return ts
def select_tradeperiod(ts, start, end, use_adj=False, pad=True):
"""
Select a time slice of the data to trade from ts. If pad=True,
back-date a year to allow time for long-term indicators,
e.g. a 200-day SMA, to become valid
"""
if use_adj:
_adj_prices(ts)
if start < ts.index[0]: start = ts.index[0]
if end > ts.index[-1]: end = ts.index[-1]
if pad:
ts = ts[start - datetime.timedelta(365):end]
else:
ts = ts[start:end]
return ts
| 29.044643 | 86 | 0.646173 |
08da1eefb34a6f5a6324fc0e8f1156f1f0312d37 | 10,227 | py | Python | pystream/sequential_stream.py | RikiTikkiTavi/PyStream-API | 8a7101fcf5aa0db5e00a0fac3285548497c81145 | [
"MIT"
] | null | null | null | pystream/sequential_stream.py | RikiTikkiTavi/PyStream-API | 8a7101fcf5aa0db5e00a0fac3285548497c81145 | [
"MIT"
] | null | null | null | pystream/sequential_stream.py | RikiTikkiTavi/PyStream-API | 8a7101fcf5aa0db5e00a0fac3285548497c81145 | [
"MIT"
] | null | null | null | from functools import reduce
from itertools import chain, islice, count
from typing import Generic, TypeVar, Callable, Iterable, Any, Tuple, Iterator, List, Union, Generator
from multiprocessing import cpu_count
import pystream.infrastructure.nullable as nullable
import pystream.parallel_stream as parallel_stream
import pystream.infrastructure.collectors as collectors
import pystream.core.utils as utils
_AT = TypeVar('_AT')
_RT = TypeVar('_RT')
class SequentialStream(Generic[_AT], Iterable[_AT]):
"""
SequentialStream class to perform functional-style operations in an aesthetically-pleasing manner.
Performs operations sequentially.
:param `*iterables`: Source iterables for the SequentialStream object. When multiple iterables are given, they will be concatenated.
"""
__iterable: Iterable[_AT]
def __init__(self, *iterables: Iterable[_AT]):
self.__iterable = chain(*iterables)
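# Quick sketch using only methods defined in this class:
#   SequentialStream.range(10).filter(lambda x: x % 2 == 0).map(lambda x: x * x).sum()  # -> 120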
def __iter__(self):
return self.iterator()
def iterator(self) -> Iterator[_AT]:
"""
Creates iterator from stream.
This is terminal operation.
:returns: Iterator over stream elements
"""
return iter(self.__iterable)
def partition_iterator(self, partition_size: int) -> Generator[List[_AT], None, None]:
"""
Creates iterator over partitions of stream. This is terminal operation.
:param partition_size: Length of partition
:returns: Iterator over partitions of stream.
"""
return utils.partition_generator(self.iterator(), partition_size)
def map(self, mapper: Callable[[_AT], _RT]) -> "SequentialStream[_RT]":
"""
Returns a stream consisting of the results of applying the given function to the elements of this stream.
This is an intermediate operation.
:param mapper: Mapper function
:return: Stream with mapper operation lazily applied
"""
return SequentialStream(map(mapper, self.__iterable))
def filter(self, predicate: Callable[[_AT], bool]) -> "SequentialStream[_AT]":
"""
Returns a stream consisting of the elements of this stream that match the given predicate.
This is an intermediate operation.
:param predicate: Predicate to apply to each element to determine if it should be included
:return: The new stream
"""
return SequentialStream(filter(predicate, self.__iterable))
def reduce(self, identity: _RT, accumulator: Callable[[_RT, _AT], _RT]) -> _RT:
"""
Performs a reduction on the elements of this stream, using the provided identity value and an associative
accumulation function, and returns the reduced value.
:param identity: The identity value for the accumulating function
:param accumulator: Function for combining two values
:return: The result of the reduction
"""
return reduce(accumulator, self.__iterable, identity)
def for_each(self, action: Callable[[_AT], Any]) -> None:
"""
Performs an action for each element of this stream.
This is terminal operation.
:param action: An action to perform on the elements
"""
for i in self.__iterable:
action(i)
def any_match(self, predicate: Callable[[_AT], bool]) -> bool:
"""
Returns whether any elements of this stream match the provided predicate.
May not evaluate the predicate on all elements if not necessary for determining the result.
If the stream is empty then false is returned and the predicate is not evaluated.
:param predicate: A predicate to apply to elements of this stream.
:return: true if any elements of the stream match the provided predicate, otherwise false.
"""
return any(self.map(predicate))
def all_match(self, predicate: Callable[[_AT], bool]) -> bool:
"""
Returns whether all elements of this stream match the provided predicate.
May not evaluate the predicate on all elements if not necessary for determining the result.
If the stream is empty then true is returned and the predicate is not evaluated.
:param predicate: A predicate to apply to elements of this stream.
:return: true if either all elements of the stream match the provided predicate or the stream is empty, otherwise false
"""
return all(self.map(predicate))
def none_match(self, predicate: Callable[[_AT], bool]) -> bool:
"""
Returns whether no elements of this stream match the provided predicate. May not evaluate the predicate on
all elements if not necessary for determining the result. If the stream is empty then true is returned and
the predicate is not evaluated.
:param predicate: A predicate to apply to elements of this stream.
:return: true if either no elements of the stream match the provided predicate or the stream is empty, otherwise false
"""
return not self.any_match(predicate)
def flat_map(self, mapper: Callable[[_AT], "SequentialStream[_RT]"]) -> "SequentialStream[_RT]":
"""
Returns a stream consisting of the results of replacing each element of this stream with the contents of a
mapped stream produced by applying the provided mapping function to each element.
**API Note**:
The flatMap() operation has the effect of applying a one-to-many transformation to the elements of the stream,
and then flattening the resulting elements into a new stream.
:param mapper: Function to apply to each element which produces a stream of new values.
:return: The new stream
"""
return SequentialStream(chain.from_iterable(map(mapper, self.__iterable)))
def count(self) -> int:
"""
Returns the count of elements in this stream. This is a special case of a reduction.
:return: The count of elements in this stream
"""
if hasattr(self.__iterable, '__len__'):
# noinspection PyTypeChecker
return len(self.__iterable)
return self.reduce(0, lambda accumulator, element: accumulator + 1)
def sum(self) -> Union[_AT, int]:
"""
:return: The sum of elements in this stream
"""
return sum(self.__iterable)
def min(self) -> nullable.Nullable[_AT]:
"""
:return: Returns a Nullable describing the minimum element of this stream, or an empty Nullable if this stream is empty.
"""
return nullable.Nullable(min(self.__iterable, default=None))
def max(self) -> nullable.Nullable[_AT]:
"""
:return: Returns a Nullable describing the maximum element of this stream, or an empty Nullable if this stream is empty.
"""
return nullable.Nullable(max(self.__iterable, default=None))
def limit(self, max_size: int) -> "SequentialStream[_AT]":
"""
Returns a stream consisting of the elements of this stream, truncated to be no longer than max_size in length.
:param max_size: The number of elements the stream should be limited to
:return: The new stream
"""
return SequentialStream(islice(self.__iterable, max_size))
def find_first(self) -> nullable.Nullable[_AT]:
"""
Returns a Nullable describing the first element of this stream, or an empty Nullable if the stream is empty.
:return: A Nullable describing the first element of this stream, or an empty Nullable if the stream is empty
"""
return nullable.Nullable(next(self.__iterable, None))
def peek(self, action: Callable[[_AT], Any]) -> 'SequentialStream[_AT]':
"""
Returns a stream consisting of the elements of this stream, additionally performing the provided action on each
element as elements are consumed from the resulting stream.
This is an intermediate operation.
:param action: An action to perform on the elements as they are consumed from the stream
:return: the new stream
"""
def with_action(x):
action(x)
return x
return self.map(with_action)
def collect(self, collector: 'collectors.Collector[_AT, _RT]') -> _RT:
"""
Collects the stream using supplied collector.
This is terminal operation.
:param collector: Collector instance
:return: The result of collector.collect(...)
"""
return collector.collect(self)
def parallel(self, n_processes: int = cpu_count(), chunk_size: int = 1) -> "parallel_stream.ParallelStream[_AT]":
"""
Creates parallel (multiprocessing) stream from current stream. All following operations will be performed in parallel.
:param n_processes: Number of processes to use.
:param chunk_size: The size of chunk.
:return: New parallel stream
"""
return parallel_stream.ParallelStream(self.__iterable, n_processes=n_processes, chunk_size=chunk_size)
@staticmethod
def range(*args) -> "SequentialStream[int]":
"""
Creates an incrementing, integer stream.
If arguments are supplied, they are passed as-is to the builtin `range` function.
Otherwise, an infinite stream is created, starting at 0.
:return: New SequentialStream.
"""
if len(args) == 0:
return SequentialStream(count())
else:
return SequentialStream(range(*args))
@staticmethod
def of(*args: _RT) -> "SequentialStream[_RT]":
"""
Creates a stream with non iterable arguments.
:param `*args`: Arguments of the same type from wich the stream will be created.
:return: The new stream.
"""
return SequentialStream(args)
@staticmethod
def zip(*iterables: Iterable[_AT]) -> "SequentialStream[Tuple[_AT, ...]]":
"""
Creates a stream by *zipping* the iterables, instead of concatenating them.
:param `*iterables`: Iterables
:returns: The new stream.
"""
return SequentialStream(zip(*iterables))
| 40.74502 | 137 | 0.667351 |
66e7e24fe838d1b5eb59dd9162d5d099d3a2865d | 213 | py | Python | adaptive_alerting_detector_build/exceptions.py | brett-miller/adaptive-alerting-detector-build | eec9f514feb4f65586c408fae36176ebcd8989ae | [
"Apache-2.0"
] | null | null | null | adaptive_alerting_detector_build/exceptions.py | brett-miller/adaptive-alerting-detector-build | eec9f514feb4f65586c408fae36176ebcd8989ae | [
"Apache-2.0"
] | null | null | null | adaptive_alerting_detector_build/exceptions.py | brett-miller/adaptive-alerting-detector-build | eec9f514feb4f65586c408fae36176ebcd8989ae | [
"Apache-2.0"
] | null | null | null | # All exceptions in this module should subclass from AdaptiveAlertingDetectorBuildError.
class AdaptiveAlertingDetectorBuildError(Exception):
"""Base class for all Adaptive Alerting Detector Build errors."""
| 35.5 | 87 | 0.816901 |
fa51d913018c86b2dfe9216916ae1886f71dfd5e | 36,711 | py | Python | subcmds/sync.py | sky8336/git-repo | e910f33d64d0608de2d940c512e9ca3f140fb24f | [
"Apache-2.0"
] | null | null | null | subcmds/sync.py | sky8336/git-repo | e910f33d64d0608de2d940c512e9ca3f140fb24f | [
"Apache-2.0"
] | null | null | null | subcmds/sync.py | sky8336/git-repo | e910f33d64d0608de2d940c512e9ca3f140fb24f | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import shutil
import socket
import subprocess
import sys
import tempfile
import time
from pyversion import is_python3
if is_python3():
import http.cookiejar as cookielib
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
else:
import cookielib
import imp
import urllib2
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
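For example, to sync against the latest known good build while passing
explicit credentials for the manifest server:
  %prog -s -u <username> -p <password>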
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
try:
self.jobs = self.manifest.default.sync_j
except ManifestParseError:
self.jobs = 1
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('--force-sync',
dest='force_sync', action='store_true',
help="overwrite an existing git directory if it needs to "
"point to a different object directory. WARNING: this "
"may cause loss of data")
p.add_option('-l', '--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n', '--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d', '--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c', '--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q', '--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j', '--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
p.add_option('--fetch-submodules',
dest='fetch_submodules', action='store_true',
help='fetch submodules from server')
p.add_option('--no-tags',
dest='no_tags', action='store_true',
help="don't fetch tags")
p.add_option('--optimized-fetch',
dest='optimized_fetch', action='store_true',
help='only fetch projects fixed to sha1 if revision does not exist locally')
p.add_option('--prune', dest='prune', action='store_true',
help='delete refs that no longer exist on the remote')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from the latest known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchProjectList(self, opt, projects, *args, **kwargs):
"""Main function of the fetch threads when jobs are > 1.
Delegates most of the work to _FetchHelper.
Args:
opt: Program options returned from optparse. See _Options().
projects: Projects to fetch.
*args, **kwargs: Remaining arguments to pass to _FetchHelper. See the
_FetchHelper docstring for details.
"""
for project in projects:
success = self._FetchHelper(opt, project, *args, **kwargs)
if not success and not opt.force_broken:
break
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Fetch git objects for a single project.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
Returns:
Whether the fetch was successful.
"""
# We'll set to true once we've locked the lock.
did_lock = False
if not opt.quiet:
print('Fetching project %s' % project.name)
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
force_sync=opt.force_sync,
clone_bundle=not opt.no_clone_bundle,
no_tags=opt.no_tags, archive=self.manifest.IsArchive,
optimized_fetch=opt.optimized_fetch,
prune=opt.prune)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
err_event.set()
print('error: Cannot fetch %s' % project.name, file=sys.stderr)
if opt.force_broken:
print('warn: --force-broken, continuing to sync',
file=sys.stderr)
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
pass
except Exception as e:
print('error: Cannot fetch %s (%s: %s)' \
% (project.name, type(e).__name__, str(e)), file=sys.stderr)
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
return success
def _Fetch(self, projects, opt):
fetched = set()
lock = _threading.Lock()
pm = Progress('Fetching projects', len(projects))
objdir_project_map = dict()
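# Group projects that share an object directory so each group is fetched serially by a single worker.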
for project in projects:
objdir_project_map.setdefault(project.objdir, []).append(project)
threads = set()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project_list in objdir_project_map.values():
# Check for any errors before running any more tasks.
# ...we'll let existing threads finish, though.
if err_event.isSet() and not opt.force_broken:
break
sem.acquire()
kwargs = dict(opt=opt,
projects=project_list,
lock=lock,
fetched=fetched,
pm=pm,
sem=sem,
err_event=err_event)
if self.jobs > 1:
t = _threading.Thread(target = self._FetchProjectList,
kwargs = kwargs)
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
else:
self._FetchProjectList(**kwargs)
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print('\nerror: Exited sync due to fetch errors', file=sys.stderr)
sys.exit(1)
pm.end()
self._fetch_times.Save()
if not self.manifest.IsArchive:
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
gc_gitdirs = {}
for project in projects:
if len(project.manifest.GetProjectsWithName(project.name)) > 1:
print('Shared project %s found, disabling pruning.' % project.name)
project.bare_git.config('--replace-all', 'gc.pruneExpire', 'never')
gc_gitdirs[project.gitdir] = project.bare_git
has_dash_c = git_require((1, 7, 2))
if multiprocessing and has_dash_c:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for bare_git in gc_gitdirs.values():
bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
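# Worker: run 'git gc --auto' on one bare repository, recording failures via err_event.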
def GC(bare_git):
try:
try:
bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for bare_git in gc_gitdirs.values():
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(bare_git,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print('\nerror: Exited sync due to gc errors', file=sys.stderr)
sys.exit(1)
def _ReloadManifest(self, manifest_name=None):
if manifest_name:
# Override calls _Unload already
self.manifest.Override(manifest_name)
else:
self.manifest._Unload()
def _DeleteProject(self, path):
print('Deleting obsolete path %s' % path, file=sys.stderr)
# Delete the .git directory first, so we're less likely to have a partially
# working git repository around. There shouldn't be any git projects here,
# so rmtree works.
try:
shutil.rmtree(os.path.join(path, '.git'))
except OSError:
print('Failed to remove %s' % os.path.join(path, '.git'), file=sys.stderr)
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return -1
# Delete everything under the worktree, except for directories that contain
# another git project
dirs_to_remove = []
failed = False
for root, dirs, files in os.walk(path):
for f in files:
try:
os.remove(os.path.join(root, f))
except OSError:
print('Failed to remove %s' % os.path.join(root, f), file=sys.stderr)
failed = True
dirs[:] = [d for d in dirs
if not os.path.lexists(os.path.join(root, d, '.git'))]
dirs_to_remove += [os.path.join(root, d) for d in dirs
if os.path.join(root, d) not in dirs_to_remove]
for d in reversed(dirs_to_remove):
if os.path.islink(d):
try:
os.remove(d)
except OSError:
print('Failed to remove %s' % os.path.join(root, d), file=sys.stderr)
failed = True
elif len(os.listdir(d)) == 0:
try:
os.rmdir(d)
except OSError:
print('Failed to remove %s' % os.path.join(root, d), file=sys.stderr)
failed = True
continue
if failed:
print('error: Failed to delete obsolete path %s' % path, file=sys.stderr)
print(' remove manually, then run sync again', file=sys.stderr)
return -1
# Try deleting parent dirs if they are empty
project_dir = path
while project_dir != self.manifest.topdir:
if len(os.listdir(project_dir)) == 0:
os.rmdir(project_dir)
else:
break
project_dir = os.path.dirname(project_dir)
return 0
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
gitdir = os.path.join(self.manifest.topdir, path, '.git')
if os.path.exists(gitdir):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = gitdir,
objdir = gitdir,
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print('error: Cannot remove project "%s": uncommitted changes '
'are present' % project.relpath, file=sys.stderr)
print(' commit changes, then run sync again',
file=sys.stderr)
return -1
elif self._DeleteProject(project.worktree):
return -1
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) // 3)
if opt.network_only and opt.detach_head:
print('error: cannot combine -n and -d', file=sys.stderr)
sys.exit(1)
if opt.network_only and opt.local_only:
print('error: cannot combine -n and -l', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print('error: cannot combine -m and -s', file=sys.stderr)
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print('error: cannot combine -m and -t', file=sys.stderr)
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print('error: -u and -p may only be combined with -s or -t',
file=sys.stderr)
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print('error: both -u and -p must be given', file=sys.stderr)
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
manifest_name = opt.manifest_name
smart_sync_manifest_name = "smart_sync_override.xml"
smart_sync_manifest_path = os.path.join(
self.manifest.manifestProject.worktree, smart_sync_manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print('error: cannot smart sync: no manifest server defined in '
'manifest', file=sys.stderr)
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not opt.quiet:
print('Using manifest server %s' % manifest_server)
if '@' not in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
# .netrc file does not exist or could not be opened
pass
else:
try:
parse_result = urllib.parse.urlparse(manifest_server)
if parse_result.hostname:
auth = info.authenticators(parse_result.hostname)
if auth:
username, _account, password = auth
else:
print('No credentials found for %s in .netrc'
% parse_result.hostname, file=sys.stderr)
except netrc.NetrcParseError as e:
print('Error parsing .netrc file: %s' % e, file=sys.stderr)
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
transport = PersistentTransport(manifest_server)
if manifest_server.startswith('persistent-'):
manifest_server = manifest_server[len('persistent-'):]
try:
server = xmlrpc.client.Server(manifest_server, transport=transport)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if 'SYNC_TARGET' in env:
target = env['SYNC_TARGET']
[success, manifest_str] = server.GetApprovedManifest(branch, target)
elif 'TARGET_PRODUCT' in env and 'TARGET_BUILD_VARIANT' in env:
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = smart_sync_manifest_name
try:
f = open(smart_sync_manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError as e:
print('error: cannot write manifest to %s:\n%s'
% (smart_sync_manifest_path, e),
file=sys.stderr)
sys.exit(1)
self._ReloadManifest(manifest_name)
else:
print('error: manifest server RPC call failed: %s' %
manifest_str, file=sys.stderr)
sys.exit(1)
except (socket.error, IOError, xmlrpc.client.Fault) as e:
print('error: cannot connect to manifest server %s:\n%s'
% (self.manifest.manifest_server, e), file=sys.stderr)
sys.exit(1)
except xmlrpc.client.ProtocolError as e:
print('error: cannot connect to manifest server %s:\n%d %s'
% (self.manifest.manifest_server, e.errcode, e.errmsg),
file=sys.stderr)
sys.exit(1)
else: # Not smart sync or smart tag mode
if os.path.isfile(smart_sync_manifest_path):
try:
os.remove(smart_sync_manifest_path)
except OSError as e:
print('error: failed to remove existing smart sync override manifest: %s' %
e, file=sys.stderr)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
no_tags=opt.no_tags,
optimized_fetch=opt.optimized_fetch)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self._ReloadManifest(manifest_name)
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
if self.gitc_manifest:
gitc_manifest_projects = self.GetProjects(args,
missing_ok=True)
gitc_projects = []
opened_projects = []
for project in gitc_manifest_projects:
if project.relpath in self.gitc_manifest.paths and \
self.gitc_manifest.paths[project.relpath].old_revision:
opened_projects.append(project.relpath)
else:
gitc_projects.append(project.relpath)
if not args:
gitc_projects = None
if gitc_projects != [] and not opt.local_only:
print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
if manifest_name:
manifest.Override(manifest_name)
else:
manifest.Override(self.manifest.manifestFile)
gitc_utils.generate_gitc_manifest(self.gitc_manifest,
manifest,
gitc_projects)
print('GITC client successfully synced.')
# The opened projects need to be synced as normal, therefore we
# generate a new args list to represent the opened projects.
# TODO: make this more reliable -- if there's a project name/path overlap,
# this may choose the wrong project.
args = [os.path.relpath(self.manifest.paths[p].worktree, os.getcwd())
for p in opened_projects]
if not args:
return
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
#if _ONE_DAY_S <= (now - rp.LastFetch):
#to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
fetched = self._Fetch(to_fetch, opt)
#_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
# Iteratively fetch missing and/or nested unregistered submodules
previously_missing_set = set()
while True:
self._ReloadManifest(manifest_name)
all_projects = self.GetProjects(args,
missing_ok=True,
submodules_ok=opt.fetch_submodules)
missing = []
for project in all_projects:
if project.gitdir not in fetched:
missing.append(project)
if not missing:
break
# Stop endlessly re-fetching repos that remain missing: if the set of
# missing repos has not changed since the last fetch, break.
missing_set = set(p.name for p in missing)
if previously_missing_set == missing_set:
break
previously_missing_set = missing_set
fetched.update(self._Fetch(missing, opt))
if self.manifest.IsMirror or self.manifest.IsArchive:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)
pm.end()
print(file=sys.stderr)
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print(self.manifest.notice)
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = Wrapper()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects:
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print('info: A new version of repo is available', file=sys.stderr)
print(file=sys.stderr)
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print('info: Restarting repo with latest version', file=sys.stderr)
raise RepoChangedException(['--repo-upgraded'])
else:
print('warning: Skipped upgrade to unverified version', file=sys.stderr)
else:
if verbose:
print('repo version %s is current' % rp.work_git.describe(HEAD),
file=sys.stderr)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print('warning: GnuPG was not available during last "repo init"\n'
'warning: Cannot automatically authenticate repo.',
file=sys.stderr)
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print(file=sys.stderr)
print("warning: project '%s' branch '%s' is not signed"
% (project.name, rev), file=sys.stderr)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print(file=sys.stderr)
print(out, file=sys.stderr)
print(err, file=sys.stderr)
print(file=sys.stderr)
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
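# Exponential moving average: blend the new fetch time with the stored value.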
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
try:
self._times = json.load(f)
finally:
f.close()
except (IOError, ValueError):
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'w')
try:
json.dump(self._times, f, indent=2)
finally:
f.close()
except (IOError, TypeError):
try:
os.remove(self._path)
except OSError:
pass
# This is a replacement for xmlrpc.client.Transport using urllib2
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport, the real url
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
def __init__(self, orig_host):
self.orig_host = orig_host
def request(self, host, handler, request_body, verbose=False):
with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
# Python doesn't understand cookies with the #HttpOnly_ prefix
# Since we're only using them for HTTP, copy the file temporarily,
# stripping those prefixes away.
if cookiefile:
tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
tmpcookiefile.write("# HTTP Cookie File\n")
try:
with open(cookiefile) as f:
for line in f:
if line.startswith("#HttpOnly_"):
line = line[len("#HttpOnly_"):]
tmpcookiefile.write(line)
tmpcookiefile.flush()
cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
try:
cookiejar.load()
except cookielib.LoadError:
cookiejar = cookielib.CookieJar()
finally:
tmpcookiefile.close()
else:
cookiejar = cookielib.CookieJar()
proxyhandler = urllib.request.ProxyHandler
if proxy:
proxyhandler = urllib.request.ProxyHandler({
"http": proxy,
"https": proxy })
opener = urllib.request.build_opener(
urllib.request.HTTPCookieProcessor(cookiejar),
proxyhandler)
url = urllib.parse.urljoin(self.orig_host, handler)
parse_results = urllib.parse.urlparse(url)
scheme = parse_results.scheme
if scheme == 'persistent-http':
scheme = 'http'
if scheme == 'persistent-https':
# If we're proxying through persistent-https, use http. The
# proxy itself will do the https.
if proxy:
scheme = 'http'
else:
scheme = 'https'
# Parse out any authentication information using the base class
host, extra_headers, _ = self.get_host_info(parse_results.netloc)
url = urllib.parse.urlunparse((
scheme,
host,
parse_results.path,
parse_results.params,
parse_results.query,
parse_results.fragment))
request = urllib.request.Request(url, request_body)
if extra_headers is not None:
for (name, header) in extra_headers:
request.add_header(name, header)
request.add_header('Content-Type', 'text/xml')
try:
response = opener.open(request)
except urllib.error.HTTPError as e:
if e.code == 501:
# We may have been redirected through a login process
# but our POST turned into a GET. Retry.
response = opener.open(request)
else:
raise
p, u = xmlrpc.client.getparser()
while 1:
data = response.read(1024)
if not data:
break
p.feed(data)
p.close()
return u.close()
def close(self):
pass
| 34.665722 | 93 | 0.627223 |
053e93c3e2442360155d059f3d2e1f2e3adf96f9 | 1,200 | py | Python | molecule/default/tests/test_minio_default.py | Cloud-Temple/ansible-minio | 401e45be33428e6502b9dcf28cbc717ea410f5a5 | [
"MIT"
] | 4 | 2019-06-11T22:10:43.000Z | 2022-01-26T04:55:01.000Z | molecule/default/tests/test_minio_default.py | Cloud-Temple/ansible-minio | 401e45be33428e6502b9dcf28cbc717ea410f5a5 | [
"MIT"
] | 5 | 2020-10-29T22:01:21.000Z | 2021-03-30T15:59:27.000Z | molecule/default/tests/test_minio_default.py | Cloud-Temple/ansible-minio | 401e45be33428e6502b9dcf28cbc717ea410f5a5 | [
"MIT"
] | 5 | 2019-09-10T12:46:17.000Z | 2022-01-14T15:47:11.000Z | import os
import yaml
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.fixture()
def AnsibleDefaults():
dir_path = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dir_path, './../../../defaults/main.yml'), 'r') as stream:
return yaml.safe_load(stream)
@pytest.mark.parametrize('minio_bin_var', [
'minio_server_bin',
'minio_client_bin',
])
def test_minio_installed(host, AnsibleDefaults, minio_bin_var):
f = host.file(AnsibleDefaults[minio_bin_var])
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
assert oct(f.mode) == '0o755'
def test_minio_server_data_directories(host, AnsibleDefaults):
for datadir in AnsibleDefaults['minio_server_datadirs']:
d = host.file(datadir)
assert d.is_directory
assert d.exists
assert d.user == AnsibleDefaults['minio_user']
assert d.group == AnsibleDefaults['minio_group']
assert oct(d.mode) == '0o750'
def test_minio_server_webserver(host):
assert host.socket('tcp://127.0.0.1:9091').is_listening
| 26.666667 | 85 | 0.701667 |
1507cd3ffe3d3a55f10b208149cfa96c4b69a10e | 176 | py | Python | Sakura_Infinity/__init__.py | ranyong1997/Sakura_Infinity | 700c3e8497077b266958f1d26525469d1f0cd87f | [
"MIT"
] | 1 | 2022-03-24T05:33:30.000Z | 2022-03-24T05:33:30.000Z | Sakura_Infinity/__init__.py | ranyong1997/Sakura_Infinity | 700c3e8497077b266958f1d26525469d1f0cd87f | [
"MIT"
] | null | null | null | Sakura_Infinity/__init__.py | ranyong1997/Sakura_Infinity | 700c3e8497077b266958f1d26525469d1f0cd87f | [
"MIT"
] | null | null | null | import pymysql
# from .celery import app as celery_app
# Connect to MySQL; work around the driver version-check error
pymysql.version_info = (1, 4, 14, "final", 0)
pymysql.install_as_MySQLdb()
# __all__ = ('celery_app',) | 22 | 45 | 0.727273 |
306c32aa9d2bcafd22318534c7f68c2eef7eb5c9 | 168,639 | py | Python | eu.modelwriter.smtlib.texteditor/lib/z3-4.8.4/win/python/z3/z3core.py | ModelWriter/smtlib-tool | b075a8b6bf6188134a50f3884aad480d468fe558 | [
"MIT"
] | null | null | null | eu.modelwriter.smtlib.texteditor/lib/z3-4.8.4/win/python/z3/z3core.py | ModelWriter/smtlib-tool | b075a8b6bf6188134a50f3884aad480d468fe558 | [
"MIT"
] | null | null | null | eu.modelwriter.smtlib.texteditor/lib/z3-4.8.4/win/python/z3/z3core.py | ModelWriter/smtlib-tool | b075a8b6bf6188134a50f3884aad480d468fe558 | [
"MIT"
] | null | null | null |
# Automatically generated file
import sys, os
import ctypes
import pkg_resources
from .z3types import *
from .z3consts import *
_ext = 'dll' if sys.platform in ('win32', 'cygwin') else 'dylib' if sys.platform == 'darwin' else 'so'
_lib = None
_default_dirs = ['.',
os.path.dirname(os.path.abspath(__file__)),
pkg_resources.resource_filename('z3', 'lib'),
os.path.join(sys.prefix, 'lib'),
None]
_all_dirs = []
if sys.version < '3':
import __builtin__
if hasattr(__builtin__, "Z3_LIB_DIRS"):
_all_dirs = __builtin__.Z3_LIB_DIRS
else:
import builtins
if hasattr(builtins, "Z3_LIB_DIRS"):
_all_dirs = builtins.Z3_LIB_DIRS
for v in ('Z3_LIBRARY_PATH', 'PATH', 'PYTHONPATH'):
if v in os.environ:
lp = os.environ[v];
lds = lp.split(';') if sys.platform == 'win32' else lp.split(':')
_all_dirs.extend(lds)
_all_dirs.extend(_default_dirs)
_failures = []
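# Probe each candidate directory for the native libz3; exceptions are kept for the diagnostic message below.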
for d in _all_dirs:
try:
d = os.path.realpath(d)
if os.path.isdir(d):
d = os.path.join(d, 'libz3.%s' % _ext)
if os.path.isfile(d):
_lib = ctypes.CDLL(d)
break
except Exception as e:
_failures += [e]
pass
if _lib is None:
# If all else failed, ask the system to find it.
try:
_lib = ctypes.CDLL('libz3.%s' % _ext)
except Exception as e:
_failures += [e]
pass
if _lib is None:
print("Could not find libz3.%s; consider adding the directory containing it to" % _ext)
print(" - your system's PATH environment variable,")
print(" - the Z3_LIBRARY_PATH environment variable, or ")
print(" - to the custom Z3_LIBRARY_DIRS Python-builtin before importing the z3 module, e.g. via")
if sys.version < '3':
print(" import __builtin__")
print(" __builtin__.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
else:
print(" import builtins")
print(" builtins.Z3_LIB_DIRS = [ '/path/to/libz3.%s' ] " % _ext)
raise Z3Exception("libz3.%s not found." % _ext)
def _to_ascii(s):
if isinstance(s, str):
try:
return s.encode('ascii')
except:
# kick the bucket down the road. :-J
return s
else:
return s
if sys.version < '3':
def _to_pystr(s):
return s
else:
def _to_pystr(s):
if s is not None:
enc = sys.stdout.encoding
if enc is not None: return s.decode(enc)
else: return s.decode('ascii')
else:
return ""
_error_handler_type = ctypes.CFUNCTYPE(None, ctypes.c_void_p, ctypes.c_uint)
_lib.Z3_set_error_handler.restype = None
_lib.Z3_set_error_handler.argtypes = [ContextObj, _error_handler_type]
_lib.Z3_global_param_set.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
_lib.Z3_global_param_reset_all.argtypes = []
_lib.Z3_global_param_get.restype = ctypes.c_bool
_lib.Z3_global_param_get.argtypes = [ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)]
_lib.Z3_mk_config.restype = Config
_lib.Z3_mk_config.argtypes = []
_lib.Z3_del_config.argtypes = [Config]
_lib.Z3_set_param_value.argtypes = [Config, ctypes.c_char_p, ctypes.c_char_p]
_lib.Z3_mk_context.restype = ContextObj
_lib.Z3_mk_context.argtypes = [Config]
_lib.Z3_mk_context_rc.restype = ContextObj
_lib.Z3_mk_context_rc.argtypes = [Config]
_lib.Z3_del_context.argtypes = [ContextObj]
_lib.Z3_inc_ref.argtypes = [ContextObj, Ast]
_lib.Z3_dec_ref.argtypes = [ContextObj, Ast]
_lib.Z3_update_param_value.argtypes = [ContextObj, ctypes.c_char_p, ctypes.c_char_p]
_lib.Z3_interrupt.argtypes = [ContextObj]
_lib.Z3_mk_params.restype = Params
_lib.Z3_mk_params.argtypes = [ContextObj]
_lib.Z3_params_inc_ref.argtypes = [ContextObj, Params]
_lib.Z3_params_dec_ref.argtypes = [ContextObj, Params]
_lib.Z3_params_set_bool.argtypes = [ContextObj, Params, Symbol, ctypes.c_bool]
_lib.Z3_params_set_uint.argtypes = [ContextObj, Params, Symbol, ctypes.c_uint]
_lib.Z3_params_set_double.argtypes = [ContextObj, Params, Symbol, ctypes.c_double]
_lib.Z3_params_set_symbol.argtypes = [ContextObj, Params, Symbol, Symbol]
_lib.Z3_params_to_string.restype = ctypes.c_char_p
_lib.Z3_params_to_string.argtypes = [ContextObj, Params]
_lib.Z3_params_validate.argtypes = [ContextObj, Params, ParamDescrs]
_lib.Z3_param_descrs_inc_ref.argtypes = [ContextObj, ParamDescrs]
_lib.Z3_param_descrs_dec_ref.argtypes = [ContextObj, ParamDescrs]
_lib.Z3_param_descrs_get_kind.restype = ctypes.c_uint
_lib.Z3_param_descrs_get_kind.argtypes = [ContextObj, ParamDescrs, Symbol]
_lib.Z3_param_descrs_size.restype = ctypes.c_uint
_lib.Z3_param_descrs_size.argtypes = [ContextObj, ParamDescrs]
_lib.Z3_param_descrs_get_name.restype = Symbol
_lib.Z3_param_descrs_get_name.argtypes = [ContextObj, ParamDescrs, ctypes.c_uint]
_lib.Z3_param_descrs_get_documentation.restype = ctypes.c_char_p
_lib.Z3_param_descrs_get_documentation.argtypes = [ContextObj, ParamDescrs, Symbol]
_lib.Z3_param_descrs_to_string.restype = ctypes.c_char_p
_lib.Z3_param_descrs_to_string.argtypes = [ContextObj, ParamDescrs]
_lib.Z3_mk_int_symbol.restype = Symbol
_lib.Z3_mk_int_symbol.argtypes = [ContextObj, ctypes.c_int]
_lib.Z3_mk_string_symbol.restype = Symbol
_lib.Z3_mk_string_symbol.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_mk_uninterpreted_sort.restype = Sort
_lib.Z3_mk_uninterpreted_sort.argtypes = [ContextObj, Symbol]
_lib.Z3_mk_bool_sort.restype = Sort
_lib.Z3_mk_bool_sort.argtypes = [ContextObj]
_lib.Z3_mk_int_sort.restype = Sort
_lib.Z3_mk_int_sort.argtypes = [ContextObj]
_lib.Z3_mk_real_sort.restype = Sort
_lib.Z3_mk_real_sort.argtypes = [ContextObj]
_lib.Z3_mk_bv_sort.restype = Sort
_lib.Z3_mk_bv_sort.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_mk_finite_domain_sort.restype = Sort
_lib.Z3_mk_finite_domain_sort.argtypes = [ContextObj, Symbol, ctypes.c_ulonglong]
_lib.Z3_mk_array_sort.restype = Sort
_lib.Z3_mk_array_sort.argtypes = [ContextObj, Sort, Sort]
_lib.Z3_mk_array_sort_n.restype = Sort
_lib.Z3_mk_array_sort_n.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Sort), Sort]
_lib.Z3_mk_tuple_sort.restype = Sort
_lib.Z3_mk_tuple_sort.argtypes = [ContextObj, Symbol, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(Sort), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl)]
_lib.Z3_mk_enumeration_sort.restype = Sort
_lib.Z3_mk_enumeration_sort.argtypes = [ContextObj, Symbol, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl)]
_lib.Z3_mk_list_sort.restype = Sort
_lib.Z3_mk_list_sort.argtypes = [ContextObj, Symbol, Sort, ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl)]
_lib.Z3_mk_constructor.restype = Constructor
_lib.Z3_mk_constructor.argtypes = [ContextObj, Symbol, Symbol, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(Sort), ctypes.POINTER(ctypes.c_uint)]
_lib.Z3_del_constructor.argtypes = [ContextObj, Constructor]
_lib.Z3_mk_datatype.restype = Sort
_lib.Z3_mk_datatype.argtypes = [ContextObj, Symbol, ctypes.c_uint, ctypes.POINTER(Constructor)]
_lib.Z3_mk_constructor_list.restype = ConstructorList
_lib.Z3_mk_constructor_list.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Constructor)]
_lib.Z3_del_constructor_list.argtypes = [ContextObj, ConstructorList]
_lib.Z3_mk_datatypes.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(Sort), ctypes.POINTER(ConstructorList)]
_lib.Z3_query_constructor.argtypes = [ContextObj, Constructor, ctypes.c_uint, ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl), ctypes.POINTER(FuncDecl)]
_lib.Z3_mk_func_decl.restype = FuncDecl
_lib.Z3_mk_func_decl.argtypes = [ContextObj, Symbol, ctypes.c_uint, ctypes.POINTER(Sort), Sort]
_lib.Z3_mk_app.restype = Ast
_lib.Z3_mk_app.argtypes = [ContextObj, FuncDecl, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_const.restype = Ast
_lib.Z3_mk_const.argtypes = [ContextObj, Symbol, Sort]
_lib.Z3_mk_fresh_func_decl.restype = FuncDecl
_lib.Z3_mk_fresh_func_decl.argtypes = [ContextObj, ctypes.c_char_p, ctypes.c_uint, ctypes.POINTER(Sort), Sort]
_lib.Z3_mk_fresh_const.restype = Ast
_lib.Z3_mk_fresh_const.argtypes = [ContextObj, ctypes.c_char_p, Sort]
_lib.Z3_mk_rec_func_decl.restype = FuncDecl
_lib.Z3_mk_rec_func_decl.argtypes = [ContextObj, Symbol, ctypes.c_uint, ctypes.POINTER(Sort), Sort]
_lib.Z3_add_rec_def.argtypes = [ContextObj, FuncDecl, ctypes.c_uint, ctypes.POINTER(Ast), Ast]
_lib.Z3_mk_true.restype = Ast
_lib.Z3_mk_true.argtypes = [ContextObj]
_lib.Z3_mk_false.restype = Ast
_lib.Z3_mk_false.argtypes = [ContextObj]
_lib.Z3_mk_eq.restype = Ast
_lib.Z3_mk_eq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_distinct.restype = Ast
_lib.Z3_mk_distinct.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_not.restype = Ast
_lib.Z3_mk_not.argtypes = [ContextObj, Ast]
_lib.Z3_mk_ite.restype = Ast
_lib.Z3_mk_ite.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_iff.restype = Ast
_lib.Z3_mk_iff.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_implies.restype = Ast
_lib.Z3_mk_implies.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_xor.restype = Ast
_lib.Z3_mk_xor.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_and.restype = Ast
_lib.Z3_mk_and.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_or.restype = Ast
_lib.Z3_mk_or.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
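
# Integer/real arithmetic terms, comparisons, and int/real coercions.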
_lib.Z3_mk_add.restype = Ast
_lib.Z3_mk_add.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_mul.restype = Ast
_lib.Z3_mk_mul.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_sub.restype = Ast
_lib.Z3_mk_sub.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_unary_minus.restype = Ast
_lib.Z3_mk_unary_minus.argtypes = [ContextObj, Ast]
_lib.Z3_mk_div.restype = Ast
_lib.Z3_mk_div.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_mod.restype = Ast
_lib.Z3_mk_mod.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_rem.restype = Ast
_lib.Z3_mk_rem.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_power.restype = Ast
_lib.Z3_mk_power.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_lt.restype = Ast
_lib.Z3_mk_lt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_le.restype = Ast
_lib.Z3_mk_le.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_gt.restype = Ast
_lib.Z3_mk_gt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_ge.restype = Ast
_lib.Z3_mk_ge.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_int2real.restype = Ast
_lib.Z3_mk_int2real.argtypes = [ContextObj, Ast]
_lib.Z3_mk_real2int.restype = Ast
_lib.Z3_mk_real2int.argtypes = [ContextObj, Ast]
_lib.Z3_mk_is_int.restype = Ast
_lib.Z3_mk_is_int.argtypes = [ContextObj, Ast]
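
# Bit-vector terms (Z3_mk_bv*): logical, arithmetic, comparison, shift/rotate, conversions, and overflow checks.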
_lib.Z3_mk_bvnot.restype = Ast
_lib.Z3_mk_bvnot.argtypes = [ContextObj, Ast]
_lib.Z3_mk_bvredand.restype = Ast
_lib.Z3_mk_bvredand.argtypes = [ContextObj, Ast]
_lib.Z3_mk_bvredor.restype = Ast
_lib.Z3_mk_bvredor.argtypes = [ContextObj, Ast]
_lib.Z3_mk_bvand.restype = Ast
_lib.Z3_mk_bvand.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvor.restype = Ast
_lib.Z3_mk_bvor.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvxor.restype = Ast
_lib.Z3_mk_bvxor.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvnand.restype = Ast
_lib.Z3_mk_bvnand.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvnor.restype = Ast
_lib.Z3_mk_bvnor.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvxnor.restype = Ast
_lib.Z3_mk_bvxnor.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvneg.restype = Ast
_lib.Z3_mk_bvneg.argtypes = [ContextObj, Ast]
_lib.Z3_mk_bvadd.restype = Ast
_lib.Z3_mk_bvadd.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsub.restype = Ast
_lib.Z3_mk_bvsub.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvmul.restype = Ast
_lib.Z3_mk_bvmul.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvudiv.restype = Ast
_lib.Z3_mk_bvudiv.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsdiv.restype = Ast
_lib.Z3_mk_bvsdiv.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvurem.restype = Ast
_lib.Z3_mk_bvurem.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsrem.restype = Ast
_lib.Z3_mk_bvsrem.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsmod.restype = Ast
_lib.Z3_mk_bvsmod.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvult.restype = Ast
_lib.Z3_mk_bvult.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvslt.restype = Ast
_lib.Z3_mk_bvslt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvule.restype = Ast
_lib.Z3_mk_bvule.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsle.restype = Ast
_lib.Z3_mk_bvsle.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvuge.restype = Ast
_lib.Z3_mk_bvuge.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsge.restype = Ast
_lib.Z3_mk_bvsge.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvugt.restype = Ast
_lib.Z3_mk_bvugt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsgt.restype = Ast
_lib.Z3_mk_bvsgt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_concat.restype = Ast
_lib.Z3_mk_concat.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_extract.restype = Ast
_lib.Z3_mk_extract.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint, Ast]
_lib.Z3_mk_sign_ext.restype = Ast
_lib.Z3_mk_sign_ext.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_zero_ext.restype = Ast
_lib.Z3_mk_zero_ext.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_repeat.restype = Ast
_lib.Z3_mk_repeat.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_bvshl.restype = Ast
_lib.Z3_mk_bvshl.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvlshr.restype = Ast
_lib.Z3_mk_bvlshr.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvashr.restype = Ast
_lib.Z3_mk_bvashr.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_rotate_left.restype = Ast
_lib.Z3_mk_rotate_left.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_rotate_right.restype = Ast
_lib.Z3_mk_rotate_right.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_ext_rotate_left.restype = Ast
_lib.Z3_mk_ext_rotate_left.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_ext_rotate_right.restype = Ast
_lib.Z3_mk_ext_rotate_right.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_int2bv.restype = Ast
_lib.Z3_mk_int2bv.argtypes = [ContextObj, ctypes.c_uint, Ast]
_lib.Z3_mk_bv2int.restype = Ast
_lib.Z3_mk_bv2int.argtypes = [ContextObj, Ast, ctypes.c_bool]
_lib.Z3_mk_bvadd_no_overflow.restype = Ast
_lib.Z3_mk_bvadd_no_overflow.argtypes = [ContextObj, Ast, Ast, ctypes.c_bool]
_lib.Z3_mk_bvadd_no_underflow.restype = Ast
_lib.Z3_mk_bvadd_no_underflow.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsub_no_overflow.restype = Ast
_lib.Z3_mk_bvsub_no_overflow.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvsub_no_underflow.restype = Ast
_lib.Z3_mk_bvsub_no_underflow.argtypes = [ContextObj, Ast, Ast, ctypes.c_bool]
_lib.Z3_mk_bvsdiv_no_overflow.restype = Ast
_lib.Z3_mk_bvsdiv_no_overflow.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_bvneg_no_overflow.restype = Ast
_lib.Z3_mk_bvneg_no_overflow.argtypes = [ContextObj, Ast]
_lib.Z3_mk_bvmul_no_overflow.restype = Ast
_lib.Z3_mk_bvmul_no_overflow.argtypes = [ContextObj, Ast, Ast, ctypes.c_bool]
_lib.Z3_mk_bvmul_no_underflow.restype = Ast
_lib.Z3_mk_bvmul_no_underflow.argtypes = [ContextObj, Ast, Ast]
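
# Array and set terms (select/store, constant arrays, map, set operations, array extensionality).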
_lib.Z3_mk_select.restype = Ast
_lib.Z3_mk_select.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_select_n.restype = Ast
_lib.Z3_mk_select_n.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_store.restype = Ast
_lib.Z3_mk_store.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_store_n.restype = Ast
_lib.Z3_mk_store_n.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast), Ast]
_lib.Z3_mk_const_array.restype = Ast
_lib.Z3_mk_const_array.argtypes = [ContextObj, Sort, Ast]
_lib.Z3_mk_map.restype = Ast
_lib.Z3_mk_map.argtypes = [ContextObj, FuncDecl, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_array_default.restype = Ast
_lib.Z3_mk_array_default.argtypes = [ContextObj, Ast]
_lib.Z3_mk_as_array.restype = Ast
_lib.Z3_mk_as_array.argtypes = [ContextObj, FuncDecl]
_lib.Z3_mk_set_sort.restype = Sort
_lib.Z3_mk_set_sort.argtypes = [ContextObj, Sort]
_lib.Z3_mk_empty_set.restype = Ast
_lib.Z3_mk_empty_set.argtypes = [ContextObj, Sort]
_lib.Z3_mk_full_set.restype = Ast
_lib.Z3_mk_full_set.argtypes = [ContextObj, Sort]
_lib.Z3_mk_set_add.restype = Ast
_lib.Z3_mk_set_add.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_set_del.restype = Ast
_lib.Z3_mk_set_del.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_set_union.restype = Ast
_lib.Z3_mk_set_union.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_set_intersect.restype = Ast
_lib.Z3_mk_set_intersect.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_set_difference.restype = Ast
_lib.Z3_mk_set_difference.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_set_complement.restype = Ast
_lib.Z3_mk_set_complement.argtypes = [ContextObj, Ast]
_lib.Z3_mk_set_member.restype = Ast
_lib.Z3_mk_set_member.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_set_subset.restype = Ast
_lib.Z3_mk_set_subset.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_array_ext.restype = Ast
_lib.Z3_mk_array_ext.argtypes = [ContextObj, Ast, Ast]
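
# Numeral construction (string, machine-integer, and bit-vector numerals).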
_lib.Z3_mk_numeral.restype = Ast
_lib.Z3_mk_numeral.argtypes = [ContextObj, ctypes.c_char_p, Sort]
_lib.Z3_mk_real.restype = Ast
_lib.Z3_mk_real.argtypes = [ContextObj, ctypes.c_int, ctypes.c_int]
_lib.Z3_mk_int.restype = Ast
_lib.Z3_mk_int.argtypes = [ContextObj, ctypes.c_int, Sort]
_lib.Z3_mk_unsigned_int.restype = Ast
_lib.Z3_mk_unsigned_int.argtypes = [ContextObj, ctypes.c_uint, Sort]
_lib.Z3_mk_int64.restype = Ast
_lib.Z3_mk_int64.argtypes = [ContextObj, ctypes.c_longlong, Sort]
_lib.Z3_mk_unsigned_int64.restype = Ast
_lib.Z3_mk_unsigned_int64.argtypes = [ContextObj, ctypes.c_ulonglong, Sort]
_lib.Z3_mk_bv_numeral.restype = Ast
_lib.Z3_mk_bv_numeral.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(ctypes.c_bool)]
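
# Sequence, string, and regular-expression sorts and terms.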
_lib.Z3_mk_seq_sort.restype = Sort
_lib.Z3_mk_seq_sort.argtypes = [ContextObj, Sort]
_lib.Z3_is_seq_sort.restype = ctypes.c_bool
_lib.Z3_is_seq_sort.argtypes = [ContextObj, Sort]
_lib.Z3_mk_re_sort.restype = Sort
_lib.Z3_mk_re_sort.argtypes = [ContextObj, Sort]
_lib.Z3_is_re_sort.restype = ctypes.c_bool
_lib.Z3_is_re_sort.argtypes = [ContextObj, Sort]
_lib.Z3_mk_string_sort.restype = Sort
_lib.Z3_mk_string_sort.argtypes = [ContextObj]
_lib.Z3_is_string_sort.restype = ctypes.c_bool
_lib.Z3_is_string_sort.argtypes = [ContextObj, Sort]
_lib.Z3_mk_string.restype = Ast
_lib.Z3_mk_string.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_is_string.restype = ctypes.c_bool
_lib.Z3_is_string.argtypes = [ContextObj, Ast]
_lib.Z3_get_string.restype = ctypes.c_char_p
_lib.Z3_get_string.argtypes = [ContextObj, Ast]
_lib.Z3_mk_seq_empty.restype = Ast
_lib.Z3_mk_seq_empty.argtypes = [ContextObj, Sort]
_lib.Z3_mk_seq_unit.restype = Ast
_lib.Z3_mk_seq_unit.argtypes = [ContextObj, Ast]
_lib.Z3_mk_seq_concat.restype = Ast
_lib.Z3_mk_seq_concat.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_seq_prefix.restype = Ast
_lib.Z3_mk_seq_prefix.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_seq_suffix.restype = Ast
_lib.Z3_mk_seq_suffix.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_seq_contains.restype = Ast
_lib.Z3_mk_seq_contains.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_seq_extract.restype = Ast
_lib.Z3_mk_seq_extract.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_seq_replace.restype = Ast
_lib.Z3_mk_seq_replace.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_seq_at.restype = Ast
_lib.Z3_mk_seq_at.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_seq_length.restype = Ast
_lib.Z3_mk_seq_length.argtypes = [ContextObj, Ast]
_lib.Z3_mk_seq_index.restype = Ast
_lib.Z3_mk_seq_index.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_str_to_int.restype = Ast
_lib.Z3_mk_str_to_int.argtypes = [ContextObj, Ast]
_lib.Z3_mk_int_to_str.restype = Ast
_lib.Z3_mk_int_to_str.argtypes = [ContextObj, Ast]
_lib.Z3_mk_seq_to_re.restype = Ast
_lib.Z3_mk_seq_to_re.argtypes = [ContextObj, Ast]
_lib.Z3_mk_seq_in_re.restype = Ast
_lib.Z3_mk_seq_in_re.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_re_plus.restype = Ast
_lib.Z3_mk_re_plus.argtypes = [ContextObj, Ast]
_lib.Z3_mk_re_star.restype = Ast
_lib.Z3_mk_re_star.argtypes = [ContextObj, Ast]
_lib.Z3_mk_re_option.restype = Ast
_lib.Z3_mk_re_option.argtypes = [ContextObj, Ast]
_lib.Z3_mk_re_union.restype = Ast
_lib.Z3_mk_re_union.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_re_concat.restype = Ast
_lib.Z3_mk_re_concat.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_re_range.restype = Ast
_lib.Z3_mk_re_range.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_re_loop.restype = Ast
_lib.Z3_mk_re_loop.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.c_uint]
_lib.Z3_mk_re_intersect.restype = Ast
_lib.Z3_mk_re_intersect.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_re_complement.restype = Ast
_lib.Z3_mk_re_complement.argtypes = [ContextObj, Ast]
_lib.Z3_mk_re_empty.restype = Ast
_lib.Z3_mk_re_empty.argtypes = [ContextObj, Sort]
_lib.Z3_mk_re_full.restype = Ast
_lib.Z3_mk_re_full.argtypes = [ContextObj, Sort]
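
# Patterns, bound variables, quantifiers, and lambdas.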
_lib.Z3_mk_pattern.restype = Pattern
_lib.Z3_mk_pattern.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_mk_bound.restype = Ast
_lib.Z3_mk_bound.argtypes = [ContextObj, ctypes.c_uint, Sort]
_lib.Z3_mk_forall.restype = Ast
_lib.Z3_mk_forall.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Pattern), ctypes.c_uint, ctypes.POINTER(Sort), ctypes.POINTER(Symbol), Ast]
_lib.Z3_mk_exists.restype = Ast
_lib.Z3_mk_exists.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Pattern), ctypes.c_uint, ctypes.POINTER(Sort), ctypes.POINTER(Symbol), Ast]
_lib.Z3_mk_quantifier.restype = Ast
_lib.Z3_mk_quantifier.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Pattern), ctypes.c_uint, ctypes.POINTER(Sort), ctypes.POINTER(Symbol), Ast]
_lib.Z3_mk_quantifier_ex.restype = Ast
_lib.Z3_mk_quantifier_ex.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_uint, Symbol, Symbol, ctypes.c_uint, ctypes.POINTER(Pattern), ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint, ctypes.POINTER(Sort), ctypes.POINTER(Symbol), Ast]
_lib.Z3_mk_forall_const.restype = Ast
_lib.Z3_mk_forall_const.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint, ctypes.POINTER(Pattern), Ast]
_lib.Z3_mk_exists_const.restype = Ast
_lib.Z3_mk_exists_const.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint, ctypes.POINTER(Pattern), Ast]
_lib.Z3_mk_quantifier_const.restype = Ast
_lib.Z3_mk_quantifier_const.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_uint, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint, ctypes.POINTER(Pattern), Ast]
_lib.Z3_mk_quantifier_const_ex.restype = Ast
_lib.Z3_mk_quantifier_const_ex.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_uint, Symbol, Symbol, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint, ctypes.POINTER(Pattern), ctypes.c_uint, ctypes.POINTER(Ast), Ast]
_lib.Z3_mk_lambda.restype = Ast
_lib.Z3_mk_lambda.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Sort), ctypes.POINTER(Symbol), Ast]
_lib.Z3_mk_lambda_const.restype = Ast
_lib.Z3_mk_lambda_const.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), Ast]
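
# Accessors: symbols, sorts, declarations, applications, numerals, patterns,
# quantifiers, and pseudo-Boolean/cardinality constraint constructors.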
_lib.Z3_get_symbol_kind.restype = ctypes.c_uint
_lib.Z3_get_symbol_kind.argtypes = [ContextObj, Symbol]
_lib.Z3_get_symbol_int.restype = ctypes.c_int
_lib.Z3_get_symbol_int.argtypes = [ContextObj, Symbol]
_lib.Z3_get_symbol_string.restype = ctypes.c_char_p
_lib.Z3_get_symbol_string.argtypes = [ContextObj, Symbol]
_lib.Z3_get_sort_name.restype = Symbol
_lib.Z3_get_sort_name.argtypes = [ContextObj, Sort]
_lib.Z3_get_sort_id.restype = ctypes.c_uint
_lib.Z3_get_sort_id.argtypes = [ContextObj, Sort]
_lib.Z3_sort_to_ast.restype = Ast
_lib.Z3_sort_to_ast.argtypes = [ContextObj, Sort]
_lib.Z3_is_eq_sort.restype = ctypes.c_bool
_lib.Z3_is_eq_sort.argtypes = [ContextObj, Sort, Sort]
_lib.Z3_get_sort_kind.restype = ctypes.c_uint
_lib.Z3_get_sort_kind.argtypes = [ContextObj, Sort]
_lib.Z3_get_bv_sort_size.restype = ctypes.c_uint
_lib.Z3_get_bv_sort_size.argtypes = [ContextObj, Sort]
_lib.Z3_get_finite_domain_sort_size.restype = ctypes.c_bool
_lib.Z3_get_finite_domain_sort_size.argtypes = [ContextObj, Sort, ctypes.POINTER(ctypes.c_ulonglong)]
_lib.Z3_get_array_sort_domain.restype = Sort
_lib.Z3_get_array_sort_domain.argtypes = [ContextObj, Sort]
_lib.Z3_get_array_sort_range.restype = Sort
_lib.Z3_get_array_sort_range.argtypes = [ContextObj, Sort]
_lib.Z3_get_tuple_sort_mk_decl.restype = FuncDecl
_lib.Z3_get_tuple_sort_mk_decl.argtypes = [ContextObj, Sort]
_lib.Z3_get_tuple_sort_num_fields.restype = ctypes.c_uint
_lib.Z3_get_tuple_sort_num_fields.argtypes = [ContextObj, Sort]
_lib.Z3_get_tuple_sort_field_decl.restype = FuncDecl
_lib.Z3_get_tuple_sort_field_decl.argtypes = [ContextObj, Sort, ctypes.c_uint]
_lib.Z3_get_datatype_sort_num_constructors.restype = ctypes.c_uint
_lib.Z3_get_datatype_sort_num_constructors.argtypes = [ContextObj, Sort]
_lib.Z3_get_datatype_sort_constructor.restype = FuncDecl
_lib.Z3_get_datatype_sort_constructor.argtypes = [ContextObj, Sort, ctypes.c_uint]
_lib.Z3_get_datatype_sort_recognizer.restype = FuncDecl
_lib.Z3_get_datatype_sort_recognizer.argtypes = [ContextObj, Sort, ctypes.c_uint]
_lib.Z3_get_datatype_sort_constructor_accessor.restype = FuncDecl
_lib.Z3_get_datatype_sort_constructor_accessor.argtypes = [ContextObj, Sort, ctypes.c_uint, ctypes.c_uint]
_lib.Z3_datatype_update_field.restype = Ast
_lib.Z3_datatype_update_field.argtypes = [ContextObj, FuncDecl, Ast, Ast]
_lib.Z3_get_relation_arity.restype = ctypes.c_uint
_lib.Z3_get_relation_arity.argtypes = [ContextObj, Sort]
_lib.Z3_get_relation_column.restype = Sort
_lib.Z3_get_relation_column.argtypes = [ContextObj, Sort, ctypes.c_uint]
_lib.Z3_mk_atmost.restype = Ast
_lib.Z3_mk_atmost.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint]
_lib.Z3_mk_atleast.restype = Ast
_lib.Z3_mk_atleast.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.c_uint]
_lib.Z3_mk_pble.restype = Ast
_lib.Z3_mk_pble.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.POINTER(ctypes.c_int), ctypes.c_int]
_lib.Z3_mk_pbge.restype = Ast
_lib.Z3_mk_pbge.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.POINTER(ctypes.c_int), ctypes.c_int]
_lib.Z3_mk_pbeq.restype = Ast
_lib.Z3_mk_pbeq.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.POINTER(ctypes.c_int), ctypes.c_int]
_lib.Z3_func_decl_to_ast.restype = Ast
_lib.Z3_func_decl_to_ast.argtypes = [ContextObj, FuncDecl]
_lib.Z3_is_eq_func_decl.restype = ctypes.c_bool
_lib.Z3_is_eq_func_decl.argtypes = [ContextObj, FuncDecl, FuncDecl]
_lib.Z3_get_func_decl_id.restype = ctypes.c_uint
_lib.Z3_get_func_decl_id.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_decl_name.restype = Symbol
_lib.Z3_get_decl_name.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_decl_kind.restype = ctypes.c_uint
_lib.Z3_get_decl_kind.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_domain_size.restype = ctypes.c_uint
_lib.Z3_get_domain_size.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_arity.restype = ctypes.c_uint
_lib.Z3_get_arity.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_domain.restype = Sort
_lib.Z3_get_domain.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_range.restype = Sort
_lib.Z3_get_range.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_decl_num_parameters.restype = ctypes.c_uint
_lib.Z3_get_decl_num_parameters.argtypes = [ContextObj, FuncDecl]
_lib.Z3_get_decl_parameter_kind.restype = ctypes.c_uint
_lib.Z3_get_decl_parameter_kind.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_int_parameter.restype = ctypes.c_int
_lib.Z3_get_decl_int_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_double_parameter.restype = ctypes.c_double
_lib.Z3_get_decl_double_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_symbol_parameter.restype = Symbol
_lib.Z3_get_decl_symbol_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_sort_parameter.restype = Sort
_lib.Z3_get_decl_sort_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_ast_parameter.restype = Ast
_lib.Z3_get_decl_ast_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_func_decl_parameter.restype = FuncDecl
_lib.Z3_get_decl_func_decl_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_get_decl_rational_parameter.restype = ctypes.c_char_p
_lib.Z3_get_decl_rational_parameter.argtypes = [ContextObj, FuncDecl, ctypes.c_uint]
_lib.Z3_app_to_ast.restype = Ast
_lib.Z3_app_to_ast.argtypes = [ContextObj, Ast]
_lib.Z3_get_app_decl.restype = FuncDecl
_lib.Z3_get_app_decl.argtypes = [ContextObj, Ast]
_lib.Z3_get_app_num_args.restype = ctypes.c_uint
_lib.Z3_get_app_num_args.argtypes = [ContextObj, Ast]
_lib.Z3_get_app_arg.restype = Ast
_lib.Z3_get_app_arg.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_is_eq_ast.restype = ctypes.c_bool
_lib.Z3_is_eq_ast.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_get_ast_id.restype = ctypes.c_uint
_lib.Z3_get_ast_id.argtypes = [ContextObj, Ast]
_lib.Z3_get_ast_hash.restype = ctypes.c_uint
_lib.Z3_get_ast_hash.argtypes = [ContextObj, Ast]
_lib.Z3_get_sort.restype = Sort
_lib.Z3_get_sort.argtypes = [ContextObj, Ast]
_lib.Z3_is_well_sorted.restype = ctypes.c_bool
_lib.Z3_is_well_sorted.argtypes = [ContextObj, Ast]
_lib.Z3_get_bool_value.restype = ctypes.c_int
_lib.Z3_get_bool_value.argtypes = [ContextObj, Ast]
_lib.Z3_get_ast_kind.restype = ctypes.c_uint
_lib.Z3_get_ast_kind.argtypes = [ContextObj, Ast]
_lib.Z3_is_app.restype = ctypes.c_bool
_lib.Z3_is_app.argtypes = [ContextObj, Ast]
_lib.Z3_is_numeral_ast.restype = ctypes.c_bool
_lib.Z3_is_numeral_ast.argtypes = [ContextObj, Ast]
_lib.Z3_is_algebraic_number.restype = ctypes.c_bool
_lib.Z3_is_algebraic_number.argtypes = [ContextObj, Ast]
_lib.Z3_to_app.restype = Ast
_lib.Z3_to_app.argtypes = [ContextObj, Ast]
_lib.Z3_to_func_decl.restype = FuncDecl
_lib.Z3_to_func_decl.argtypes = [ContextObj, Ast]
_lib.Z3_get_numeral_string.restype = ctypes.c_char_p
_lib.Z3_get_numeral_string.argtypes = [ContextObj, Ast]
_lib.Z3_get_numeral_decimal_string.restype = ctypes.c_char_p
_lib.Z3_get_numeral_decimal_string.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_numeral_double.restype = ctypes.c_double
_lib.Z3_get_numeral_double.argtypes = [ContextObj, Ast]
_lib.Z3_get_numerator.restype = Ast
_lib.Z3_get_numerator.argtypes = [ContextObj, Ast]
_lib.Z3_get_denominator.restype = Ast
_lib.Z3_get_denominator.argtypes = [ContextObj, Ast]
_lib.Z3_get_numeral_small.restype = ctypes.c_bool
_lib.Z3_get_numeral_small.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_longlong), ctypes.POINTER(ctypes.c_longlong)]
_lib.Z3_get_numeral_int.restype = ctypes.c_bool
_lib.Z3_get_numeral_int.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_int)]
_lib.Z3_get_numeral_uint.restype = ctypes.c_bool
_lib.Z3_get_numeral_uint.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_uint)]
_lib.Z3_get_numeral_uint64.restype = ctypes.c_bool
_lib.Z3_get_numeral_uint64.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_ulonglong)]
_lib.Z3_get_numeral_int64.restype = ctypes.c_bool
_lib.Z3_get_numeral_int64.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_longlong)]
_lib.Z3_get_numeral_rational_int64.restype = ctypes.c_bool
_lib.Z3_get_numeral_rational_int64.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_longlong), ctypes.POINTER(ctypes.c_longlong)]
_lib.Z3_get_algebraic_number_lower.restype = Ast
_lib.Z3_get_algebraic_number_lower.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_algebraic_number_upper.restype = Ast
_lib.Z3_get_algebraic_number_upper.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_pattern_to_ast.restype = Ast
_lib.Z3_pattern_to_ast.argtypes = [ContextObj, Pattern]
_lib.Z3_get_pattern_num_terms.restype = ctypes.c_uint
_lib.Z3_get_pattern_num_terms.argtypes = [ContextObj, Pattern]
_lib.Z3_get_pattern.restype = Ast
_lib.Z3_get_pattern.argtypes = [ContextObj, Pattern, ctypes.c_uint]
_lib.Z3_get_index_value.restype = ctypes.c_uint
_lib.Z3_get_index_value.argtypes = [ContextObj, Ast]
_lib.Z3_is_quantifier_forall.restype = ctypes.c_bool
_lib.Z3_is_quantifier_forall.argtypes = [ContextObj, Ast]
_lib.Z3_is_quantifier_exists.restype = ctypes.c_bool
_lib.Z3_is_quantifier_exists.argtypes = [ContextObj, Ast]
_lib.Z3_is_lambda.restype = ctypes.c_bool
_lib.Z3_is_lambda.argtypes = [ContextObj, Ast]
_lib.Z3_get_quantifier_weight.restype = ctypes.c_uint
_lib.Z3_get_quantifier_weight.argtypes = [ContextObj, Ast]
_lib.Z3_get_quantifier_num_patterns.restype = ctypes.c_uint
_lib.Z3_get_quantifier_num_patterns.argtypes = [ContextObj, Ast]
_lib.Z3_get_quantifier_pattern_ast.restype = Pattern
_lib.Z3_get_quantifier_pattern_ast.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_quantifier_num_no_patterns.restype = ctypes.c_uint
_lib.Z3_get_quantifier_num_no_patterns.argtypes = [ContextObj, Ast]
_lib.Z3_get_quantifier_no_pattern_ast.restype = Ast
_lib.Z3_get_quantifier_no_pattern_ast.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_quantifier_num_bound.restype = ctypes.c_uint
_lib.Z3_get_quantifier_num_bound.argtypes = [ContextObj, Ast]
_lib.Z3_get_quantifier_bound_name.restype = Symbol
_lib.Z3_get_quantifier_bound_name.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_quantifier_bound_sort.restype = Sort
_lib.Z3_get_quantifier_bound_sort.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_get_quantifier_body.restype = Ast
_lib.Z3_get_quantifier_body.argtypes = [ContextObj, Ast]
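
# Simplification, term updates, substitution, and inter-context translation.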
_lib.Z3_simplify.restype = Ast
_lib.Z3_simplify.argtypes = [ContextObj, Ast]
_lib.Z3_simplify_ex.restype = Ast
_lib.Z3_simplify_ex.argtypes = [ContextObj, Ast, Params]
_lib.Z3_simplify_get_help.restype = ctypes.c_char_p
_lib.Z3_simplify_get_help.argtypes = [ContextObj]
_lib.Z3_simplify_get_param_descrs.restype = ParamDescrs
_lib.Z3_simplify_get_param_descrs.argtypes = [ContextObj]
_lib.Z3_update_term.restype = Ast
_lib.Z3_update_term.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_substitute.restype = Ast
_lib.Z3_substitute.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.POINTER(Ast)]
_lib.Z3_substitute_vars.restype = Ast
_lib.Z3_substitute_vars.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_translate.restype = Ast
_lib.Z3_translate.argtypes = [ContextObj, Ast, ContextObj]
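
# Models, constant/function interpretations, and function-interpretation entries.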
_lib.Z3_mk_model.restype = Model
_lib.Z3_mk_model.argtypes = [ContextObj]
_lib.Z3_model_inc_ref.argtypes = [ContextObj, Model]
_lib.Z3_model_dec_ref.argtypes = [ContextObj, Model]
_lib.Z3_model_eval.restype = ctypes.c_bool
_lib.Z3_model_eval.argtypes = [ContextObj, Model, Ast, ctypes.c_bool, ctypes.POINTER(Ast)]
_lib.Z3_model_get_const_interp.restype = Ast
_lib.Z3_model_get_const_interp.argtypes = [ContextObj, Model, FuncDecl]
_lib.Z3_model_has_interp.restype = ctypes.c_bool
_lib.Z3_model_has_interp.argtypes = [ContextObj, Model, FuncDecl]
_lib.Z3_model_get_func_interp.restype = FuncInterpObj
_lib.Z3_model_get_func_interp.argtypes = [ContextObj, Model, FuncDecl]
_lib.Z3_model_get_num_consts.restype = ctypes.c_uint
_lib.Z3_model_get_num_consts.argtypes = [ContextObj, Model]
_lib.Z3_model_get_const_decl.restype = FuncDecl
_lib.Z3_model_get_const_decl.argtypes = [ContextObj, Model, ctypes.c_uint]
_lib.Z3_model_get_num_funcs.restype = ctypes.c_uint
_lib.Z3_model_get_num_funcs.argtypes = [ContextObj, Model]
_lib.Z3_model_get_func_decl.restype = FuncDecl
_lib.Z3_model_get_func_decl.argtypes = [ContextObj, Model, ctypes.c_uint]
_lib.Z3_model_get_num_sorts.restype = ctypes.c_uint
_lib.Z3_model_get_num_sorts.argtypes = [ContextObj, Model]
_lib.Z3_model_get_sort.restype = Sort
_lib.Z3_model_get_sort.argtypes = [ContextObj, Model, ctypes.c_uint]
_lib.Z3_model_get_sort_universe.restype = AstVectorObj
_lib.Z3_model_get_sort_universe.argtypes = [ContextObj, Model, Sort]
_lib.Z3_model_translate.restype = Model
_lib.Z3_model_translate.argtypes = [ContextObj, Model, ContextObj]
_lib.Z3_is_as_array.restype = ctypes.c_bool
_lib.Z3_is_as_array.argtypes = [ContextObj, Ast]
_lib.Z3_get_as_array_func_decl.restype = FuncDecl
_lib.Z3_get_as_array_func_decl.argtypes = [ContextObj, Ast]
_lib.Z3_add_func_interp.restype = FuncInterpObj
_lib.Z3_add_func_interp.argtypes = [ContextObj, Model, FuncDecl, Ast]
_lib.Z3_add_const_interp.argtypes = [ContextObj, Model, FuncDecl, Ast]
_lib.Z3_func_interp_inc_ref.argtypes = [ContextObj, FuncInterpObj]
_lib.Z3_func_interp_dec_ref.argtypes = [ContextObj, FuncInterpObj]
_lib.Z3_func_interp_get_num_entries.restype = ctypes.c_uint
_lib.Z3_func_interp_get_num_entries.argtypes = [ContextObj, FuncInterpObj]
_lib.Z3_func_interp_get_entry.restype = FuncEntryObj
_lib.Z3_func_interp_get_entry.argtypes = [ContextObj, FuncInterpObj, ctypes.c_uint]
_lib.Z3_func_interp_get_else.restype = Ast
_lib.Z3_func_interp_get_else.argtypes = [ContextObj, FuncInterpObj]
_lib.Z3_func_interp_set_else.argtypes = [ContextObj, FuncInterpObj, Ast]
_lib.Z3_func_interp_get_arity.restype = ctypes.c_uint
_lib.Z3_func_interp_get_arity.argtypes = [ContextObj, FuncInterpObj]
_lib.Z3_func_interp_add_entry.argtypes = [ContextObj, FuncInterpObj, AstVectorObj, Ast]
_lib.Z3_func_entry_inc_ref.argtypes = [ContextObj, FuncEntryObj]
_lib.Z3_func_entry_dec_ref.argtypes = [ContextObj, FuncEntryObj]
_lib.Z3_func_entry_get_value.restype = Ast
_lib.Z3_func_entry_get_value.argtypes = [ContextObj, FuncEntryObj]
_lib.Z3_func_entry_get_num_args.restype = ctypes.c_uint
_lib.Z3_func_entry_get_num_args.argtypes = [ContextObj, FuncEntryObj]
_lib.Z3_func_entry_get_arg.restype = Ast
_lib.Z3_func_entry_get_arg.argtypes = [ContextObj, FuncEntryObj, ctypes.c_uint]
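
# Logging, warnings, printing, SMT-LIB2 parsing/evaluation, error handling,
# version queries, tracing, and memory management.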
_lib.Z3_open_log.restype = ctypes.c_int
_lib.Z3_open_log.argtypes = [ctypes.c_char_p]
_lib.Z3_append_log.argtypes = [ctypes.c_char_p]
_lib.Z3_close_log.argtypes = []
_lib.Z3_toggle_warning_messages.argtypes = [ctypes.c_bool]
_lib.Z3_set_ast_print_mode.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_ast_to_string.restype = ctypes.c_char_p
_lib.Z3_ast_to_string.argtypes = [ContextObj, Ast]
_lib.Z3_pattern_to_string.restype = ctypes.c_char_p
_lib.Z3_pattern_to_string.argtypes = [ContextObj, Pattern]
_lib.Z3_sort_to_string.restype = ctypes.c_char_p
_lib.Z3_sort_to_string.argtypes = [ContextObj, Sort]
_lib.Z3_func_decl_to_string.restype = ctypes.c_char_p
_lib.Z3_func_decl_to_string.argtypes = [ContextObj, FuncDecl]
_lib.Z3_model_to_string.restype = ctypes.c_char_p
_lib.Z3_model_to_string.argtypes = [ContextObj, Model]
_lib.Z3_benchmark_to_smtlib_string.restype = ctypes.c_char_p
_lib.Z3_benchmark_to_smtlib_string.argtypes = [ContextObj, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_uint, ctypes.POINTER(Ast), Ast]
_lib.Z3_parse_smtlib2_string.restype = AstVectorObj
_lib.Z3_parse_smtlib2_string.argtypes = [ContextObj, ctypes.c_char_p, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(Sort), ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(FuncDecl)]
_lib.Z3_parse_smtlib2_file.restype = AstVectorObj
_lib.Z3_parse_smtlib2_file.argtypes = [ContextObj, ctypes.c_char_p, ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(Sort), ctypes.c_uint, ctypes.POINTER(Symbol), ctypes.POINTER(FuncDecl)]
_lib.Z3_eval_smtlib2_string.restype = ctypes.c_char_p
_lib.Z3_eval_smtlib2_string.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_get_error_code.restype = ctypes.c_uint
_lib.Z3_get_error_code.argtypes = [ContextObj]
_lib.Z3_set_error.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_get_error_msg.restype = ctypes.c_char_p
_lib.Z3_get_error_msg.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_get_version.argtypes = [ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint)]
_lib.Z3_get_full_version.restype = ctypes.c_char_p
_lib.Z3_get_full_version.argtypes = []
_lib.Z3_enable_trace.argtypes = [ctypes.c_char_p]
_lib.Z3_disable_trace.argtypes = [ctypes.c_char_p]
_lib.Z3_reset_memory.argtypes = []
_lib.Z3_finalize_memory.argtypes = []
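
# Goals.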
_lib.Z3_mk_goal.restype = GoalObj
_lib.Z3_mk_goal.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_bool, ctypes.c_bool]
_lib.Z3_goal_inc_ref.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_dec_ref.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_precision.restype = ctypes.c_uint
_lib.Z3_goal_precision.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_assert.argtypes = [ContextObj, GoalObj, Ast]
_lib.Z3_goal_inconsistent.restype = ctypes.c_bool
_lib.Z3_goal_inconsistent.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_depth.restype = ctypes.c_uint
_lib.Z3_goal_depth.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_reset.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_size.restype = ctypes.c_uint
_lib.Z3_goal_size.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_formula.restype = Ast
_lib.Z3_goal_formula.argtypes = [ContextObj, GoalObj, ctypes.c_uint]
_lib.Z3_goal_num_exprs.restype = ctypes.c_uint
_lib.Z3_goal_num_exprs.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_is_decided_sat.restype = ctypes.c_bool
_lib.Z3_goal_is_decided_sat.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_is_decided_unsat.restype = ctypes.c_bool
_lib.Z3_goal_is_decided_unsat.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_translate.restype = GoalObj
_lib.Z3_goal_translate.argtypes = [ContextObj, GoalObj, ContextObj]
_lib.Z3_goal_convert_model.restype = Model
_lib.Z3_goal_convert_model.argtypes = [ContextObj, GoalObj, Model]
_lib.Z3_goal_to_string.restype = ctypes.c_char_p
_lib.Z3_goal_to_string.argtypes = [ContextObj, GoalObj]
_lib.Z3_goal_to_dimacs_string.restype = ctypes.c_char_p
_lib.Z3_goal_to_dimacs_string.argtypes = [ContextObj, GoalObj]
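
# Tactics, probes, and apply results.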
_lib.Z3_mk_tactic.restype = TacticObj
_lib.Z3_mk_tactic.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_tactic_inc_ref.argtypes = [ContextObj, TacticObj]
_lib.Z3_tactic_dec_ref.argtypes = [ContextObj, TacticObj]
_lib.Z3_mk_probe.restype = ProbeObj
_lib.Z3_mk_probe.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_probe_inc_ref.argtypes = [ContextObj, ProbeObj]
_lib.Z3_probe_dec_ref.argtypes = [ContextObj, ProbeObj]
_lib.Z3_tactic_and_then.restype = TacticObj
_lib.Z3_tactic_and_then.argtypes = [ContextObj, TacticObj, TacticObj]
_lib.Z3_tactic_or_else.restype = TacticObj
_lib.Z3_tactic_or_else.argtypes = [ContextObj, TacticObj, TacticObj]
_lib.Z3_tactic_par_or.restype = TacticObj
_lib.Z3_tactic_par_or.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(TacticObj)]
_lib.Z3_tactic_par_and_then.restype = TacticObj
_lib.Z3_tactic_par_and_then.argtypes = [ContextObj, TacticObj, TacticObj]
_lib.Z3_tactic_try_for.restype = TacticObj
_lib.Z3_tactic_try_for.argtypes = [ContextObj, TacticObj, ctypes.c_uint]
_lib.Z3_tactic_when.restype = TacticObj
_lib.Z3_tactic_when.argtypes = [ContextObj, ProbeObj, TacticObj]
_lib.Z3_tactic_cond.restype = TacticObj
_lib.Z3_tactic_cond.argtypes = [ContextObj, ProbeObj, TacticObj, TacticObj]
_lib.Z3_tactic_repeat.restype = TacticObj
_lib.Z3_tactic_repeat.argtypes = [ContextObj, TacticObj, ctypes.c_uint]
_lib.Z3_tactic_skip.restype = TacticObj
_lib.Z3_tactic_skip.argtypes = [ContextObj]
_lib.Z3_tactic_fail.restype = TacticObj
_lib.Z3_tactic_fail.argtypes = [ContextObj]
_lib.Z3_tactic_fail_if.restype = TacticObj
_lib.Z3_tactic_fail_if.argtypes = [ContextObj, ProbeObj]
_lib.Z3_tactic_fail_if_not_decided.restype = TacticObj
_lib.Z3_tactic_fail_if_not_decided.argtypes = [ContextObj]
_lib.Z3_tactic_using_params.restype = TacticObj
_lib.Z3_tactic_using_params.argtypes = [ContextObj, TacticObj, Params]
_lib.Z3_probe_const.restype = ProbeObj
_lib.Z3_probe_const.argtypes = [ContextObj, ctypes.c_double]
_lib.Z3_probe_lt.restype = ProbeObj
_lib.Z3_probe_lt.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_gt.restype = ProbeObj
_lib.Z3_probe_gt.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_le.restype = ProbeObj
_lib.Z3_probe_le.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_ge.restype = ProbeObj
_lib.Z3_probe_ge.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_eq.restype = ProbeObj
_lib.Z3_probe_eq.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_and.restype = ProbeObj
_lib.Z3_probe_and.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_or.restype = ProbeObj
_lib.Z3_probe_or.argtypes = [ContextObj, ProbeObj, ProbeObj]
_lib.Z3_probe_not.restype = ProbeObj
_lib.Z3_probe_not.argtypes = [ContextObj, ProbeObj]
_lib.Z3_get_num_tactics.restype = ctypes.c_uint
_lib.Z3_get_num_tactics.argtypes = [ContextObj]
_lib.Z3_get_tactic_name.restype = ctypes.c_char_p
_lib.Z3_get_tactic_name.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_get_num_probes.restype = ctypes.c_uint
_lib.Z3_get_num_probes.argtypes = [ContextObj]
_lib.Z3_get_probe_name.restype = ctypes.c_char_p
_lib.Z3_get_probe_name.argtypes = [ContextObj, ctypes.c_uint]
_lib.Z3_tactic_get_help.restype = ctypes.c_char_p
_lib.Z3_tactic_get_help.argtypes = [ContextObj, TacticObj]
_lib.Z3_tactic_get_param_descrs.restype = ParamDescrs
_lib.Z3_tactic_get_param_descrs.argtypes = [ContextObj, TacticObj]
_lib.Z3_tactic_get_descr.restype = ctypes.c_char_p
_lib.Z3_tactic_get_descr.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_probe_get_descr.restype = ctypes.c_char_p
_lib.Z3_probe_get_descr.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_probe_apply.restype = ctypes.c_double
_lib.Z3_probe_apply.argtypes = [ContextObj, ProbeObj, GoalObj]
_lib.Z3_tactic_apply.restype = ApplyResultObj
_lib.Z3_tactic_apply.argtypes = [ContextObj, TacticObj, GoalObj]
_lib.Z3_tactic_apply_ex.restype = ApplyResultObj
_lib.Z3_tactic_apply_ex.argtypes = [ContextObj, TacticObj, GoalObj, Params]
_lib.Z3_apply_result_inc_ref.argtypes = [ContextObj, ApplyResultObj]
_lib.Z3_apply_result_dec_ref.argtypes = [ContextObj, ApplyResultObj]
_lib.Z3_apply_result_to_string.restype = ctypes.c_char_p
_lib.Z3_apply_result_to_string.argtypes = [ContextObj, ApplyResultObj]
_lib.Z3_apply_result_get_num_subgoals.restype = ctypes.c_uint
_lib.Z3_apply_result_get_num_subgoals.argtypes = [ContextObj, ApplyResultObj]
_lib.Z3_apply_result_get_subgoal.restype = GoalObj
_lib.Z3_apply_result_get_subgoal.argtypes = [ContextObj, ApplyResultObj, ctypes.c_uint]
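
# Solvers.
# Minimal direct-use sketch (for illustration only; assumes a ContextObj `ctx`
# and an assertion `fml` of type Ast obtained elsewhere through this module --
# the higher-level z3.py API is the usual entry point):
#   s = _lib.Z3_mk_solver(ctx)
#   _lib.Z3_solver_inc_ref(ctx, s)
#   _lib.Z3_solver_assert(ctx, s, fml)
#   res = _lib.Z3_solver_check(ctx, s)   # Z3_lbool returned as ctypes.c_int
#   _lib.Z3_solver_dec_ref(ctx, s)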
_lib.Z3_mk_solver.restype = SolverObj
_lib.Z3_mk_solver.argtypes = [ContextObj]
_lib.Z3_mk_simple_solver.restype = SolverObj
_lib.Z3_mk_simple_solver.argtypes = [ContextObj]
_lib.Z3_mk_solver_for_logic.restype = SolverObj
_lib.Z3_mk_solver_for_logic.argtypes = [ContextObj, Symbol]
_lib.Z3_mk_solver_from_tactic.restype = SolverObj
_lib.Z3_mk_solver_from_tactic.argtypes = [ContextObj, TacticObj]
_lib.Z3_solver_translate.restype = SolverObj
_lib.Z3_solver_translate.argtypes = [ContextObj, SolverObj, ContextObj]
_lib.Z3_solver_import_model_converter.argtypes = [ContextObj, SolverObj, SolverObj]
_lib.Z3_solver_get_help.restype = ctypes.c_char_p
_lib.Z3_solver_get_help.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_param_descrs.restype = ParamDescrs
_lib.Z3_solver_get_param_descrs.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_set_params.argtypes = [ContextObj, SolverObj, Params]
_lib.Z3_solver_inc_ref.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_dec_ref.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_push.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_pop.argtypes = [ContextObj, SolverObj, ctypes.c_uint]
_lib.Z3_solver_reset.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_num_scopes.restype = ctypes.c_uint
_lib.Z3_solver_get_num_scopes.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_assert.argtypes = [ContextObj, SolverObj, Ast]
_lib.Z3_solver_assert_and_track.argtypes = [ContextObj, SolverObj, Ast, Ast]
_lib.Z3_solver_from_file.argtypes = [ContextObj, SolverObj, ctypes.c_char_p]
_lib.Z3_solver_from_string.argtypes = [ContextObj, SolverObj, ctypes.c_char_p]
_lib.Z3_solver_get_assertions.restype = AstVectorObj
_lib.Z3_solver_get_assertions.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_units.restype = AstVectorObj
_lib.Z3_solver_get_units.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_non_units.restype = AstVectorObj
_lib.Z3_solver_get_non_units.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_check.restype = ctypes.c_int
_lib.Z3_solver_check.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_check_assumptions.restype = ctypes.c_int
_lib.Z3_solver_check_assumptions.argtypes = [ContextObj, SolverObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_get_implied_equalities.restype = ctypes.c_int
_lib.Z3_get_implied_equalities.argtypes = [ContextObj, SolverObj, ctypes.c_uint, ctypes.POINTER(Ast), ctypes.POINTER(ctypes.c_uint)]
_lib.Z3_solver_get_consequences.restype = ctypes.c_int
_lib.Z3_solver_get_consequences.argtypes = [ContextObj, SolverObj, AstVectorObj, AstVectorObj, AstVectorObj]
_lib.Z3_solver_cube.restype = AstVectorObj
_lib.Z3_solver_cube.argtypes = [ContextObj, SolverObj, AstVectorObj, ctypes.c_uint]
_lib.Z3_solver_get_model.restype = Model
_lib.Z3_solver_get_model.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_proof.restype = Ast
_lib.Z3_solver_get_proof.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_unsat_core.restype = AstVectorObj
_lib.Z3_solver_get_unsat_core.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_reason_unknown.restype = ctypes.c_char_p
_lib.Z3_solver_get_reason_unknown.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_get_statistics.restype = StatsObj
_lib.Z3_solver_get_statistics.argtypes = [ContextObj, SolverObj]
_lib.Z3_solver_to_string.restype = ctypes.c_char_p
_lib.Z3_solver_to_string.argtypes = [ContextObj, SolverObj]
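
# Statistics.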
_lib.Z3_stats_to_string.restype = ctypes.c_char_p
_lib.Z3_stats_to_string.argtypes = [ContextObj, StatsObj]
_lib.Z3_stats_inc_ref.argtypes = [ContextObj, StatsObj]
_lib.Z3_stats_dec_ref.argtypes = [ContextObj, StatsObj]
_lib.Z3_stats_size.restype = ctypes.c_uint
_lib.Z3_stats_size.argtypes = [ContextObj, StatsObj]
_lib.Z3_stats_get_key.restype = ctypes.c_char_p
_lib.Z3_stats_get_key.argtypes = [ContextObj, StatsObj, ctypes.c_uint]
_lib.Z3_stats_is_uint.restype = ctypes.c_bool
_lib.Z3_stats_is_uint.argtypes = [ContextObj, StatsObj, ctypes.c_uint]
_lib.Z3_stats_is_double.restype = ctypes.c_bool
_lib.Z3_stats_is_double.argtypes = [ContextObj, StatsObj, ctypes.c_uint]
_lib.Z3_stats_get_uint_value.restype = ctypes.c_uint
_lib.Z3_stats_get_uint_value.argtypes = [ContextObj, StatsObj, ctypes.c_uint]
_lib.Z3_stats_get_double_value.restype = ctypes.c_double
_lib.Z3_stats_get_double_value.argtypes = [ContextObj, StatsObj, ctypes.c_uint]
_lib.Z3_get_estimated_alloc_size.restype = ctypes.c_ulonglong
_lib.Z3_get_estimated_alloc_size.argtypes = []
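
# AST vectors and AST maps.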
_lib.Z3_mk_ast_vector.restype = AstVectorObj
_lib.Z3_mk_ast_vector.argtypes = [ContextObj]
_lib.Z3_ast_vector_inc_ref.argtypes = [ContextObj, AstVectorObj]
_lib.Z3_ast_vector_dec_ref.argtypes = [ContextObj, AstVectorObj]
_lib.Z3_ast_vector_size.restype = ctypes.c_uint
_lib.Z3_ast_vector_size.argtypes = [ContextObj, AstVectorObj]
_lib.Z3_ast_vector_get.restype = Ast
_lib.Z3_ast_vector_get.argtypes = [ContextObj, AstVectorObj, ctypes.c_uint]
_lib.Z3_ast_vector_set.argtypes = [ContextObj, AstVectorObj, ctypes.c_uint, Ast]
_lib.Z3_ast_vector_resize.argtypes = [ContextObj, AstVectorObj, ctypes.c_uint]
_lib.Z3_ast_vector_push.argtypes = [ContextObj, AstVectorObj, Ast]
_lib.Z3_ast_vector_translate.restype = AstVectorObj
_lib.Z3_ast_vector_translate.argtypes = [ContextObj, AstVectorObj, ContextObj]
_lib.Z3_ast_vector_to_string.restype = ctypes.c_char_p
_lib.Z3_ast_vector_to_string.argtypes = [ContextObj, AstVectorObj]
_lib.Z3_mk_ast_map.restype = AstMapObj
_lib.Z3_mk_ast_map.argtypes = [ContextObj]
_lib.Z3_ast_map_inc_ref.argtypes = [ContextObj, AstMapObj]
_lib.Z3_ast_map_dec_ref.argtypes = [ContextObj, AstMapObj]
_lib.Z3_ast_map_contains.restype = ctypes.c_bool
_lib.Z3_ast_map_contains.argtypes = [ContextObj, AstMapObj, Ast]
_lib.Z3_ast_map_find.restype = Ast
_lib.Z3_ast_map_find.argtypes = [ContextObj, AstMapObj, Ast]
_lib.Z3_ast_map_insert.argtypes = [ContextObj, AstMapObj, Ast, Ast]
_lib.Z3_ast_map_erase.argtypes = [ContextObj, AstMapObj, Ast]
_lib.Z3_ast_map_reset.argtypes = [ContextObj, AstMapObj]
_lib.Z3_ast_map_size.restype = ctypes.c_uint
_lib.Z3_ast_map_size.argtypes = [ContextObj, AstMapObj]
_lib.Z3_ast_map_keys.restype = AstVectorObj
_lib.Z3_ast_map_keys.argtypes = [ContextObj, AstMapObj]
_lib.Z3_ast_map_to_string.restype = ctypes.c_char_p
_lib.Z3_ast_map_to_string.argtypes = [ContextObj, AstMapObj]
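
# Algebraic numbers and polynomial subresultants.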
_lib.Z3_algebraic_is_value.restype = ctypes.c_bool
_lib.Z3_algebraic_is_value.argtypes = [ContextObj, Ast]
_lib.Z3_algebraic_is_pos.restype = ctypes.c_bool
_lib.Z3_algebraic_is_pos.argtypes = [ContextObj, Ast]
_lib.Z3_algebraic_is_neg.restype = ctypes.c_bool
_lib.Z3_algebraic_is_neg.argtypes = [ContextObj, Ast]
_lib.Z3_algebraic_is_zero.restype = ctypes.c_bool
_lib.Z3_algebraic_is_zero.argtypes = [ContextObj, Ast]
_lib.Z3_algebraic_sign.restype = ctypes.c_int
_lib.Z3_algebraic_sign.argtypes = [ContextObj, Ast]
_lib.Z3_algebraic_add.restype = Ast
_lib.Z3_algebraic_add.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_sub.restype = Ast
_lib.Z3_algebraic_sub.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_mul.restype = Ast
_lib.Z3_algebraic_mul.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_div.restype = Ast
_lib.Z3_algebraic_div.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_root.restype = Ast
_lib.Z3_algebraic_root.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_algebraic_power.restype = Ast
_lib.Z3_algebraic_power.argtypes = [ContextObj, Ast, ctypes.c_uint]
_lib.Z3_algebraic_lt.restype = ctypes.c_bool
_lib.Z3_algebraic_lt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_gt.restype = ctypes.c_bool
_lib.Z3_algebraic_gt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_le.restype = ctypes.c_bool
_lib.Z3_algebraic_le.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_ge.restype = ctypes.c_bool
_lib.Z3_algebraic_ge.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_eq.restype = ctypes.c_bool
_lib.Z3_algebraic_eq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_neq.restype = ctypes.c_bool
_lib.Z3_algebraic_neq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_algebraic_roots.restype = AstVectorObj
_lib.Z3_algebraic_roots.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_algebraic_eval.restype = ctypes.c_int
_lib.Z3_algebraic_eval.argtypes = [ContextObj, Ast, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_polynomial_subresultants.restype = AstVectorObj
_lib.Z3_polynomial_subresultants.argtypes = [ContextObj, Ast, Ast, Ast]
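
# Real closed field (RCF) numerals.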
_lib.Z3_rcf_del.argtypes = [ContextObj, RCFNumObj]
_lib.Z3_rcf_mk_rational.restype = RCFNumObj
_lib.Z3_rcf_mk_rational.argtypes = [ContextObj, ctypes.c_char_p]
_lib.Z3_rcf_mk_small_int.restype = RCFNumObj
_lib.Z3_rcf_mk_small_int.argtypes = [ContextObj, ctypes.c_int]
_lib.Z3_rcf_mk_pi.restype = RCFNumObj
_lib.Z3_rcf_mk_pi.argtypes = [ContextObj]
_lib.Z3_rcf_mk_e.restype = RCFNumObj
_lib.Z3_rcf_mk_e.argtypes = [ContextObj]
_lib.Z3_rcf_mk_infinitesimal.restype = RCFNumObj
_lib.Z3_rcf_mk_infinitesimal.argtypes = [ContextObj]
_lib.Z3_rcf_mk_roots.restype = ctypes.c_uint
_lib.Z3_rcf_mk_roots.argtypes = [ContextObj, ctypes.c_uint, ctypes.POINTER(RCFNumObj), ctypes.POINTER(RCFNumObj)]
_lib.Z3_rcf_add.restype = RCFNumObj
_lib.Z3_rcf_add.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_sub.restype = RCFNumObj
_lib.Z3_rcf_sub.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_mul.restype = RCFNumObj
_lib.Z3_rcf_mul.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_div.restype = RCFNumObj
_lib.Z3_rcf_div.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_neg.restype = RCFNumObj
_lib.Z3_rcf_neg.argtypes = [ContextObj, RCFNumObj]
_lib.Z3_rcf_inv.restype = RCFNumObj
_lib.Z3_rcf_inv.argtypes = [ContextObj, RCFNumObj]
_lib.Z3_rcf_power.restype = RCFNumObj
_lib.Z3_rcf_power.argtypes = [ContextObj, RCFNumObj, ctypes.c_uint]
_lib.Z3_rcf_lt.restype = ctypes.c_bool
_lib.Z3_rcf_lt.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_gt.restype = ctypes.c_bool
_lib.Z3_rcf_gt.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_le.restype = ctypes.c_bool
_lib.Z3_rcf_le.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_ge.restype = ctypes.c_bool
_lib.Z3_rcf_ge.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_eq.restype = ctypes.c_bool
_lib.Z3_rcf_eq.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_neq.restype = ctypes.c_bool
_lib.Z3_rcf_neq.argtypes = [ContextObj, RCFNumObj, RCFNumObj]
_lib.Z3_rcf_num_to_string.restype = ctypes.c_char_p
_lib.Z3_rcf_num_to_string.argtypes = [ContextObj, RCFNumObj, ctypes.c_bool, ctypes.c_bool]
_lib.Z3_rcf_num_to_decimal_string.restype = ctypes.c_char_p
_lib.Z3_rcf_num_to_decimal_string.argtypes = [ContextObj, RCFNumObj, ctypes.c_uint]
_lib.Z3_rcf_get_numerator_denominator.argtypes = [ContextObj, RCFNumObj, ctypes.POINTER(RCFNumObj), ctypes.POINTER(RCFNumObj)]
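
# Fixedpoint (Datalog/Horn-clause) engine.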
_lib.Z3_mk_fixedpoint.restype = FixedpointObj
_lib.Z3_mk_fixedpoint.argtypes = [ContextObj]
_lib.Z3_fixedpoint_inc_ref.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_dec_ref.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_add_rule.argtypes = [ContextObj, FixedpointObj, Ast, Symbol]
_lib.Z3_fixedpoint_add_fact.argtypes = [ContextObj, FixedpointObj, FuncDecl, ctypes.c_uint, ctypes.POINTER(ctypes.c_uint)]
_lib.Z3_fixedpoint_assert.argtypes = [ContextObj, FixedpointObj, Ast]
_lib.Z3_fixedpoint_query.restype = ctypes.c_int
_lib.Z3_fixedpoint_query.argtypes = [ContextObj, FixedpointObj, Ast]
_lib.Z3_fixedpoint_query_relations.restype = ctypes.c_int
_lib.Z3_fixedpoint_query_relations.argtypes = [ContextObj, FixedpointObj, ctypes.c_uint, ctypes.POINTER(FuncDecl)]
_lib.Z3_fixedpoint_get_answer.restype = Ast
_lib.Z3_fixedpoint_get_answer.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_get_reason_unknown.restype = ctypes.c_char_p
_lib.Z3_fixedpoint_get_reason_unknown.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_update_rule.argtypes = [ContextObj, FixedpointObj, Ast, Symbol]
_lib.Z3_fixedpoint_get_num_levels.restype = ctypes.c_uint
_lib.Z3_fixedpoint_get_num_levels.argtypes = [ContextObj, FixedpointObj, FuncDecl]
_lib.Z3_fixedpoint_get_cover_delta.restype = Ast
_lib.Z3_fixedpoint_get_cover_delta.argtypes = [ContextObj, FixedpointObj, ctypes.c_int, FuncDecl]
_lib.Z3_fixedpoint_add_cover.argtypes = [ContextObj, FixedpointObj, ctypes.c_int, FuncDecl, Ast]
_lib.Z3_fixedpoint_get_statistics.restype = StatsObj
_lib.Z3_fixedpoint_get_statistics.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_register_relation.argtypes = [ContextObj, FixedpointObj, FuncDecl]
_lib.Z3_fixedpoint_set_predicate_representation.argtypes = [ContextObj, FixedpointObj, FuncDecl, ctypes.c_uint, ctypes.POINTER(Symbol)]
_lib.Z3_fixedpoint_get_rules.restype = AstVectorObj
_lib.Z3_fixedpoint_get_rules.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_get_assertions.restype = AstVectorObj
_lib.Z3_fixedpoint_get_assertions.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_set_params.argtypes = [ContextObj, FixedpointObj, Params]
_lib.Z3_fixedpoint_get_help.restype = ctypes.c_char_p
_lib.Z3_fixedpoint_get_help.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_get_param_descrs.restype = ParamDescrs
_lib.Z3_fixedpoint_get_param_descrs.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_to_string.restype = ctypes.c_char_p
_lib.Z3_fixedpoint_to_string.argtypes = [ContextObj, FixedpointObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_fixedpoint_from_string.restype = AstVectorObj
_lib.Z3_fixedpoint_from_string.argtypes = [ContextObj, FixedpointObj, ctypes.c_char_p]
_lib.Z3_fixedpoint_from_file.restype = AstVectorObj
_lib.Z3_fixedpoint_from_file.argtypes = [ContextObj, FixedpointObj, ctypes.c_char_p]
_lib.Z3_fixedpoint_push.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_pop.argtypes = [ContextObj, FixedpointObj]
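
# Optimize (MaxSMT/optimization) engine.
# Minimal direct-use sketch (assumes `ctx` and an objective term `obj` of type
# Ast exist; normally reached through z3.Optimize in z3.py):
#   o = _lib.Z3_mk_optimize(ctx)
#   _lib.Z3_optimize_inc_ref(ctx, o)
#   _lib.Z3_optimize_maximize(ctx, o, obj)
#   res = _lib.Z3_optimize_check(ctx, o, 0, None)   # no extra assumptions
#   _lib.Z3_optimize_dec_ref(ctx, o)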
_lib.Z3_mk_optimize.restype = OptimizeObj
_lib.Z3_mk_optimize.argtypes = [ContextObj]
_lib.Z3_optimize_inc_ref.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_dec_ref.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_assert.argtypes = [ContextObj, OptimizeObj, Ast]
_lib.Z3_optimize_assert_soft.restype = ctypes.c_uint
_lib.Z3_optimize_assert_soft.argtypes = [ContextObj, OptimizeObj, Ast, ctypes.c_char_p, Symbol]
_lib.Z3_optimize_maximize.restype = ctypes.c_uint
_lib.Z3_optimize_maximize.argtypes = [ContextObj, OptimizeObj, Ast]
_lib.Z3_optimize_minimize.restype = ctypes.c_uint
_lib.Z3_optimize_minimize.argtypes = [ContextObj, OptimizeObj, Ast]
_lib.Z3_optimize_push.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_pop.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_check.restype = ctypes.c_int
_lib.Z3_optimize_check.argtypes = [ContextObj, OptimizeObj, ctypes.c_uint, ctypes.POINTER(Ast)]
_lib.Z3_optimize_get_reason_unknown.restype = ctypes.c_char_p
_lib.Z3_optimize_get_reason_unknown.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_model.restype = Model
_lib.Z3_optimize_get_model.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_unsat_core.restype = AstVectorObj
_lib.Z3_optimize_get_unsat_core.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_set_params.argtypes = [ContextObj, OptimizeObj, Params]
_lib.Z3_optimize_get_param_descrs.restype = ParamDescrs
_lib.Z3_optimize_get_param_descrs.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_lower.restype = Ast
_lib.Z3_optimize_get_lower.argtypes = [ContextObj, OptimizeObj, ctypes.c_uint]
_lib.Z3_optimize_get_upper.restype = Ast
_lib.Z3_optimize_get_upper.argtypes = [ContextObj, OptimizeObj, ctypes.c_uint]
_lib.Z3_optimize_get_lower_as_vector.restype = AstVectorObj
_lib.Z3_optimize_get_lower_as_vector.argtypes = [ContextObj, OptimizeObj, ctypes.c_uint]
_lib.Z3_optimize_get_upper_as_vector.restype = AstVectorObj
_lib.Z3_optimize_get_upper_as_vector.argtypes = [ContextObj, OptimizeObj, ctypes.c_uint]
_lib.Z3_optimize_to_string.restype = ctypes.c_char_p
_lib.Z3_optimize_to_string.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_from_string.argtypes = [ContextObj, OptimizeObj, ctypes.c_char_p]
_lib.Z3_optimize_from_file.argtypes = [ContextObj, OptimizeObj, ctypes.c_char_p]
_lib.Z3_optimize_get_help.restype = ctypes.c_char_p
_lib.Z3_optimize_get_help.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_statistics.restype = StatsObj
_lib.Z3_optimize_get_statistics.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_assertions.restype = AstVectorObj
_lib.Z3_optimize_get_assertions.argtypes = [ContextObj, OptimizeObj]
_lib.Z3_optimize_get_objectives.restype = AstVectorObj
_lib.Z3_optimize_get_objectives.argtypes = [ContextObj, OptimizeObj]
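
# Floating-point arithmetic (FPA): rounding modes, sorts, numerals, terms, and conversions.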
_lib.Z3_mk_fpa_rounding_mode_sort.restype = Sort
_lib.Z3_mk_fpa_rounding_mode_sort.argtypes = [ContextObj]
_lib.Z3_mk_fpa_round_nearest_ties_to_even.restype = Ast
_lib.Z3_mk_fpa_round_nearest_ties_to_even.argtypes = [ContextObj]
_lib.Z3_mk_fpa_rne.restype = Ast
_lib.Z3_mk_fpa_rne.argtypes = [ContextObj]
_lib.Z3_mk_fpa_round_nearest_ties_to_away.restype = Ast
_lib.Z3_mk_fpa_round_nearest_ties_to_away.argtypes = [ContextObj]
_lib.Z3_mk_fpa_rna.restype = Ast
_lib.Z3_mk_fpa_rna.argtypes = [ContextObj]
_lib.Z3_mk_fpa_round_toward_positive.restype = Ast
_lib.Z3_mk_fpa_round_toward_positive.argtypes = [ContextObj]
_lib.Z3_mk_fpa_rtp.restype = Ast
_lib.Z3_mk_fpa_rtp.argtypes = [ContextObj]
_lib.Z3_mk_fpa_round_toward_negative.restype = Ast
_lib.Z3_mk_fpa_round_toward_negative.argtypes = [ContextObj]
_lib.Z3_mk_fpa_rtn.restype = Ast
_lib.Z3_mk_fpa_rtn.argtypes = [ContextObj]
_lib.Z3_mk_fpa_round_toward_zero.restype = Ast
_lib.Z3_mk_fpa_round_toward_zero.argtypes = [ContextObj]
_lib.Z3_mk_fpa_rtz.restype = Ast
_lib.Z3_mk_fpa_rtz.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort.restype = Sort
_lib.Z3_mk_fpa_sort.argtypes = [ContextObj, ctypes.c_uint, ctypes.c_uint]
_lib.Z3_mk_fpa_sort_half.restype = Sort
_lib.Z3_mk_fpa_sort_half.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_16.restype = Sort
_lib.Z3_mk_fpa_sort_16.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_single.restype = Sort
_lib.Z3_mk_fpa_sort_single.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_32.restype = Sort
_lib.Z3_mk_fpa_sort_32.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_double.restype = Sort
_lib.Z3_mk_fpa_sort_double.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_64.restype = Sort
_lib.Z3_mk_fpa_sort_64.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_quadruple.restype = Sort
_lib.Z3_mk_fpa_sort_quadruple.argtypes = [ContextObj]
_lib.Z3_mk_fpa_sort_128.restype = Sort
_lib.Z3_mk_fpa_sort_128.argtypes = [ContextObj]
_lib.Z3_mk_fpa_nan.restype = Ast
_lib.Z3_mk_fpa_nan.argtypes = [ContextObj, Sort]
_lib.Z3_mk_fpa_inf.restype = Ast
_lib.Z3_mk_fpa_inf.argtypes = [ContextObj, Sort, ctypes.c_bool]
_lib.Z3_mk_fpa_zero.restype = Ast
_lib.Z3_mk_fpa_zero.argtypes = [ContextObj, Sort, ctypes.c_bool]
_lib.Z3_mk_fpa_fp.restype = Ast
_lib.Z3_mk_fpa_fp.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_fpa_numeral_float.restype = Ast
_lib.Z3_mk_fpa_numeral_float.argtypes = [ContextObj, ctypes.c_float, Sort]
_lib.Z3_mk_fpa_numeral_double.restype = Ast
_lib.Z3_mk_fpa_numeral_double.argtypes = [ContextObj, ctypes.c_double, Sort]
_lib.Z3_mk_fpa_numeral_int.restype = Ast
_lib.Z3_mk_fpa_numeral_int.argtypes = [ContextObj, ctypes.c_int, Sort]
_lib.Z3_mk_fpa_numeral_int_uint.restype = Ast
_lib.Z3_mk_fpa_numeral_int_uint.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_int, ctypes.c_uint, Sort]
_lib.Z3_mk_fpa_numeral_int64_uint64.restype = Ast
_lib.Z3_mk_fpa_numeral_int64_uint64.argtypes = [ContextObj, ctypes.c_bool, ctypes.c_longlong, ctypes.c_ulonglong, Sort]
_lib.Z3_mk_fpa_abs.restype = Ast
_lib.Z3_mk_fpa_abs.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_neg.restype = Ast
_lib.Z3_mk_fpa_neg.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_add.restype = Ast
_lib.Z3_mk_fpa_add.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_fpa_sub.restype = Ast
_lib.Z3_mk_fpa_sub.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_fpa_mul.restype = Ast
_lib.Z3_mk_fpa_mul.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_fpa_div.restype = Ast
_lib.Z3_mk_fpa_div.argtypes = [ContextObj, Ast, Ast, Ast]
_lib.Z3_mk_fpa_fma.restype = Ast
_lib.Z3_mk_fpa_fma.argtypes = [ContextObj, Ast, Ast, Ast, Ast]
_lib.Z3_mk_fpa_sqrt.restype = Ast
_lib.Z3_mk_fpa_sqrt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_rem.restype = Ast
_lib.Z3_mk_fpa_rem.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_round_to_integral.restype = Ast
_lib.Z3_mk_fpa_round_to_integral.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_min.restype = Ast
_lib.Z3_mk_fpa_min.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_max.restype = Ast
_lib.Z3_mk_fpa_max.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_leq.restype = Ast
_lib.Z3_mk_fpa_leq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_lt.restype = Ast
_lib.Z3_mk_fpa_lt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_geq.restype = Ast
_lib.Z3_mk_fpa_geq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_gt.restype = Ast
_lib.Z3_mk_fpa_gt.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_eq.restype = Ast
_lib.Z3_mk_fpa_eq.argtypes = [ContextObj, Ast, Ast]
_lib.Z3_mk_fpa_is_normal.restype = Ast
_lib.Z3_mk_fpa_is_normal.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_subnormal.restype = Ast
_lib.Z3_mk_fpa_is_subnormal.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_zero.restype = Ast
_lib.Z3_mk_fpa_is_zero.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_infinite.restype = Ast
_lib.Z3_mk_fpa_is_infinite.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_nan.restype = Ast
_lib.Z3_mk_fpa_is_nan.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_negative.restype = Ast
_lib.Z3_mk_fpa_is_negative.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_is_positive.restype = Ast
_lib.Z3_mk_fpa_is_positive.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_to_fp_bv.restype = Ast
_lib.Z3_mk_fpa_to_fp_bv.argtypes = [ContextObj, Ast, Sort]
_lib.Z3_mk_fpa_to_fp_float.restype = Ast
_lib.Z3_mk_fpa_to_fp_float.argtypes = [ContextObj, Ast, Ast, Sort]
_lib.Z3_mk_fpa_to_fp_real.restype = Ast
_lib.Z3_mk_fpa_to_fp_real.argtypes = [ContextObj, Ast, Ast, Sort]
_lib.Z3_mk_fpa_to_fp_signed.restype = Ast
_lib.Z3_mk_fpa_to_fp_signed.argtypes = [ContextObj, Ast, Ast, Sort]
_lib.Z3_mk_fpa_to_fp_unsigned.restype = Ast
_lib.Z3_mk_fpa_to_fp_unsigned.argtypes = [ContextObj, Ast, Ast, Sort]
_lib.Z3_mk_fpa_to_ubv.restype = Ast
_lib.Z3_mk_fpa_to_ubv.argtypes = [ContextObj, Ast, Ast, ctypes.c_uint]
_lib.Z3_mk_fpa_to_sbv.restype = Ast
_lib.Z3_mk_fpa_to_sbv.argtypes = [ContextObj, Ast, Ast, ctypes.c_uint]
_lib.Z3_mk_fpa_to_real.restype = Ast
_lib.Z3_mk_fpa_to_real.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_get_ebits.restype = ctypes.c_uint
_lib.Z3_fpa_get_ebits.argtypes = [ContextObj, Sort]
_lib.Z3_fpa_get_sbits.restype = ctypes.c_uint
_lib.Z3_fpa_get_sbits.argtypes = [ContextObj, Sort]
_lib.Z3_fpa_is_numeral_nan.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_nan.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_inf.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_inf.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_zero.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_zero.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_normal.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_normal.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_subnormal.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_subnormal.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_positive.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_positive.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_is_numeral_negative.restype = ctypes.c_bool
_lib.Z3_fpa_is_numeral_negative.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_get_numeral_sign_bv.restype = Ast
_lib.Z3_fpa_get_numeral_sign_bv.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_get_numeral_significand_bv.restype = Ast
_lib.Z3_fpa_get_numeral_significand_bv.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_get_numeral_sign.restype = ctypes.c_bool
_lib.Z3_fpa_get_numeral_sign.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_int)]
_lib.Z3_fpa_get_numeral_significand_string.restype = ctypes.c_char_p
_lib.Z3_fpa_get_numeral_significand_string.argtypes = [ContextObj, Ast]
_lib.Z3_fpa_get_numeral_significand_uint64.restype = ctypes.c_bool
_lib.Z3_fpa_get_numeral_significand_uint64.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_ulonglong)]
_lib.Z3_fpa_get_numeral_exponent_string.restype = ctypes.c_char_p
_lib.Z3_fpa_get_numeral_exponent_string.argtypes = [ContextObj, Ast, ctypes.c_bool]
_lib.Z3_fpa_get_numeral_exponent_int64.restype = ctypes.c_bool
_lib.Z3_fpa_get_numeral_exponent_int64.argtypes = [ContextObj, Ast, ctypes.POINTER(ctypes.c_longlong), ctypes.c_bool]
_lib.Z3_fpa_get_numeral_exponent_bv.restype = Ast
_lib.Z3_fpa_get_numeral_exponent_bv.argtypes = [ContextObj, Ast, ctypes.c_bool]
_lib.Z3_mk_fpa_to_ieee_bv.restype = Ast
_lib.Z3_mk_fpa_to_ieee_bv.argtypes = [ContextObj, Ast]
_lib.Z3_mk_fpa_to_fp_int_real.restype = Ast
_lib.Z3_mk_fpa_to_fp_int_real.argtypes = [ContextObj, Ast, Ast, Ast, Sort]
_lib.Z3_fixedpoint_query_from_lvl.restype = ctypes.c_int
_lib.Z3_fixedpoint_query_from_lvl.argtypes = [ContextObj, FixedpointObj, Ast, ctypes.c_uint]
_lib.Z3_fixedpoint_get_ground_sat_answer.restype = Ast
_lib.Z3_fixedpoint_get_ground_sat_answer.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_get_rules_along_trace.restype = AstVectorObj
_lib.Z3_fixedpoint_get_rules_along_trace.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_get_rule_names_along_trace.restype = Symbol
_lib.Z3_fixedpoint_get_rule_names_along_trace.argtypes = [ContextObj, FixedpointObj]
_lib.Z3_fixedpoint_add_invariant.argtypes = [ContextObj, FixedpointObj, FuncDecl, Ast]
_lib.Z3_fixedpoint_get_reachable.restype = Ast
_lib.Z3_fixedpoint_get_reachable.argtypes = [ContextObj, FixedpointObj, FuncDecl]
_lib.Z3_qe_model_project.restype = Ast
_lib.Z3_qe_model_project.argtypes = [ContextObj, Model, ctypes.c_uint, ctypes.POINTER(Ast), Ast]
_lib.Z3_qe_model_project_skolem.restype = Ast
_lib.Z3_qe_model_project_skolem.argtypes = [ContextObj, Model, ctypes.c_uint, ctypes.POINTER(Ast), Ast, AstMapObj]
_lib.Z3_model_extrapolate.restype = Ast
_lib.Z3_model_extrapolate.argtypes = [ContextObj, Model, Ast]
_lib.Z3_qe_lite.restype = Ast
_lib.Z3_qe_lite.argtypes = [ContextObj, AstVectorObj, Ast]
# Helper shared by every generated wrapper below: it pairs a raw C entry point
# with Z3's error-checking hooks, so each wrapper can call Check(ctx) and raise
# Z3Exception whenever the context reports a non-OK error code.
class Elementaries:
def __init__(self, f):
self.f = f
self.get_error_code = _lib.Z3_get_error_code
self.get_error_message = _lib.Z3_get_error_msg
self.OK = Z3_OK
self.Exception = Z3Exception
def Check(self, ctx):
err = self.get_error_code(ctx)
if err != self.OK:
raise self.Exception(self.get_error_message(ctx, err))
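# Illustrative sketch only (not part of the generated bindings): every Z3_*
# wrapper defined below follows the same shape -- call the raw C function via
# _elems.f(...), then _elems.Check(ctx) raises Z3Exception if the context
# recorded an error. A minimal end-to-end use of these low-level wrappers
# (normally one would go through the higher-level z3.py API instead) could be:
def _example_low_level_usage():
    # Create a configuration and a context, then release the configuration.
    cfg = Z3_mk_config()
    ctx = Z3_mk_context(cfg)
    Z3_del_config(cfg)
    # Build the integer constant `x` and render its AST as text.
    x = Z3_mk_const(ctx, Z3_mk_string_symbol(ctx, "x"), Z3_mk_int_sort(ctx))
    s = Z3_ast_to_string(ctx, x)
    Z3_del_context(ctx)
    return s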
def Z3_set_error_handler(ctx, hndlr, _elems=Elementaries(_lib.Z3_set_error_handler)):
ceh = _error_handler_type(hndlr)
_elems.f(ctx, ceh)
_elems.Check(ctx)
return ceh
def Z3_global_param_set(a0, a1, _elems=Elementaries(_lib.Z3_global_param_set)):
_elems.f(_to_ascii(a0), _to_ascii(a1))
def Z3_global_param_reset_all(_elems=Elementaries(_lib.Z3_global_param_reset_all)):
_elems.f()
def Z3_global_param_get(a0, a1, _elems=Elementaries(_lib.Z3_global_param_get)):
r = _elems.f(_to_ascii(a0), _to_ascii(a1))
return r
def Z3_mk_config(_elems=Elementaries(_lib.Z3_mk_config)):
r = _elems.f()
return r
def Z3_del_config(a0, _elems=Elementaries(_lib.Z3_del_config)):
_elems.f(a0)
def Z3_set_param_value(a0, a1, a2, _elems=Elementaries(_lib.Z3_set_param_value)):
_elems.f(a0, _to_ascii(a1), _to_ascii(a2))
def Z3_mk_context(a0, _elems=Elementaries(_lib.Z3_mk_context)):
r = _elems.f(a0)
return r
def Z3_mk_context_rc(a0, _elems=Elementaries(_lib.Z3_mk_context_rc)):
r = _elems.f(a0)
return r
def Z3_del_context(a0, _elems=Elementaries(_lib.Z3_del_context)):
_elems.f(a0)
def Z3_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_update_param_value(a0, a1, a2, _elems=Elementaries(_lib.Z3_update_param_value)):
_elems.f(a0, _to_ascii(a1), _to_ascii(a2))
_elems.Check(a0)
def Z3_interrupt(a0, _elems=Elementaries(_lib.Z3_interrupt)):
_elems.f(a0)
_elems.Check(a0)
def Z3_mk_params(a0, _elems=Elementaries(_lib.Z3_mk_params)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_params_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_params_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_params_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_params_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_params_set_bool(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_params_set_bool)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_params_set_uint(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_params_set_uint)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_params_set_double(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_params_set_double)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_params_set_symbol(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_params_set_symbol)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_params_to_string(a0, a1, _elems=Elementaries(_lib.Z3_params_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_params_validate(a0, a1, a2, _elems=Elementaries(_lib.Z3_params_validate)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_param_descrs_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_param_descrs_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_param_descrs_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_param_descrs_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_param_descrs_get_kind(a0, a1, a2, _elems=Elementaries(_lib.Z3_param_descrs_get_kind)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_param_descrs_size(a0, a1, _elems=Elementaries(_lib.Z3_param_descrs_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_param_descrs_get_name(a0, a1, a2, _elems=Elementaries(_lib.Z3_param_descrs_get_name)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_param_descrs_get_documentation(a0, a1, a2, _elems=Elementaries(_lib.Z3_param_descrs_get_documentation)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_param_descrs_to_string(a0, a1, _elems=Elementaries(_lib.Z3_param_descrs_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_mk_int_symbol(a0, a1, _elems=Elementaries(_lib.Z3_mk_int_symbol)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_string_symbol(a0, a1, _elems=Elementaries(_lib.Z3_mk_string_symbol)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return r
def Z3_mk_uninterpreted_sort(a0, a1, _elems=Elementaries(_lib.Z3_mk_uninterpreted_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bool_sort(a0, _elems=Elementaries(_lib.Z3_mk_bool_sort)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_int_sort(a0, _elems=Elementaries(_lib.Z3_mk_int_sort)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_real_sort(a0, _elems=Elementaries(_lib.Z3_mk_real_sort)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_bv_sort(a0, a1, _elems=Elementaries(_lib.Z3_mk_bv_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_finite_domain_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_finite_domain_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_array_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_array_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_array_sort_n(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_array_sort_n)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_tuple_sort(a0, a1, a2, a3, a4, a5, a6, _elems=Elementaries(_lib.Z3_mk_tuple_sort)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6)
_elems.Check(a0)
return r
def Z3_mk_enumeration_sort(a0, a1, a2, a3, a4, a5, _elems=Elementaries(_lib.Z3_mk_enumeration_sort)):
r = _elems.f(a0, a1, a2, a3, a4, a5)
_elems.Check(a0)
return r
def Z3_mk_list_sort(a0, a1, a2, a3, a4, a5, a6, a7, a8, _elems=Elementaries(_lib.Z3_mk_list_sort)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7, a8)
_elems.Check(a0)
return r
def Z3_mk_constructor(a0, a1, a2, a3, a4, a5, a6, _elems=Elementaries(_lib.Z3_mk_constructor)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6)
_elems.Check(a0)
return r
def Z3_del_constructor(a0, a1, _elems=Elementaries(_lib.Z3_del_constructor)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_mk_datatype(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_datatype)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_constructor_list(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_constructor_list)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_del_constructor_list(a0, a1, _elems=Elementaries(_lib.Z3_del_constructor_list)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_mk_datatypes(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_datatypes)):
_elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
def Z3_query_constructor(a0, a1, a2, a3, a4, a5, _elems=Elementaries(_lib.Z3_query_constructor)):
_elems.f(a0, a1, a2, a3, a4, a5)
_elems.Check(a0)
def Z3_mk_func_decl(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_func_decl)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_app(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_app)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_const(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_const)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fresh_func_decl(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_fresh_func_decl)):
r = _elems.f(a0, _to_ascii(a1), a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_fresh_const(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fresh_const)):
r = _elems.f(a0, _to_ascii(a1), a2)
_elems.Check(a0)
return r
def Z3_mk_rec_func_decl(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_rec_func_decl)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_add_rec_def(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_add_rec_def)):
_elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
def Z3_mk_true(a0, _elems=Elementaries(_lib.Z3_mk_true)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_false(a0, _elems=Elementaries(_lib.Z3_mk_false)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_eq(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_eq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_distinct(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_distinct)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_not(a0, a1, _elems=Elementaries(_lib.Z3_mk_not)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_ite(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_ite)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_iff(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_iff)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_implies(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_implies)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_xor(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_xor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_and(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_and)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_or(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_or)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_add(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_add)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_mul(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_mul)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_sub(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_sub)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_unary_minus(a0, a1, _elems=Elementaries(_lib.Z3_mk_unary_minus)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_div(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_div)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_mod(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_mod)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_rem(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_rem)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_power(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_power)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_lt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_lt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_le(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_le)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_gt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_gt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_ge(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_ge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_int2real(a0, a1, _elems=Elementaries(_lib.Z3_mk_int2real)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_real2int(a0, a1, _elems=Elementaries(_lib.Z3_mk_real2int)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_is_int(a0, a1, _elems=Elementaries(_lib.Z3_mk_is_int)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvnot(a0, a1, _elems=Elementaries(_lib.Z3_mk_bvnot)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvredand(a0, a1, _elems=Elementaries(_lib.Z3_mk_bvredand)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvredor(a0, a1, _elems=Elementaries(_lib.Z3_mk_bvredor)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvand(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvand)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvor(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvxor(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvxor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvnand(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvnand)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvnor(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvnor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvxnor(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvxnor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvneg(a0, a1, _elems=Elementaries(_lib.Z3_mk_bvneg)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvadd(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvadd)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsub(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsub)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvmul(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvmul)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvudiv(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvudiv)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsdiv(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsdiv)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvurem(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvurem)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsrem(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsrem)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsmod(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsmod)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvult(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvult)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvslt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvslt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvule(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvule)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsle(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsle)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvuge(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvuge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsge(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvugt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvugt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsgt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsgt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_concat(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_concat)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_extract(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_extract)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_sign_ext(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_sign_ext)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_zero_ext(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_zero_ext)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_repeat(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_repeat)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvshl(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvshl)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvlshr(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvlshr)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvashr(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvashr)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_rotate_left(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_rotate_left)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_rotate_right(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_rotate_right)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_ext_rotate_left(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_ext_rotate_left)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_ext_rotate_right(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_ext_rotate_right)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_int2bv(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_int2bv)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bv2int(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bv2int)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvadd_no_overflow(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_bvadd_no_overflow)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_bvadd_no_underflow(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvadd_no_underflow)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsub_no_overflow(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsub_no_overflow)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvsub_no_underflow(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_bvsub_no_underflow)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_bvsdiv_no_overflow(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvsdiv_no_overflow)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bvneg_no_overflow(a0, a1, _elems=Elementaries(_lib.Z3_mk_bvneg_no_overflow)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_bvmul_no_overflow(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_bvmul_no_overflow)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_bvmul_no_underflow(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bvmul_no_underflow)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_select(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_select)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_select_n(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_select_n)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_store(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_store)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_store_n(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_store_n)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_const_array(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_const_array)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_map(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_map)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_array_default(a0, a1, _elems=Elementaries(_lib.Z3_mk_array_default)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_as_array(a0, a1, _elems=Elementaries(_lib.Z3_mk_as_array)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_set_sort(a0, a1, _elems=Elementaries(_lib.Z3_mk_set_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_empty_set(a0, a1, _elems=Elementaries(_lib.Z3_mk_empty_set)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_full_set(a0, a1, _elems=Elementaries(_lib.Z3_mk_full_set)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_set_add(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_add)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_del(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_del)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_union(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_union)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_intersect(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_intersect)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_difference(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_difference)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_complement(a0, a1, _elems=Elementaries(_lib.Z3_mk_set_complement)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_set_member(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_member)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_set_subset(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_set_subset)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_array_ext(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_array_ext)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_numeral(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_numeral)):
r = _elems.f(a0, _to_ascii(a1), a2)
_elems.Check(a0)
return r
def Z3_mk_real(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_real)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_int(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_int)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_unsigned_int(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_unsigned_int)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_int64(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_int64)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_unsigned_int64(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_unsigned_int64)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bv_numeral(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bv_numeral)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_sort(a0, a1, _elems=Elementaries(_lib.Z3_mk_seq_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_seq_sort(a0, a1, _elems=Elementaries(_lib.Z3_is_seq_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_sort(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_re_sort(a0, a1, _elems=Elementaries(_lib.Z3_is_re_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_string_sort(a0, _elems=Elementaries(_lib.Z3_mk_string_sort)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_is_string_sort(a0, a1, _elems=Elementaries(_lib.Z3_is_string_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_string(a0, a1, _elems=Elementaries(_lib.Z3_mk_string)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return r
def Z3_is_string(a0, a1, _elems=Elementaries(_lib.Z3_is_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_string(a0, a1, _elems=Elementaries(_lib.Z3_get_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_mk_seq_empty(a0, a1, _elems=Elementaries(_lib.Z3_mk_seq_empty)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_seq_unit(a0, a1, _elems=Elementaries(_lib.Z3_mk_seq_unit)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_seq_concat(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_concat)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_prefix(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_prefix)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_suffix(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_suffix)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_contains(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_contains)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_extract(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_seq_extract)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_seq_replace(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_seq_replace)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_seq_at(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_at)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_seq_length(a0, a1, _elems=Elementaries(_lib.Z3_mk_seq_length)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_seq_index(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_seq_index)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_str_to_int(a0, a1, _elems=Elementaries(_lib.Z3_mk_str_to_int)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_int_to_str(a0, a1, _elems=Elementaries(_lib.Z3_mk_int_to_str)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_seq_to_re(a0, a1, _elems=Elementaries(_lib.Z3_mk_seq_to_re)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_seq_in_re(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_seq_in_re)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_re_plus(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_plus)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_star(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_star)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_option(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_option)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_union(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_re_union)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_re_concat(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_re_concat)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_re_range(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_re_range)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_re_loop(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_re_loop)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_re_intersect(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_re_intersect)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_re_complement(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_complement)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_empty(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_empty)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_re_full(a0, a1, _elems=Elementaries(_lib.Z3_mk_re_full)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_pattern(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_pattern)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_bound(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_bound)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_forall(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_mk_forall)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7)
_elems.Check(a0)
return r
def Z3_mk_exists(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_mk_exists)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7)
_elems.Check(a0)
return r
def Z3_mk_quantifier(a0, a1, a2, a3, a4, a5, a6, a7, a8, _elems=Elementaries(_lib.Z3_mk_quantifier)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7, a8)
_elems.Check(a0)
return r
def Z3_mk_quantifier_ex(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, _elems=Elementaries(_lib.Z3_mk_quantifier_ex)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12)
_elems.Check(a0)
return r
def Z3_mk_forall_const(a0, a1, a2, a3, a4, a5, a6, _elems=Elementaries(_lib.Z3_mk_forall_const)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6)
_elems.Check(a0)
return r
def Z3_mk_exists_const(a0, a1, a2, a3, a4, a5, a6, _elems=Elementaries(_lib.Z3_mk_exists_const)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6)
_elems.Check(a0)
return r
def Z3_mk_quantifier_const(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_mk_quantifier_const)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7)
_elems.Check(a0)
return r
def Z3_mk_quantifier_const_ex(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, _elems=Elementaries(_lib.Z3_mk_quantifier_const_ex)):
r = _elems.f(a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11)
_elems.Check(a0)
return r
def Z3_mk_lambda(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_lambda)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_lambda_const(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_lambda_const)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_get_symbol_kind(a0, a1, _elems=Elementaries(_lib.Z3_get_symbol_kind)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_symbol_int(a0, a1, _elems=Elementaries(_lib.Z3_get_symbol_int)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_symbol_string(a0, a1, _elems=Elementaries(_lib.Z3_get_symbol_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_get_sort_name(a0, a1, _elems=Elementaries(_lib.Z3_get_sort_name)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_sort_id(a0, a1, _elems=Elementaries(_lib.Z3_get_sort_id)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_sort_to_ast(a0, a1, _elems=Elementaries(_lib.Z3_sort_to_ast)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_eq_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_is_eq_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_sort_kind(a0, a1, _elems=Elementaries(_lib.Z3_get_sort_kind)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_bv_sort_size(a0, a1, _elems=Elementaries(_lib.Z3_get_bv_sort_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_finite_domain_sort_size(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_finite_domain_sort_size)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_array_sort_domain(a0, a1, _elems=Elementaries(_lib.Z3_get_array_sort_domain)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_array_sort_range(a0, a1, _elems=Elementaries(_lib.Z3_get_array_sort_range)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_tuple_sort_mk_decl(a0, a1, _elems=Elementaries(_lib.Z3_get_tuple_sort_mk_decl)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_tuple_sort_num_fields(a0, a1, _elems=Elementaries(_lib.Z3_get_tuple_sort_num_fields)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_tuple_sort_field_decl(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_tuple_sort_field_decl)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_datatype_sort_num_constructors(a0, a1, _elems=Elementaries(_lib.Z3_get_datatype_sort_num_constructors)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_datatype_sort_constructor(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_datatype_sort_constructor)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_datatype_sort_recognizer(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_datatype_sort_recognizer)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_datatype_sort_constructor_accessor(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_get_datatype_sort_constructor_accessor)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_datatype_update_field(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_datatype_update_field)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_get_relation_arity(a0, a1, _elems=Elementaries(_lib.Z3_get_relation_arity)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_relation_column(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_relation_column)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_atmost(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_atmost)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_atleast(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_atleast)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_pble(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_pble)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_pbge(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_pbge)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_pbeq(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_pbeq)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_func_decl_to_ast(a0, a1, _elems=Elementaries(_lib.Z3_func_decl_to_ast)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_eq_func_decl(a0, a1, a2, _elems=Elementaries(_lib.Z3_is_eq_func_decl)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_func_decl_id(a0, a1, _elems=Elementaries(_lib.Z3_get_func_decl_id)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_decl_name(a0, a1, _elems=Elementaries(_lib.Z3_get_decl_name)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_decl_kind(a0, a1, _elems=Elementaries(_lib.Z3_get_decl_kind)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_domain_size(a0, a1, _elems=Elementaries(_lib.Z3_get_domain_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_arity(a0, a1, _elems=Elementaries(_lib.Z3_get_arity)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_domain(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_domain)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_range(a0, a1, _elems=Elementaries(_lib.Z3_get_range)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_decl_num_parameters(a0, a1, _elems=Elementaries(_lib.Z3_get_decl_num_parameters)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_decl_parameter_kind(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_parameter_kind)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_int_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_int_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_double_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_double_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_symbol_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_symbol_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_sort_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_sort_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_ast_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_ast_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_func_decl_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_func_decl_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_decl_rational_parameter(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_decl_rational_parameter)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_app_to_ast(a0, a1, _elems=Elementaries(_lib.Z3_app_to_ast)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_app_decl(a0, a1, _elems=Elementaries(_lib.Z3_get_app_decl)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_app_num_args(a0, a1, _elems=Elementaries(_lib.Z3_get_app_num_args)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_app_arg(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_app_arg)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_is_eq_ast(a0, a1, a2, _elems=Elementaries(_lib.Z3_is_eq_ast)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_ast_id(a0, a1, _elems=Elementaries(_lib.Z3_get_ast_id)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_ast_hash(a0, a1, _elems=Elementaries(_lib.Z3_get_ast_hash)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_sort(a0, a1, _elems=Elementaries(_lib.Z3_get_sort)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_well_sorted(a0, a1, _elems=Elementaries(_lib.Z3_is_well_sorted)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_bool_value(a0, a1, _elems=Elementaries(_lib.Z3_get_bool_value)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_ast_kind(a0, a1, _elems=Elementaries(_lib.Z3_get_ast_kind)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_app(a0, a1, _elems=Elementaries(_lib.Z3_is_app)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_numeral_ast(a0, a1, _elems=Elementaries(_lib.Z3_is_numeral_ast)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_algebraic_number(a0, a1, _elems=Elementaries(_lib.Z3_is_algebraic_number)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_to_app(a0, a1, _elems=Elementaries(_lib.Z3_to_app)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_to_func_decl(a0, a1, _elems=Elementaries(_lib.Z3_to_func_decl)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_numeral_string(a0, a1, _elems=Elementaries(_lib.Z3_get_numeral_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_get_numeral_decimal_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_numeral_decimal_string)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_get_numeral_double(a0, a1, _elems=Elementaries(_lib.Z3_get_numeral_double)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_numerator(a0, a1, _elems=Elementaries(_lib.Z3_get_numerator)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_denominator(a0, a1, _elems=Elementaries(_lib.Z3_get_denominator)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_numeral_small(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_get_numeral_small)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_get_numeral_int(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_numeral_int)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_numeral_uint(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_numeral_uint)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_numeral_uint64(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_numeral_uint64)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_numeral_int64(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_numeral_int64)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_numeral_rational_int64(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_get_numeral_rational_int64)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_get_algebraic_number_lower(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_algebraic_number_lower)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_algebraic_number_upper(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_algebraic_number_upper)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_pattern_to_ast(a0, a1, _elems=Elementaries(_lib.Z3_pattern_to_ast)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_pattern_num_terms(a0, a1, _elems=Elementaries(_lib.Z3_get_pattern_num_terms)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_pattern(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_pattern)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_index_value(a0, a1, _elems=Elementaries(_lib.Z3_get_index_value)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_quantifier_forall(a0, a1, _elems=Elementaries(_lib.Z3_is_quantifier_forall)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_quantifier_exists(a0, a1, _elems=Elementaries(_lib.Z3_is_quantifier_exists)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_is_lambda(a0, a1, _elems=Elementaries(_lib.Z3_is_lambda)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_quantifier_weight(a0, a1, _elems=Elementaries(_lib.Z3_get_quantifier_weight)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_quantifier_num_patterns(a0, a1, _elems=Elementaries(_lib.Z3_get_quantifier_num_patterns)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_quantifier_pattern_ast(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_quantifier_pattern_ast)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_quantifier_num_no_patterns(a0, a1, _elems=Elementaries(_lib.Z3_get_quantifier_num_no_patterns)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_quantifier_no_pattern_ast(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_quantifier_no_pattern_ast)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_quantifier_num_bound(a0, a1, _elems=Elementaries(_lib.Z3_get_quantifier_num_bound)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_quantifier_bound_name(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_quantifier_bound_name)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_quantifier_bound_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_get_quantifier_bound_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_quantifier_body(a0, a1, _elems=Elementaries(_lib.Z3_get_quantifier_body)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_simplify(a0, a1, _elems=Elementaries(_lib.Z3_simplify)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_simplify_ex(a0, a1, a2, _elems=Elementaries(_lib.Z3_simplify_ex)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_simplify_get_help(a0, _elems=Elementaries(_lib.Z3_simplify_get_help)):
r = _elems.f(a0)
_elems.Check(a0)
return _to_pystr(r)
def Z3_simplify_get_param_descrs(a0, _elems=Elementaries(_lib.Z3_simplify_get_param_descrs)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_update_term(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_update_term)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_substitute(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_substitute)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_substitute_vars(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_substitute_vars)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_translate(a0, a1, a2, _elems=Elementaries(_lib.Z3_translate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_model(a0, _elems=Elementaries(_lib.Z3_mk_model)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_model_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_model_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_model_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_model_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_model_eval(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_model_eval)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_model_get_const_interp(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_const_interp)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_has_interp(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_has_interp)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_get_func_interp(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_func_interp)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_get_num_consts(a0, a1, _elems=Elementaries(_lib.Z3_model_get_num_consts)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_model_get_const_decl(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_const_decl)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_get_num_funcs(a0, a1, _elems=Elementaries(_lib.Z3_model_get_num_funcs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_model_get_func_decl(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_func_decl)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_get_num_sorts(a0, a1, _elems=Elementaries(_lib.Z3_model_get_num_sorts)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_model_get_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_get_sort_universe(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_get_sort_universe)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_model_translate(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_translate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_is_as_array(a0, a1, _elems=Elementaries(_lib.Z3_is_as_array)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_as_array_func_decl(a0, a1, _elems=Elementaries(_lib.Z3_get_as_array_func_decl)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_add_func_interp(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_add_func_interp)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_add_const_interp(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_add_const_interp)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_func_interp_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_func_interp_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_func_interp_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_func_interp_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_func_interp_get_num_entries(a0, a1, _elems=Elementaries(_lib.Z3_func_interp_get_num_entries)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_func_interp_get_entry(a0, a1, a2, _elems=Elementaries(_lib.Z3_func_interp_get_entry)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_func_interp_get_else(a0, a1, _elems=Elementaries(_lib.Z3_func_interp_get_else)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_func_interp_set_else(a0, a1, a2, _elems=Elementaries(_lib.Z3_func_interp_set_else)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_func_interp_get_arity(a0, a1, _elems=Elementaries(_lib.Z3_func_interp_get_arity)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_func_interp_add_entry(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_func_interp_add_entry)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_func_entry_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_func_entry_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_func_entry_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_func_entry_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_func_entry_get_value(a0, a1, _elems=Elementaries(_lib.Z3_func_entry_get_value)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_func_entry_get_num_args(a0, a1, _elems=Elementaries(_lib.Z3_func_entry_get_num_args)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_func_entry_get_arg(a0, a1, a2, _elems=Elementaries(_lib.Z3_func_entry_get_arg)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_open_log(a0, _elems=Elementaries(_lib.Z3_open_log)):
r = _elems.f(_to_ascii(a0))
return r
def Z3_append_log(a0, _elems=Elementaries(_lib.Z3_append_log)):
_elems.f(_to_ascii(a0))
def Z3_close_log(_elems=Elementaries(_lib.Z3_close_log)):
_elems.f()
def Z3_toggle_warning_messages(a0, _elems=Elementaries(_lib.Z3_toggle_warning_messages)):
_elems.f(a0)
def Z3_set_ast_print_mode(a0, a1, _elems=Elementaries(_lib.Z3_set_ast_print_mode)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_to_string(a0, a1, _elems=Elementaries(_lib.Z3_ast_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_pattern_to_string(a0, a1, _elems=Elementaries(_lib.Z3_pattern_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_sort_to_string(a0, a1, _elems=Elementaries(_lib.Z3_sort_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_func_decl_to_string(a0, a1, _elems=Elementaries(_lib.Z3_func_decl_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_model_to_string(a0, a1, _elems=Elementaries(_lib.Z3_model_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_benchmark_to_smtlib_string(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_benchmark_to_smtlib_string)):
r = _elems.f(a0, _to_ascii(a1), _to_ascii(a2), _to_ascii(a3), _to_ascii(a4), a5, a6, a7)
_elems.Check(a0)
return _to_pystr(r)
def Z3_parse_smtlib2_string(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_parse_smtlib2_string)):
r = _elems.f(a0, _to_ascii(a1), a2, a3, a4, a5, a6, a7)
_elems.Check(a0)
return r
def Z3_parse_smtlib2_file(a0, a1, a2, a3, a4, a5, a6, a7, _elems=Elementaries(_lib.Z3_parse_smtlib2_file)):
r = _elems.f(a0, _to_ascii(a1), a2, a3, a4, a5, a6, a7)
_elems.Check(a0)
return r
def Z3_eval_smtlib2_string(a0, a1, _elems=Elementaries(_lib.Z3_eval_smtlib2_string)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return _to_pystr(r)
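# Illustrative sketch only (not part of the generated bindings):
# Z3_eval_smtlib2_string executes SMT-LIB 2 commands against a context and
# returns the solver's textual output, so a quick satisfiability check can be
# driven entirely through these low-level wrappers.
def _example_eval_smtlib2():
    cfg = Z3_mk_config()
    ctx = Z3_mk_context(cfg)
    Z3_del_config(cfg)
    # Declare a constant, assert a constraint, and ask for satisfiability.
    out = Z3_eval_smtlib2_string(ctx, "(declare-const x Int) (assert (> x 0)) (check-sat)")
    Z3_del_context(ctx)
    return out  # expected to contain "sat"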
def Z3_get_error_code(a0, _elems=Elementaries(_lib.Z3_get_error_code)):
r = _elems.f(a0)
return r
def Z3_set_error(a0, a1, _elems=Elementaries(_lib.Z3_set_error)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_get_error_msg(a0, a1, _elems=Elementaries(_lib.Z3_get_error_msg)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_get_version(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_get_version)):
_elems.f(a0, a1, a2, a3)
def Z3_get_full_version(_elems=Elementaries(_lib.Z3_get_full_version)):
r = _elems.f()
return _to_pystr(r)
def Z3_enable_trace(a0, _elems=Elementaries(_lib.Z3_enable_trace)):
_elems.f(_to_ascii(a0))
def Z3_disable_trace(a0, _elems=Elementaries(_lib.Z3_disable_trace)):
_elems.f(_to_ascii(a0))
def Z3_reset_memory(_elems=Elementaries(_lib.Z3_reset_memory)):
_elems.f()
def Z3_finalize_memory(_elems=Elementaries(_lib.Z3_finalize_memory)):
_elems.f()
def Z3_mk_goal(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_goal)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_goal_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_goal_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_goal_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_goal_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_goal_precision(a0, a1, _elems=Elementaries(_lib.Z3_goal_precision)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_assert(a0, a1, a2, _elems=Elementaries(_lib.Z3_goal_assert)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_goal_inconsistent(a0, a1, _elems=Elementaries(_lib.Z3_goal_inconsistent)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_depth(a0, a1, _elems=Elementaries(_lib.Z3_goal_depth)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_reset(a0, a1, _elems=Elementaries(_lib.Z3_goal_reset)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_goal_size(a0, a1, _elems=Elementaries(_lib.Z3_goal_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_formula(a0, a1, a2, _elems=Elementaries(_lib.Z3_goal_formula)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_goal_num_exprs(a0, a1, _elems=Elementaries(_lib.Z3_goal_num_exprs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_is_decided_sat(a0, a1, _elems=Elementaries(_lib.Z3_goal_is_decided_sat)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_is_decided_unsat(a0, a1, _elems=Elementaries(_lib.Z3_goal_is_decided_unsat)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_goal_translate(a0, a1, a2, _elems=Elementaries(_lib.Z3_goal_translate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_goal_convert_model(a0, a1, a2, _elems=Elementaries(_lib.Z3_goal_convert_model)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_goal_to_string(a0, a1, _elems=Elementaries(_lib.Z3_goal_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_goal_to_dimacs_string(a0, a1, _elems=Elementaries(_lib.Z3_goal_to_dimacs_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_mk_tactic(a0, a1, _elems=Elementaries(_lib.Z3_mk_tactic)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return r
def Z3_tactic_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_tactic_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_tactic_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_tactic_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_mk_probe(a0, a1, _elems=Elementaries(_lib.Z3_mk_probe)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return r
def Z3_probe_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_probe_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_probe_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_probe_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_tactic_and_then(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_and_then)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_or_else(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_or_else)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_par_or(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_par_or)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_par_and_then(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_par_and_then)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_try_for(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_try_for)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_when(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_when)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_cond(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_tactic_cond)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_tactic_repeat(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_repeat)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_skip(a0, _elems=Elementaries(_lib.Z3_tactic_skip)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_tactic_fail(a0, _elems=Elementaries(_lib.Z3_tactic_fail)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_tactic_fail_if(a0, a1, _elems=Elementaries(_lib.Z3_tactic_fail_if)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_tactic_fail_if_not_decided(a0, _elems=Elementaries(_lib.Z3_tactic_fail_if_not_decided)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_tactic_using_params(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_using_params)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_const(a0, a1, _elems=Elementaries(_lib.Z3_probe_const)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_probe_lt(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_lt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_gt(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_gt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_le(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_le)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_ge(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_ge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_eq(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_eq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_and(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_and)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_or(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_or)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_probe_not(a0, a1, _elems=Elementaries(_lib.Z3_probe_not)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_get_num_tactics(a0, _elems=Elementaries(_lib.Z3_get_num_tactics)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_get_tactic_name(a0, a1, _elems=Elementaries(_lib.Z3_get_tactic_name)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_get_num_probes(a0, _elems=Elementaries(_lib.Z3_get_num_probes)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_get_probe_name(a0, a1, _elems=Elementaries(_lib.Z3_get_probe_name)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_tactic_get_help(a0, a1, _elems=Elementaries(_lib.Z3_tactic_get_help)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_tactic_get_param_descrs(a0, a1, _elems=Elementaries(_lib.Z3_tactic_get_param_descrs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_tactic_get_descr(a0, a1, _elems=Elementaries(_lib.Z3_tactic_get_descr)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return _to_pystr(r)
def Z3_probe_get_descr(a0, a1, _elems=Elementaries(_lib.Z3_probe_get_descr)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return _to_pystr(r)
def Z3_probe_apply(a0, a1, a2, _elems=Elementaries(_lib.Z3_probe_apply)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_apply(a0, a1, a2, _elems=Elementaries(_lib.Z3_tactic_apply)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_tactic_apply_ex(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_tactic_apply_ex)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_apply_result_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_apply_result_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_apply_result_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_apply_result_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_apply_result_to_string(a0, a1, _elems=Elementaries(_lib.Z3_apply_result_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_apply_result_get_num_subgoals(a0, a1, _elems=Elementaries(_lib.Z3_apply_result_get_num_subgoals)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_apply_result_get_subgoal(a0, a1, a2, _elems=Elementaries(_lib.Z3_apply_result_get_subgoal)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_solver(a0, _elems=Elementaries(_lib.Z3_mk_solver)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_simple_solver(a0, _elems=Elementaries(_lib.Z3_mk_simple_solver)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_solver_for_logic(a0, a1, _elems=Elementaries(_lib.Z3_mk_solver_for_logic)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_solver_from_tactic(a0, a1, _elems=Elementaries(_lib.Z3_mk_solver_from_tactic)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_translate(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_translate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_solver_import_model_converter(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_import_model_converter)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_solver_get_help(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_help)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_solver_get_param_descrs(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_param_descrs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_set_params(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_set_params)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_solver_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_solver_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_solver_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_solver_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_solver_push(a0, a1, _elems=Elementaries(_lib.Z3_solver_push)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_solver_pop(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_pop)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_solver_reset(a0, a1, _elems=Elementaries(_lib.Z3_solver_reset)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_solver_get_num_scopes(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_num_scopes)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_assert(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_assert)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_solver_assert_and_track(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_solver_assert_and_track)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_solver_from_file(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_from_file)):
_elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
def Z3_solver_from_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_solver_from_string)):
_elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
def Z3_solver_get_assertions(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_assertions)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_get_units(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_units)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_get_non_units(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_non_units)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_check(a0, a1, _elems=Elementaries(_lib.Z3_solver_check)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_check_assumptions(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_solver_check_assumptions)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_get_implied_equalities(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_get_implied_equalities)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_solver_get_consequences(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_solver_get_consequences)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_solver_cube(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_solver_cube)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_solver_get_model(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_model)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_get_proof(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_proof)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_get_unsat_core(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_unsat_core)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_get_reason_unknown(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_reason_unknown)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_solver_get_statistics(a0, a1, _elems=Elementaries(_lib.Z3_solver_get_statistics)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_solver_to_string(a0, a1, _elems=Elementaries(_lib.Z3_solver_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_stats_to_string(a0, a1, _elems=Elementaries(_lib.Z3_stats_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_stats_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_stats_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_stats_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_stats_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_stats_size(a0, a1, _elems=Elementaries(_lib.Z3_stats_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_stats_get_key(a0, a1, a2, _elems=Elementaries(_lib.Z3_stats_get_key)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_stats_is_uint(a0, a1, a2, _elems=Elementaries(_lib.Z3_stats_is_uint)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_stats_is_double(a0, a1, a2, _elems=Elementaries(_lib.Z3_stats_is_double)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_stats_get_uint_value(a0, a1, a2, _elems=Elementaries(_lib.Z3_stats_get_uint_value)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_stats_get_double_value(a0, a1, a2, _elems=Elementaries(_lib.Z3_stats_get_double_value)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_get_estimated_alloc_size(_elems=Elementaries(_lib.Z3_get_estimated_alloc_size)):
r = _elems.f()
return r
def Z3_mk_ast_vector(a0, _elems=Elementaries(_lib.Z3_mk_ast_vector)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_ast_vector_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_ast_vector_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_vector_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_ast_vector_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_vector_size(a0, a1, _elems=Elementaries(_lib.Z3_ast_vector_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_ast_vector_get(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_vector_get)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_ast_vector_set(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_ast_vector_set)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_ast_vector_resize(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_vector_resize)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_ast_vector_push(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_vector_push)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_ast_vector_translate(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_vector_translate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_ast_vector_to_string(a0, a1, _elems=Elementaries(_lib.Z3_ast_vector_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_mk_ast_map(a0, _elems=Elementaries(_lib.Z3_mk_ast_map)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_ast_map_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_map_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_map_contains(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_map_contains)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_ast_map_find(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_map_find)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_ast_map_insert(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_ast_map_insert)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_ast_map_erase(a0, a1, a2, _elems=Elementaries(_lib.Z3_ast_map_erase)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_ast_map_reset(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_reset)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_ast_map_size(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_size)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_ast_map_keys(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_keys)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_ast_map_to_string(a0, a1, _elems=Elementaries(_lib.Z3_ast_map_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_algebraic_is_value(a0, a1, _elems=Elementaries(_lib.Z3_algebraic_is_value)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_algebraic_is_pos(a0, a1, _elems=Elementaries(_lib.Z3_algebraic_is_pos)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_algebraic_is_neg(a0, a1, _elems=Elementaries(_lib.Z3_algebraic_is_neg)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_algebraic_is_zero(a0, a1, _elems=Elementaries(_lib.Z3_algebraic_is_zero)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_algebraic_sign(a0, a1, _elems=Elementaries(_lib.Z3_algebraic_sign)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_algebraic_add(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_add)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_sub(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_sub)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_mul(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_mul)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_div(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_div)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_root(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_root)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_power(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_power)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_lt(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_lt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_gt(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_gt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_le(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_le)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_ge(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_ge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_eq(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_eq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_neq(a0, a1, a2, _elems=Elementaries(_lib.Z3_algebraic_neq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_algebraic_roots(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_algebraic_roots)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_algebraic_eval(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_algebraic_eval)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_polynomial_subresultants(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_polynomial_subresultants)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_rcf_del(a0, a1, _elems=Elementaries(_lib.Z3_rcf_del)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_rcf_mk_rational(a0, a1, _elems=Elementaries(_lib.Z3_rcf_mk_rational)):
r = _elems.f(a0, _to_ascii(a1))
_elems.Check(a0)
return r
def Z3_rcf_mk_small_int(a0, a1, _elems=Elementaries(_lib.Z3_rcf_mk_small_int)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_rcf_mk_pi(a0, _elems=Elementaries(_lib.Z3_rcf_mk_pi)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_rcf_mk_e(a0, _elems=Elementaries(_lib.Z3_rcf_mk_e)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_rcf_mk_infinitesimal(a0, _elems=Elementaries(_lib.Z3_rcf_mk_infinitesimal)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_rcf_mk_roots(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_rcf_mk_roots)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_rcf_add(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_add)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_sub(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_sub)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_mul(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_mul)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_div(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_div)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_neg(a0, a1, _elems=Elementaries(_lib.Z3_rcf_neg)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_rcf_inv(a0, a1, _elems=Elementaries(_lib.Z3_rcf_inv)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_rcf_power(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_power)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_lt(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_lt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_gt(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_gt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_le(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_le)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_ge(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_ge)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_eq(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_eq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_neq(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_neq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_rcf_num_to_string(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_rcf_num_to_string)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return _to_pystr(r)
def Z3_rcf_num_to_decimal_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_rcf_num_to_decimal_string)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_rcf_get_numerator_denominator(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_rcf_get_numerator_denominator)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_mk_fixedpoint(a0, _elems=Elementaries(_lib.Z3_mk_fixedpoint)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_fixedpoint_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_fixedpoint_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_fixedpoint_add_rule(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_add_rule)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_fixedpoint_add_fact(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_fixedpoint_add_fact)):
_elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
def Z3_fixedpoint_assert(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_assert)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_fixedpoint_query(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_query)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_fixedpoint_query_relations(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_query_relations)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_answer(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_answer)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_reason_unknown(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_reason_unknown)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_fixedpoint_update_rule(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_update_rule)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_fixedpoint_get_num_levels(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_get_num_levels)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_cover_delta(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_get_cover_delta)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_fixedpoint_add_cover(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_fixedpoint_add_cover)):
_elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
def Z3_fixedpoint_get_statistics(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_statistics)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_register_relation(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_register_relation)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_fixedpoint_set_predicate_representation(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_fixedpoint_set_predicate_representation)):
_elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
def Z3_fixedpoint_get_rules(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_rules)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_assertions(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_assertions)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_set_params(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_set_params)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_fixedpoint_get_help(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_help)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_fixedpoint_get_param_descrs(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_param_descrs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_to_string(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_to_string)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return _to_pystr(r)
def Z3_fixedpoint_from_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_from_string)):
r = _elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
return r
def Z3_fixedpoint_from_file(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_from_file)):
r = _elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
return r
def Z3_fixedpoint_push(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_push)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_fixedpoint_pop(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_pop)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_mk_optimize(a0, _elems=Elementaries(_lib.Z3_mk_optimize)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_optimize_inc_ref(a0, a1, _elems=Elementaries(_lib.Z3_optimize_inc_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_optimize_dec_ref(a0, a1, _elems=Elementaries(_lib.Z3_optimize_dec_ref)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_optimize_assert(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_assert)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_optimize_assert_soft(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_optimize_assert_soft)):
r = _elems.f(a0, a1, a2, _to_ascii(a3), a4)
_elems.Check(a0)
return r
def Z3_optimize_maximize(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_maximize)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_minimize(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_minimize)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_push(a0, a1, _elems=Elementaries(_lib.Z3_optimize_push)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_optimize_pop(a0, a1, _elems=Elementaries(_lib.Z3_optimize_pop)):
_elems.f(a0, a1)
_elems.Check(a0)
def Z3_optimize_check(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_optimize_check)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_optimize_get_reason_unknown(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_reason_unknown)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_optimize_get_model(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_model)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_optimize_get_unsat_core(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_unsat_core)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_optimize_set_params(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_set_params)):
_elems.f(a0, a1, a2)
_elems.Check(a0)
def Z3_optimize_get_param_descrs(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_param_descrs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_optimize_get_lower(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_get_lower)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_get_upper(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_get_upper)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_get_lower_as_vector(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_get_lower_as_vector)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_get_upper_as_vector(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_get_upper_as_vector)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_optimize_to_string(a0, a1, _elems=Elementaries(_lib.Z3_optimize_to_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_optimize_from_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_from_string)):
_elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
def Z3_optimize_from_file(a0, a1, a2, _elems=Elementaries(_lib.Z3_optimize_from_file)):
_elems.f(a0, a1, _to_ascii(a2))
_elems.Check(a0)
def Z3_optimize_get_help(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_help)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_optimize_get_statistics(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_statistics)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_optimize_get_assertions(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_assertions)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_optimize_get_objectives(a0, a1, _elems=Elementaries(_lib.Z3_optimize_get_objectives)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_rounding_mode_sort(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rounding_mode_sort)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_nearest_ties_to_even(a0, _elems=Elementaries(_lib.Z3_mk_fpa_round_nearest_ties_to_even)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_rne(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rne)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_nearest_ties_to_away(a0, _elems=Elementaries(_lib.Z3_mk_fpa_round_nearest_ties_to_away)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_rna(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rna)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_toward_positive(a0, _elems=Elementaries(_lib.Z3_mk_fpa_round_toward_positive)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_rtp(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rtp)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_toward_negative(a0, _elems=Elementaries(_lib.Z3_mk_fpa_round_toward_negative)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_rtn(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rtn)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_toward_zero(a0, _elems=Elementaries(_lib.Z3_mk_fpa_round_toward_zero)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_rtz(a0, _elems=Elementaries(_lib.Z3_mk_fpa_rtz)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_sort)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_half(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_half)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_16(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_16)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_single(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_single)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_32(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_32)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_double(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_double)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_64(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_64)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_quadruple(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_quadruple)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_sort_128(a0, _elems=Elementaries(_lib.Z3_mk_fpa_sort_128)):
r = _elems.f(a0)
_elems.Check(a0)
return r
def Z3_mk_fpa_nan(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_nan)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_inf(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_inf)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_zero(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_zero)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_fp(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_fp)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_numeral_float(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_numeral_float)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_numeral_double(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_numeral_double)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_numeral_int(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_numeral_int)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_numeral_int_uint(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_fpa_numeral_int_uint)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_fpa_numeral_int64_uint64(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_fpa_numeral_int64_uint64)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_fpa_abs(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_abs)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_neg(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_neg)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_add(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_add)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_sub(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_sub)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_mul(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_mul)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_div(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_div)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_fma(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_fpa_fma)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_mk_fpa_sqrt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_sqrt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_rem(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_rem)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_round_to_integral(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_round_to_integral)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_min(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_min)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_max(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_max)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_leq(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_leq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_lt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_lt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_geq(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_geq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_gt(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_gt)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_eq(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_eq)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_normal(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_normal)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_subnormal(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_subnormal)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_zero(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_zero)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_infinite(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_infinite)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_nan(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_nan)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_negative(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_negative)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_is_positive(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_is_positive)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_bv(a0, a1, a2, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_bv)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_float(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_float)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_real(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_real)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_signed(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_signed)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_unsigned(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_unsigned)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_ubv(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_ubv)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_sbv(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_mk_fpa_to_sbv)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_real(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_to_real)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_get_ebits(a0, a1, _elems=Elementaries(_lib.Z3_fpa_get_ebits)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_get_sbits(a0, a1, _elems=Elementaries(_lib.Z3_fpa_get_sbits)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_nan(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_nan)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_inf(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_inf)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_zero(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_zero)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_normal(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_normal)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_subnormal(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_subnormal)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_positive(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_positive)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_is_numeral_negative(a0, a1, _elems=Elementaries(_lib.Z3_fpa_is_numeral_negative)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_sign_bv(a0, a1, _elems=Elementaries(_lib.Z3_fpa_get_numeral_sign_bv)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_significand_bv(a0, a1, _elems=Elementaries(_lib.Z3_fpa_get_numeral_significand_bv)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_sign(a0, a1, a2, _elems=Elementaries(_lib.Z3_fpa_get_numeral_sign)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_significand_string(a0, a1, _elems=Elementaries(_lib.Z3_fpa_get_numeral_significand_string)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return _to_pystr(r)
def Z3_fpa_get_numeral_significand_uint64(a0, a1, a2, _elems=Elementaries(_lib.Z3_fpa_get_numeral_significand_uint64)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_exponent_string(a0, a1, a2, _elems=Elementaries(_lib.Z3_fpa_get_numeral_exponent_string)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return _to_pystr(r)
def Z3_fpa_get_numeral_exponent_int64(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fpa_get_numeral_exponent_int64)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_fpa_get_numeral_exponent_bv(a0, a1, a2, _elems=Elementaries(_lib.Z3_fpa_get_numeral_exponent_bv)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_ieee_bv(a0, a1, _elems=Elementaries(_lib.Z3_mk_fpa_to_ieee_bv)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_mk_fpa_to_fp_int_real(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_mk_fpa_to_fp_int_real)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_fixedpoint_query_from_lvl(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_query_from_lvl)):
r = _elems.f(a0, a1, a2, a3)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_ground_sat_answer(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_ground_sat_answer)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_rules_along_trace(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_rules_along_trace)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_get_rule_names_along_trace(a0, a1, _elems=Elementaries(_lib.Z3_fixedpoint_get_rule_names_along_trace)):
r = _elems.f(a0, a1)
_elems.Check(a0)
return r
def Z3_fixedpoint_add_invariant(a0, a1, a2, a3, _elems=Elementaries(_lib.Z3_fixedpoint_add_invariant)):
_elems.f(a0, a1, a2, a3)
_elems.Check(a0)
def Z3_fixedpoint_get_reachable(a0, a1, a2, _elems=Elementaries(_lib.Z3_fixedpoint_get_reachable)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_qe_model_project(a0, a1, a2, a3, a4, _elems=Elementaries(_lib.Z3_qe_model_project)):
r = _elems.f(a0, a1, a2, a3, a4)
_elems.Check(a0)
return r
def Z3_qe_model_project_skolem(a0, a1, a2, a3, a4, a5, _elems=Elementaries(_lib.Z3_qe_model_project_skolem)):
r = _elems.f(a0, a1, a2, a3, a4, a5)
_elems.Check(a0)
return r
def Z3_model_extrapolate(a0, a1, a2, _elems=Elementaries(_lib.Z3_model_extrapolate)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
def Z3_qe_lite(a0, a1, a2, _elems=Elementaries(_lib.Z3_qe_lite)):
r = _elems.f(a0, a1, a2)
_elems.Check(a0)
return r
# Clean up
del _lib
del _default_dirs
del _all_dirs
del _ext
| 36.812705 | 237 | 0.749376 |
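Editor's note on the generated Z3 bindings above: every wrapper forwards its arguments to the native library handle and then calls Check() on the context so that Z3 errors surface as Python exceptions. The snippet below is a minimal usage sketch, not part of the generated file; it assumes the standard z3-solver package, whose high-level classes sit on top of these Z3_* wrappers.
import z3

x = z3.Int('x')

g = z3.Goal()                        # backed by Z3_mk_goal / Z3_goal_assert
g.add(x > 2, x < 5)
print(z3.Tactic('simplify')(g))      # Z3_mk_tactic / Z3_tactic_apply

s = z3.Solver()                      # Z3_mk_solver
s.add(x > 2, x < 5)                  # Z3_solver_assert
print(s.check())                     # Z3_solver_check -> sat
print(s.model())                     # Z3_solver_get_model, e.g. [x = 3]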
fdda024af040e1cb659eefd8fa0170cdadba2a5e | 820 | py | Python | test/unit/applications/lang/python.py | Dean-Coakley/unit | 98c0ce6cc413b174d5a36f92095246a9967b1371 | [
"Apache-2.0"
] | null | null | null | test/unit/applications/lang/python.py | Dean-Coakley/unit | 98c0ce6cc413b174d5a36f92095246a9967b1371 | [
"Apache-2.0"
] | null | null | null | test/unit/applications/lang/python.py | Dean-Coakley/unit | 98c0ce6cc413b174d5a36f92095246a9967b1371 | [
"Apache-2.0"
] | null | null | null | from unit.applications.proto import TestApplicationProto
class TestApplicationPython(TestApplicationProto):
application_type = "python"
def load(self, script, name=None, **kwargs):
if name is None:
name = script
script_path = self.current_dir + '/python/' + script
self._load_conf(
{
"listeners": {"*:7080": {"pass": "applications/" + name}},
"applications": {
name: {
"type": self.application_type,
"processes": {"spare": 0},
"path": script_path,
"working_directory": script_path,
"module": "wsgi",
}
},
},
**kwargs
)
| 29.285714 | 74 | 0.452439 |
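A hypothetical sketch of how the helper above is used by a concrete test case. The self.get() call and the assertion helper come from the base test classes and are assumptions here, as is the 'empty' sample application.
class TestPythonBasic(TestApplicationPython):
    def test_python_application_load(self):
        # load() points the running unit daemon at test/python/empty/wsgi.py
        self.load('empty')
        # HTTP helpers are inherited from the base test classes (assumed API)
        self.assertEqual(self.get()['status'], 200, 'status')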
0505560bd639f191f289a70cb2e58e270d74f942 | 294 | py | Python | skills/reddit_ner_skill/test.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 34 | 2021-08-18T14:51:44.000Z | 2022-03-10T14:14:48.000Z | skills/reddit_ner_skill/test.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 27 | 2021-08-30T14:42:09.000Z | 2022-03-17T22:11:45.000Z | skills/reddit_ner_skill/test.py | oserikov/dream | 109ba2df799025dcdada1fddbb7380e1c03100eb | [
"Apache-2.0"
] | 40 | 2021-08-22T07:13:32.000Z | 2022-03-29T11:45:32.000Z | #!/usr/bin/env python
import requests
import json
def main_test():
url = "http://0.0.0.0:8035/respond"
tests = json.load(open("tests.json"))
for test in tests:
assert requests.post(url=url, json=test).ok
print("Success!")
if __name__ == "__main__":
main_test()
| 17.294118 | 51 | 0.632653 |
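For a quick manual probe, the same endpoint can be hit with a single hand-written payload instead of tests.json. The field name below is an assumption about the skill's request schema, not taken from the repository.
import requests

# hypothetical one-off request against the skill's /respond endpoint
payload = {"sentences": ["I moved from London to Berlin last year."]}
resp = requests.post("http://0.0.0.0:8035/respond", json=payload)
print(resp.status_code, resp.json())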
c8522b8ac77fbb8adbe9ab9ae0ff9bc7599c86f3 | 9,293 | py | Python | handler.py | jaredbrook/aws-lambda-http-check | c9e85031f0c4ce033f4ff625c50d39f6d65fa1ad | [
"MIT"
] | null | null | null | handler.py | jaredbrook/aws-lambda-http-check | c9e85031f0c4ce033f4ff625c50d39f6d65fa1ad | [
"MIT"
] | null | null | null | handler.py | jaredbrook/aws-lambda-http-check | c9e85031f0c4ce033f4ff625c50d39f6d65fa1ad | [
"MIT"
] | null | null | null | import json
import os
import http.client
import boto3
from time import perf_counter as pc
from urllib.parse import urlparse
import ssl
from io import StringIO
import gzip
import re
import hashlib
class Config:
"""Lambda function runtime configuration"""
ENDPOINT = 'ENDPOINT'
METHOD = 'METHOD'
PAYLOAD = 'PAYLOAD'
TIMEOUT = 'TIMEOUT'
HEADERS = 'HEADERS'
COMPRESSED = 'COMPRESSED'
REPORT_RESPONSE_BODY = 'REPORT_RESPONSE_BODY'
REPORT_AS_CW_METRICS = 'REPORT_AS_CW_METRICS'
CW_METRICS_NAMESPACE = 'CW_METRICS_NAMESPACE'
CW_METRICS_METRIC_NAME = 'CW_METRICS_METRIC_NAME'
BODY_REGEX_MATCH = 'BODY_REGEX_MATCH'
STATUS_CODE_MATCH = 'STATUS_CODE_MATCH'
FAIL_ON_STATUS_CODE_MISMATCH = 'FAIL_ON_STATUS_CODE_MISMATCH'
def __init__(self, event):
self.event = event
self.defaults = {
self.ENDPOINT: 'https://google.com.au',
self.METHOD: 'GET',
self.PAYLOAD: None,
self.TIMEOUT: 120,
self.REPORT_RESPONSE_BODY: '0',
self.REPORT_AS_CW_METRICS: '1',
self.CW_METRICS_NAMESPACE: 'HttpCheck',
self.HEADERS: '',
self.COMPRESSED: '0',
self.BODY_REGEX_MATCH: None,
self.STATUS_CODE_MATCH: None,
self.FAIL_ON_STATUS_CODE_MISMATCH: None
}
def __get_property(self, property_name):
if property_name in self.event:
return self.event[property_name]
if property_name in os.environ:
return os.environ[property_name]
if property_name in self.defaults:
return self.defaults[property_name]
return None
@property
def endpoint(self):
return self.__get_property(self.ENDPOINT)
@property
def method(self):
return self.__get_property(self.METHOD)
@property
def payload(self):
payload = self.__get_property(self.PAYLOAD)
if payload is not None:
return payload.encode('utf-8')
return payload
@property
def timeout(self):
return self.__get_property(self.TIMEOUT)
@property
def reportbody(self):
return self.__get_property(self.REPORT_RESPONSE_BODY)
@property
def headers(self):
headers = self.__get_property(self.HEADERS)
if headers == '':
return {}
else:
try:
return dict(u.split("=") for u in headers.split(' '))
            except Exception:
                print(f"Could not decode headers: {headers}")
                # fall back to no extra headers so callers always get a dict
                return {}
@property
def bodyregexmatch(self):
return self.__get_property(self.BODY_REGEX_MATCH)
@property
def statuscodematch(self):
return self.__get_property(self.STATUS_CODE_MATCH)
@property
def fail_on_statuscode_mismatch(self):
return self.__get_property(self.FAIL_ON_STATUS_CODE_MISMATCH)
@property
def cwoptions(self):
return {
'enabled': self.__get_property(self.REPORT_AS_CW_METRICS),
'namespace': self.__get_property(self.CW_METRICS_NAMESPACE),
}
@property
def compressed(self):
return self.__get_property(self.COMPRESSED)
class HttpCheck:
"""Execution of HTTP(s) request"""
def __init__(self, config):
self.method = config.method
self.endpoint = config.endpoint
self.timeout = config.timeout
self.payload = config.payload
self.headers = config.headers
self.compressed = config.compressed
self.bodyregexmatch = config.bodyregexmatch
self.statuscodematch = config.statuscodematch
self.fail_on_statuscode_mismatch = config.fail_on_statuscode_mismatch
def execute(self):
url = urlparse(self.endpoint)
location = url.netloc
        if url.scheme == 'http':
            request = http.client.HTTPConnection(location, timeout=int(self.timeout))
        elif url.scheme == 'https':
            request = http.client.HTTPSConnection(location, timeout=int(self.timeout), context=ssl._create_unverified_context())
        else:
            # unsupported scheme would otherwise leave 'request' undefined below
            return {'Available': 0, 'Reason': f"Unsupported URL scheme: {url.scheme}"}
if 'HTTP_DEBUG' in os.environ and os.environ['HTTP_DEBUG'] == '1':
request.set_debuglevel(1)
path = url.path
if path == '':
path = '/'
if url.query is not None:
path = path + "?" + url.query
if self.compressed == '1':
self.headers['Accept-Encoding'] = 'deflate, gzip'
try:
t0 = pc()
# perform request
request.request(self.method, path, self.payload, self.headers)
# read response
response_data = request.getresponse()
# stop the stopwatch
t1 = pc()
print(f"Headers: {response_data.getheaders()}")
if response_data.getheader('Content-Encoding') == 'gzip':
data = gzip.decompress(response_data.read())
response_body = str(data,'utf-8')
elif response_data.getheader('Content-Type') and response_data.getheader('Content-Type').startswith('image/'):
response_body = hashlib.md5(response_data.read()).hexdigest()
print(response_body)
else:
response_body = str(response_data.read().decode('utf-8','replace'))
result = {
'Reason': response_data.reason,
'ResponseBody': response_body,
'StatusCode': response_data.status,
'TimeTaken': int((t1 - t0) * 1000),
'Available': '1'
}
if self.bodyregexmatch is not None:
regex = re.compile(self.bodyregexmatch)
value = 1 if regex.search(response_body) else 0
result['ResponseBodyRegexMatch'] = value
if self.statuscodematch is not None:
result['StatusCodeMatch'] = int(int(response_data.status) == int(self.statuscodematch))
if not result['StatusCodeMatch'] and self.fail_on_statuscode_mismatch:
result['Available'] = '0'
# return structure with data
return result
except Exception as e:
print(f"Failed to connect to {self.endpoint}\n{e}")
return {'Available': 0, 'Reason': str(e)}
class ResultReporter:
"""Reporting results to CloudWatch"""
def __init__(self, config, context):
self.options = config.cwoptions
self.endpoint = config.endpoint
def report(self, result):
if self.options['enabled'] == '1':
try:
cloudwatch = boto3.client('cloudwatch')
metric_data = [{
'MetricName': 'Available',
'Dimensions': [
{'Name': 'Endpoint', 'Value': self.endpoint}
],
'Unit': 'None',
'Value': int(result['Available'])
}]
if result['Available'] == '1':
metric_data.append({
'MetricName': 'TimeTaken',
'Dimensions': [
{'Name': 'Endpoint', 'Value': self.endpoint}
],
'Unit': 'Milliseconds',
'Value': int(result['TimeTaken'])
})
metric_data.append({
'MetricName': 'StatusCode',
'Dimensions': [
{'Name': 'Endpoint', 'Value': self.endpoint}
],
'Unit': 'None',
'Value': int(result['StatusCode'])
})
for additional_metric in ['ResponseBodyRegexMatch', 'StatusCodeMatch']:
if additional_metric in result:
metric_data.append({
'MetricName': additional_metric,
'Dimensions': [
{'Name': 'Endpoint', 'Value': self.endpoint}
],
'Unit': 'None',
'Value': int(result[additional_metric])
})
result = cloudwatch.put_metric_data(
MetricData=metric_data,
Namespace=self.options['namespace']
)
print(f"Sent data to CloudWatch requestId=:{result['ResponseMetadata']['RequestId']}")
except Exception as e:
print(f"Failed to publish metrics to CloudWatch:{e}")
def http_check(event, context):
"""Lambda function handler"""
config = Config(event)
http_check = HttpCheck(config)
result = http_check.execute()
# report results
    ResultReporter(config, context).report(result)
# Remove body if not required
if (config.reportbody != '1') and ('ResponseBody' in result):
del result['ResponseBody']
result_json = json.dumps(result, indent=4)
# log results
print(f"Result of checking {config.method} {config.endpoint}\n{result_json}")
# return to caller
return result
| 34.040293 | 128 | 0.556763 |
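A small local smoke test for the Lambda handler above; this is a sketch, not part of the repository. Every event key mirrors a Config constant defined in handler.py, and CloudWatch reporting is switched off so the run needs no AWS credentials.
from handler import http_check

event = {
    'ENDPOINT': 'https://example.com',
    'METHOD': 'GET',
    'TIMEOUT': 10,
    'STATUS_CODE_MATCH': 200,
    'REPORT_AS_CW_METRICS': '0',   # skip the boto3/CloudWatch call for a local run
    'REPORT_RESPONSE_BODY': '0',
}

# the handler does not use the Lambda context object, so None is acceptable here
print(http_check(event, None))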
3a0593a233d54c1b0a1eca85534f3f8841f2af7a | 7,298 | py | Python | tests/test_parsing.py | gargii/python-skytools | 8b75647e03809f3ceb98cd614e868393f3262e6a | [
"ISC"
] | 7 | 2019-11-17T01:59:41.000Z | 2021-04-23T15:01:03.000Z | tests/test_parsing.py | gargii/python-skytools | 8b75647e03809f3ceb98cd614e868393f3262e6a | [
"ISC"
] | 2 | 2019-09-15T16:38:02.000Z | 2022-02-07T07:35:28.000Z | tests/test_parsing.py | gargii/python-skytools | 8b75647e03809f3ceb98cd614e868393f3262e6a | [
"ISC"
] | 4 | 2017-11-18T16:49:26.000Z | 2021-11-15T15:19:33.000Z |
import pytest
from skytools.parsing import (
dedent, hsize_to_bytes, merge_connect_string, parse_acl,
parse_connect_string, parse_logtriga_sql, parse_pgarray,
parse_sqltriga_sql, parse_statements, parse_tabbed_table, sql_tokenizer,
)
def test_parse_pgarray():
assert parse_pgarray('{}') == []
assert parse_pgarray('{a,b,null,"null"}') == ['a', 'b', None, 'null']
assert parse_pgarray(r'{"a,a","b\"b","c\\c"}') == ['a,a', 'b"b', 'c\\c']
assert parse_pgarray("[0,3]={1,2,3}") == ['1', '2', '3']
assert parse_pgarray(None) is None
with pytest.raises(ValueError):
parse_pgarray('}{')
with pytest.raises(ValueError):
parse_pgarray('[1]=}')
with pytest.raises(ValueError):
parse_pgarray('{"..." , }')
with pytest.raises(ValueError):
parse_pgarray('{"..." ; }')
with pytest.raises(ValueError):
parse_pgarray('{"}')
with pytest.raises(ValueError):
parse_pgarray('{"..."}zzz')
with pytest.raises(ValueError):
parse_pgarray('{"..."}z')
def test_parse_sqltriga_sql():
# Insert event
row = parse_logtriga_sql('I', '(id, data) values (1, null)')
assert row == {'data': None, 'id': '1'}
row = parse_sqltriga_sql('I', '(id, data) values (1, null)', pklist=["id"])
assert row == {'data': None, 'id': '1'}
# Update event
row = parse_logtriga_sql('U', "data='foo' where id = 1")
assert row == {'data': 'foo', 'id': '1'}
# Delete event
row = parse_logtriga_sql('D', "id = 1 and id2 = 'str''val'")
assert row == {'id': '1', 'id2': "str'val"}
# Insert event, splitkeys
keys, row = parse_logtriga_sql('I', '(id, data) values (1, null)', splitkeys=True)
assert keys == {}
assert row == {'data': None, 'id': '1'}
keys, row = parse_logtriga_sql('I', '(id, data) values (1, null)', splitkeys=True)
assert keys == {}
assert row == {'data': None, 'id': '1'}
# Update event, splitkeys
keys, row = parse_logtriga_sql('U', "data='foo' where id = 1", splitkeys=True)
assert keys == {'id': '1'}
assert row == {'data': 'foo'}
keys, row = parse_logtriga_sql('U', "data='foo',type=3 where id = 1", splitkeys=True)
assert keys == {'id': '1'}
assert row == {'data': 'foo', 'type': '3'}
# Delete event, splitkeys
keys, row = parse_logtriga_sql('D', "id = 1 and id2 = 'str''val'", splitkeys=True)
assert keys == {'id': '1', 'id2': "str'val"}
# generic
with pytest.raises(ValueError):
parse_logtriga_sql('J', "(id, data) values (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id) values (1, null)")
# insert errors
with pytest.raises(ValueError):
parse_logtriga_sql('I', "insert (id, data) values (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id; data) values (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id, data) select (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id, data) values of (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id, data) values (1; null)")
with pytest.raises(ValueError):
parse_logtriga_sql('I', "(id, data) values (1, null) ;")
with pytest.raises(ValueError, match="EOF"):
parse_logtriga_sql('I', "(id, data) values (1, null) , insert")
# update errors
with pytest.raises(ValueError):
parse_logtriga_sql('U', "(id,data) values (1, null)")
with pytest.raises(ValueError):
parse_logtriga_sql('U', "id,data")
with pytest.raises(ValueError):
parse_logtriga_sql('U', "data='foo';type=3 where id = 1")
with pytest.raises(ValueError):
parse_logtriga_sql('U', "data='foo' where id>1")
with pytest.raises(ValueError):
parse_logtriga_sql('U', "data='foo' where id=1 or true")
# delete errors
with pytest.raises(ValueError):
parse_logtriga_sql('D', "foo,1")
with pytest.raises(ValueError):
parse_logtriga_sql('D', "foo = 1 ,")
def test_parse_tabbed_table():
assert parse_tabbed_table('col1\tcol2\nval1\tval2\n') == [
{'col1': 'val1', 'col2': 'val2'}
]
# skip rows with different size
assert parse_tabbed_table('col1\tcol2\nval1\tval2\ntmp\n') == [
{'col1': 'val1', 'col2': 'val2'}
]
def test_sql_tokenizer():
res = sql_tokenizer("select * from a.b", ignore_whitespace=True)
assert list(res) == [
('ident', 'select'), ('sym', '*'), ('ident', 'from'),
('ident', 'a'), ('sym', '.'), ('ident', 'b')
]
res = sql_tokenizer("\"c olumn\",'str''val'")
assert list(res) == [
('ident', '"c olumn"'), ('sym', ','), ('str', "'str''val'")
]
res = sql_tokenizer('a.b a."b "" c" a.1', fqident=True, ignore_whitespace=True)
assert list(res) == [
('ident', 'a.b'), ('ident', 'a."b "" c"'), ('ident', 'a'), ('sym', '.'), ('num', '1')
]
res = sql_tokenizer(r"set 'a''\' + E'\''", standard_quoting=True, ignore_whitespace=True)
assert list(res) == [
('ident', 'set'), ('str', "'a''\\'"), ('sym', '+'), ('str', "E'\\''")
]
res = sql_tokenizer('a.b a."b "" c" a.1', fqident=True, standard_quoting=True, ignore_whitespace=True)
assert list(res) == [
('ident', 'a.b'), ('ident', 'a."b "" c"'), ('ident', 'a'), ('sym', '.'), ('num', '1')
]
res = sql_tokenizer('a.b\nc;', show_location=True, ignore_whitespace=True)
assert list(res) == [
('ident', 'a', 1), ('sym', '.', 2), ('ident', 'b', 3), ('ident', 'c', 5), ('sym', ';', 6)
]
def test_parse_statements():
res = parse_statements("begin; select 1; select 'foo'; end;")
assert list(res) == ['begin;', 'select 1;', "select 'foo';", 'end;']
res = parse_statements("select (select 2+(select 3;);) ; select 4;")
assert list(res) == ['select (select 2+(select 3;);) ;', 'select 4;']
with pytest.raises(ValueError):
list(parse_statements('select ());'))
with pytest.raises(ValueError):
list(parse_statements('copy from stdin;'))
def test_parse_acl():
assert parse_acl('user=rwx/owner') == ('user', 'rwx', 'owner')
assert parse_acl('" ""user"=rwx/" ""owner"') == (' "user', 'rwx', ' "owner')
assert parse_acl('user=rwx') == ('user', 'rwx', None)
assert parse_acl('=/f') == (None, '', 'f')
# is this ok?
assert parse_acl('?') is None
def test_dedent():
assert dedent(' Line1:\n Line 2\n') == 'Line1:\n Line 2\n'
res = dedent(' \nLine1:\n Line 2\n Line 3\n Line 4')
assert res == 'Line1:\nLine 2\n Line 3\n Line 4\n'
def test_hsize_to_bytes():
assert hsize_to_bytes('10G') == 10737418240
assert hsize_to_bytes('12k') == 12288
with pytest.raises(ValueError):
hsize_to_bytes("x")
def test_parse_connect_string():
assert parse_connect_string("host=foo") == [('host', 'foo')]
res = parse_connect_string(r" host = foo password = ' f\\\o\'o ' ")
assert res == [('host', 'foo'), ('password', "' f\\o'o '")]
with pytest.raises(ValueError):
parse_connect_string(r" host = ")
def test_merge_connect_string():
res = merge_connect_string([('host', 'ip'), ('pass', ''), ('x', ' ')])
assert res == "host=ip pass='' x=' '"
| 35.6 | 106 | 0.576596 |
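Beyond the test suite, the same helpers can be called directly. A short usage sketch, assuming the skytools package is installed; the expected values are taken from the tests above.
from skytools.parsing import hsize_to_bytes, parse_pgarray, parse_statements

print(parse_pgarray('{a,b,null,"null"}'))               # ['a', 'b', None, 'null']
print(list(parse_statements("begin; select 1; end;")))  # ['begin;', 'select 1;', 'end;']
print(hsize_to_bytes('12k'))                            # 12288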