hexsha (stringlengths 40-40) | size (int64 3-1.03M) | ext (stringclasses 10 values) | lang (stringclasses 1 value) | max_stars_repo_path (stringlengths 3-972) | max_stars_repo_name (stringlengths 6-130) | max_stars_repo_head_hexsha (stringlengths 40-78) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 3-972) | max_issues_repo_name (stringlengths 6-130) | max_issues_repo_head_hexsha (stringlengths 40-78) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (int64 1-116k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-972) | max_forks_repo_name (stringlengths 6-130) | max_forks_repo_head_hexsha (stringlengths 40-78) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | content (stringlengths 3-1.03M) | avg_line_length (float64 1.13-941k) | max_line_length (int64 2-941k) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d4d848217b89b74f8954cb11200a1df6ecb41ded | 4,423 | py | Python | legacy_notebooks/data-engineering-1.1.0/Python/classic/includes/main/python/operations.py | david-lyle/databricks-notebooks | 7acc142d4e397b92031ee6ab8332f26e290c4d91 | ["Apache-2.0"] | null | null | null | legacy_notebooks/data-engineering-1.1.0/Python/classic/includes/main/python/operations.py | david-lyle/databricks-notebooks | 7acc142d4e397b92031ee6ab8332f26e290c4d91 | ["Apache-2.0"] | null | null | null | legacy_notebooks/data-engineering-1.1.0/Python/classic/includes/main/python/operations.py | david-lyle/databricks-notebooks | 7acc142d4e397b92031ee6ab8332f26e290c4d91 | ["Apache-2.0"] | 1 | 2022-02-15T08:10:55.000Z | 2022-02-15T08:10:55.000Z |
# Databricks notebook source
from delta.tables import DeltaTable
from pyspark.sql import DataFrame
from pyspark.sql.functions import (
col,
current_timestamp,
from_json,
from_unixtime,
lag,
lead,
lit,
mean,
stddev,
max,
)
from typing import List
from pyspark.sql.session import SparkSession
from pyspark.sql.window import Window
# COMMAND ----------
def batch_writer(
dataframe: DataFrame,
partition_column: str,
exclude_columns: List = [],
mode: str = "append",
) -> DataFrame:
return (
dataframe.drop(
*exclude_columns
) # This uses Python argument unpacking (https://docs.python.org/3/tutorial/controlflow.html#unpacking-argument-lists)
.write.format("delta")
.mode(mode)
.partitionBy(partition_column)
)
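# Note (added comment, illustrative only): batch_writer returns a configured DataFrameWriter
# rather than a DataFrame, so the caller completes the write itself, e.g. with a hypothetical path:
#   batch_writer(bronzeDF, partition_column="p_ingestdate").save("/mnt/datalake/bronze")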
# COMMAND ----------
def generate_clean_and_quarantine_dataframes(
dataframe: DataFrame,
) -> (DataFrame, DataFrame):
return (
dataframe.filter("device_id IS NOT NULL"),
dataframe.filter("device_id IS NULL"),
)
# COMMAND ----------
# TODO
def read_batch_bronze() -> DataFrame:
return # FILL_THIS_IN
# COMMAND ----------
def read_batch_delta(deltaPath: str) -> DataFrame:
return spark.read.format("delta").load(deltaPath)
# COMMAND ----------
def read_batch_raw(rawPath: str) -> DataFrame:
kafka_schema = "value STRING"
return spark.read.format("text").schema(kafka_schema).load(rawPath)
# COMMAND ----------
def transform_bronze(bronze: DataFrame, quarantine: bool = False) -> DataFrame:
json_schema = """
time TIMESTAMP,
name STRING,
device_id STRING,
steps INTEGER,
day INTEGER,
month INTEGER,
hour INTEGER
"""
bronzeAugmentedDF = bronze.withColumn(
"nested_json", from_json(col("value"), json_schema)
)
silver_health_tracker = bronzeAugmentedDF.select("value", "nested_json.*")
if not quarantine:
silver_health_tracker = silver_health_tracker.select(
"value",
col("device_id").cast("integer").alias("device_id"),
"steps",
col("time").alias("eventtime"),
"name",
col("time").cast("date").alias("p_eventdate"),
)
else:
silver_health_tracker = silver_health_tracker.select(
"value",
"device_id",
"steps",
col("time").alias("eventtime"),
"name",
col("time").cast("date").alias("p_eventdate"),
)
return silver_health_tracker
# COMMAND ----------
def repair_quarantined_records(
spark: SparkSession, bronzeTable: str, userTable: str
) -> DataFrame:
bronzeQuarantinedDF = spark.read.table(bronzeTable).filter("status = 'quarantined'")
bronzeQuarTransDF = transform_bronze(bronzeQuarantinedDF, quarantine=True).alias(
"quarantine"
)
health_tracker_user_df = spark.read.table(userTable).alias("user")
repairDF = bronzeQuarTransDF.join(
health_tracker_user_df,
bronzeQuarTransDF.device_id == health_tracker_user_df.user_id,
)
silverCleanedDF = repairDF.select(
col("quarantine.value").alias("value"),
col("user.device_id").cast("INTEGER").alias("device_id"),
col("quarantine.steps").alias("steps"),
col("quarantine.eventtime").alias("eventtime"),
col("quarantine.name").alias("name"),
col("quarantine.eventtime").cast("date").alias("p_eventdate"),
)
return silverCleanedDF
# COMMAND ----------
def transform_raw(raw: DataFrame) -> DataFrame:
return raw.select(
lit("files.training.databricks.com").alias("datasource"),
current_timestamp().alias("ingesttime"),
lit("new").alias("status"),
"value",
current_timestamp().cast("date").alias("p_ingestdate"),
)
# COMMAND ----------
def update_bronze_table_status(
spark: SparkSession, bronzeTablePath: str, dataframe: DataFrame, status: str
) -> bool:
bronzeTable = DeltaTable.forPath(spark, bronzeTablePath)  # use the path argument, not an undefined global
dataframeAugmented = dataframe.withColumn("status", lit(status))
update_match = "bronze.value = dataframe.value"
update = {"status": "dataframe.status"}
(
bronzeTable.alias("bronze")
.merge(dataframeAugmented.alias("dataframe"), update_match)
.whenMatchedUpdate(set=update)
.execute()
)
return True
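# COMMAND ----------
# Illustrative sketch only (not part of the original notebook): one way the helpers above could be
# chained to land raw records in a bronze Delta table. The paths are hypothetical and the ambient
# `spark` session is assumed to be provided by the Databricks runtime.
def example_raw_to_bronze_pipeline(rawPath: str, bronzePath: str) -> None:
    rawDF = read_batch_raw(rawPath)          # raw text records with a single `value` column
    bronzeDF = transform_raw(rawDF)          # adds datasource, ingesttime, status, p_ingestdate
    rawToBronzeWriter = batch_writer(bronzeDF, partition_column="p_ingestdate")
    rawToBronzeWriter.save(bronzePath)       # completes the partitioned Delta write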
| 26.171598 | 127 | 0.634863 |
be2a6ebbe3e99733f2253fb60480747b997838c3 | 56,971 | py | Python | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/pandas/core/panel.py | mattl1598/testing | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | ["Unlicense"] | null | null | null | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/pandas/core/panel.py | mattl1598/testing | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | ["Unlicense"] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/pandas/core/panel.py | mattl1598/Project-Mochachino | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | ["Unlicense"] | null | null | null |
"""
Contains data structures designed for manipulating panel (3-dimensional) data
"""
# pylint: disable=E1103,W0231,W0212,W0621
import operator
import sys
import numpy as np
from pandas.core.common import (PandasError, _mut_exclusive,
_try_sort, _default_index, _infer_dtype_from_scalar,
notnull)
from pandas.core.categorical import Factor
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import _maybe_droplevels, _is_list_like
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.util import py3compat
from pandas.util.decorators import deprecate, Appender, Substitution
import pandas.core.common as com
import pandas.core.nanops as nanops
import pandas.lib as lib
def _ensure_like_indices(time, panels):
"""
Makes sure that time and panels are conformable
"""
n_time = len(time)
n_panel = len(panels)
u_panels = np.unique(panels) # this sorts!
u_time = np.unique(time)
if len(u_time) == n_time:
time = np.tile(u_time, len(u_panels))
if len(u_panels) == n_panel:
panels = np.repeat(u_panels, len(u_time))
return time, panels
def panel_index(time, panels, names=['time', 'panel']):
"""
Returns a multi-index suitable for a panel-like DataFrame
Parameters
----------
time : array-like
Time index, does not have to repeat
panels : array-like
Panel index, does not have to repeat
names : list, optional
List containing the names of the indices
Returns
-------
multi_index : MultiIndex
Time index is the first level, the panels are the second level.
Examples
--------
>>> years = list(range(1960,1963))
>>> panels = ['A', 'B', 'C']
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1961, 'A'), (1962, 'A'), (1960, 'B'),
(1961, 'B'), (1962, 'B'), (1960, 'C'), (1961, 'C'),
(1962, 'C')], dtype=object)
or
>>> import numpy as np
>>> years = np.repeat(list(range(1960,1963)), 3)
>>> panels = np.tile(['A', 'B', 'C'], 3)
>>> panel_idx = panel_index(years, panels)
>>> panel_idx
MultiIndex([(1960, 'A'), (1960, 'B'), (1960, 'C'), (1961, 'A'),
(1961, 'B'), (1961, 'C'), (1962, 'A'), (1962, 'B'),
(1962, 'C')], dtype=object)
"""
time, panels = _ensure_like_indices(time, panels)
time_factor = Factor.from_array(time)
panel_factor = Factor.from_array(panels)
labels = [time_factor.labels, panel_factor.labels]
levels = [time_factor.levels, panel_factor.levels]
return MultiIndex(levels, labels, sortorder=None, names=names)
class PanelError(Exception):
pass
def _arith_method(func, name):
# work only for scalars
def f(self, other):
if not np.isscalar(other):
raise ValueError('Simple arithmetic with %s can only be '
'done with scalar values' % self._constructor.__name__)
return self._combine(other, func)
f.__name__ = name
return f
def _comp_method(func, name):
def na_op(x, y):
try:
result = func(x, y)
except TypeError:
xrav = x.ravel()
result = np.empty(x.size, dtype=x.dtype)
if isinstance(y, np.ndarray):
yrav = y.ravel()
mask = notnull(xrav) & notnull(yrav)
result[mask] = func(np.array(list(xrav[mask])),
np.array(list(yrav[mask])))
else:
mask = notnull(xrav)
result[mask] = func(np.array(list(xrav[mask])), y)
if func == operator.ne: # pragma: no cover
np.putmask(result, -mask, True)
else:
np.putmask(result, -mask, False)
result = result.reshape(x.shape)
return result
@Appender('Wrapper for comparison method %s' % name)
def f(self, other):
if isinstance(other, self._constructor):
return self._compare_constructor(other, func)
elif isinstance(other, (self._constructor_sliced, DataFrame, Series)):
raise Exception("input needs alignment for this object [%s]" %
self._constructor)
else:
return self._combine_const(other, na_op)
f.__name__ = name
return f
class Panel(NDFrame):
"""
Represents wide format panel data, stored as 3-dimensional array
Parameters
----------
data : ndarray (items x major x minor), or dict of DataFrames
items : Index or array-like
axis=0
major_axis : Index or array-like
axis=1
minor_axis : Index or array-like
axis=2
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
"""
_AXIS_ORDERS = ['items', 'major_axis', 'minor_axis']
_AXIS_NUMBERS = dict([(a, i) for i, a in enumerate(_AXIS_ORDERS)])
_AXIS_ALIASES = {
'major': 'major_axis',
'minor': 'minor_axis'
}
_AXIS_NAMES = dict([(i, a) for i, a in enumerate(_AXIS_ORDERS)])
_AXIS_SLICEMAP = {
'major_axis': 'index',
'minor_axis': 'columns'
}
_AXIS_LEN = len(_AXIS_ORDERS)
# major
_default_stat_axis = 1
# info axis
_het_axis = 0
_info_axis = _AXIS_ORDERS[_het_axis]
items = lib.AxisProperty(0)
major_axis = lib.AxisProperty(1)
minor_axis = lib.AxisProperty(2)
@property
def _constructor(self):
return type(self)
# return the type of the slice constructor
_constructor_sliced = DataFrame
def _construct_axes_dict(self, axes=None, **kwargs):
""" return an axes dictionary for myself """
d = dict([(a, getattr(self, a)) for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
""" return an axes dictionary for the passed axes """
d = dict([(a, ax) for a, ax in zip(self._AXIS_ORDERS, axes)])
d.update(kwargs)
return d
def _construct_axes_dict_for_slice(self, axes=None, **kwargs):
""" return an axes dictionary for myself """
d = dict([(self._AXIS_SLICEMAP[a], getattr(self, a))
for a in (axes or self._AXIS_ORDERS)])
d.update(kwargs)
return d
__add__ = _arith_method(operator.add, '__add__')
__sub__ = _arith_method(operator.sub, '__sub__')
__truediv__ = _arith_method(operator.truediv, '__truediv__')
__floordiv__ = _arith_method(operator.floordiv, '__floordiv__')
__mul__ = _arith_method(operator.mul, '__mul__')
__pow__ = _arith_method(operator.pow, '__pow__')
__radd__ = _arith_method(operator.add, '__radd__')
__rmul__ = _arith_method(operator.mul, '__rmul__')
__rsub__ = _arith_method(lambda x, y: y - x, '__rsub__')
__rtruediv__ = _arith_method(lambda x, y: y / x, '__rtruediv__')
__rfloordiv__ = _arith_method(lambda x, y: y // x, '__rfloordiv__')
__rpow__ = _arith_method(lambda x, y: y ** x, '__rpow__')
if not py3compat.PY3:
__div__ = _arith_method(operator.div, '__div__')
__rdiv__ = _arith_method(lambda x, y: y / x, '__rdiv__')
def __init__(self, data=None, items=None, major_axis=None, minor_axis=None,
copy=False, dtype=None):
self._init_data(
data=data, items=items, major_axis=major_axis, minor_axis=minor_axis,
copy=copy, dtype=dtype)
def _init_data(self, data, copy, dtype, **kwargs):
""" generate ND initialization; axes are passed as required objects to __init__ """
if data is None:
data = {}
passed_axes = [kwargs.get(a) for a in self._AXIS_ORDERS]
axes = None
if isinstance(data, BlockManager):
if any(x is not None for x in passed_axes):
axes = [x if x is not None else y
for x, y in zip(passed_axes, data.axes)]
mgr = data
elif isinstance(data, dict):
mgr = self._init_dict(data, passed_axes, dtype=dtype)
copy = False
dtype = None
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, passed_axes, dtype=dtype, copy=copy)
copy = False
dtype = None
else: # pragma: no cover
raise PandasError('Panel constructor not properly called!')
NDFrame.__init__(self, mgr, axes=axes, copy=copy, dtype=dtype)
@classmethod
def _from_axes(cls, data, axes):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data)
else:
d = cls._construct_axes_dict_from(cls, axes, copy=False)
return cls(data, **d)
def _init_dict(self, data, axes, dtype=None):
from pandas.util.compat import OrderedDict
haxis = axes.pop(self._het_axis)
# prefilter if haxis passed
if haxis is not None:
haxis = _ensure_index(haxis)
data = OrderedDict((k, v) for k, v in data.items() if k in haxis)
else:
ks = list(data.keys())
if not isinstance(data,OrderedDict):
ks = _try_sort(ks)
haxis = Index(ks)
for k, v in data.items():
if isinstance(v, dict):
data[k] = self._constructor_sliced(v)
# extract axis for remaining axes & create the slicemap
raxes = [self._extract_axis(self, data, axis=i)
if a is None else a for i, a in enumerate(axes)]
raxes_sm = self._extract_axes_for_slice(self, raxes)
# shallow copy
arrays = []
haxis_shape = [len(a) for a in raxes]
for h in haxis:
v = values = data.get(h)
if v is None:
values = np.empty(haxis_shape, dtype=dtype)
values.fill(np.nan)
elif isinstance(v, self._constructor_sliced):
d = raxes_sm.copy()
d['copy'] = False
v = v.reindex(**d)
if dtype is not None:
v = v.astype(dtype)
values = v.values
arrays.append(values)
return self._init_arrays(arrays, haxis, [haxis] + raxes)
def _init_arrays(self, arrays, arr_names, axes):
return create_block_manager_from_arrays(arrays, arr_names, axes)
@property
def shape(self):
return [len(getattr(self, a)) for a in self._AXIS_ORDERS]
@classmethod
def from_dict(cls, data, intersect=False, orient='items', dtype=None):
"""
Construct Panel from dict of DataFrame objects
Parameters
----------
data : dict
{field : DataFrame}
intersect : boolean
Intersect indexes of input DataFrames
orient : {'items', 'minor'}, default 'items'
The "orientation" of the data. If the keys of the passed dict
should be the items of the result panel, pass 'items'
(default). Otherwise if the columns of the values of the passed
DataFrame objects should be the items (which in the case of
mixed-dtype data you should do), instead pass 'minor'
Returns
-------
Panel
"""
from pandas.util.compat import OrderedDict,OrderedDefaultdict
orient = orient.lower()
if orient == 'minor':
new_data = OrderedDefaultdict(dict)
for col, df in data.items():
for item, s in df.items():
new_data[item][col] = s
data = new_data
elif orient != 'items': # pragma: no cover
raise ValueError('only recognize items or minor for orientation')
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
ks = list(d['data'].keys())
if not isinstance(d['data'],OrderedDict):
ks = list(sorted(ks))
d[cls._info_axis] = Index(ks)
return cls(**d)
def __getitem__(self, key):
if isinstance(getattr(self, self._info_axis), MultiIndex):
return self._getitem_multilevel(key)
return super(Panel, self).__getitem__(key)
def _getitem_multilevel(self, key):
info = getattr(self, self._info_axis)
loc = info.get_loc(key)
if isinstance(loc, (slice, np.ndarray)):
new_index = info[loc]
result_index = _maybe_droplevels(new_index, key)
slices = [loc] + [slice(None) for x in range(
self._AXIS_LEN - 1)]
new_values = self.values[slices]
d = self._construct_axes_dict(self._AXIS_ORDERS[1:])
d[self._info_axis] = result_index
result = self._constructor(new_values, **d)
return result
else:
return self._get_item_cache(key)
def _init_matrix(self, data, axes, dtype=None, copy=False):
values = self._prep_ndarray(self, data, copy=copy)
if dtype is not None:
try:
values = values.astype(dtype)
except Exception:
raise ValueError('failed to cast to %s' % dtype)
shape = values.shape
fixed_axes = []
for i, ax in enumerate(axes):
if ax is None:
ax = _default_index(shape[i])
else:
ax = _ensure_index(ax)
fixed_axes.append(ax)
return create_block_manager_from_blocks([ values ], fixed_axes)
#----------------------------------------------------------------------
# Array interface
def __array__(self, dtype=None):
return self.values
def __array_wrap__(self, result):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d)
#----------------------------------------------------------------------
# Comparison methods
def _indexed_same(self, other):
return all([getattr(self, a).equals(getattr(other, a)) for a in self._AXIS_ORDERS])
def _compare_constructor(self, other, func):
if not self._indexed_same(other):
raise Exception('Can only compare identically-labeled '
'same type objects')
new_data = {}
for col in getattr(self, self._info_axis):
new_data[col] = func(self[col], other[col])
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
# boolean operators
__and__ = _arith_method(operator.and_, '__and__')
__or__ = _arith_method(operator.or_, '__or__')
__xor__ = _arith_method(operator.xor, '__xor__')
def __neg__(self):
return -1 * self
def __invert__(self):
return -1 * self
# Comparison methods
__eq__ = _comp_method(operator.eq, '__eq__')
__ne__ = _comp_method(operator.ne, '__ne__')
__lt__ = _comp_method(operator.lt, '__lt__')
__gt__ = _comp_method(operator.gt, '__gt__')
__le__ = _comp_method(operator.le, '__le__')
__ge__ = _comp_method(operator.ge, '__ge__')
eq = _comp_method(operator.eq, 'eq')
ne = _comp_method(operator.ne, 'ne')
gt = _comp_method(operator.gt, 'gt')
lt = _comp_method(operator.lt, 'lt')
ge = _comp_method(operator.ge, 'ge')
le = _comp_method(operator.le, 'le')
#----------------------------------------------------------------------
# Magic methods
def __str__(self):
"""
Return a string representation for a particular Panel
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if py3compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular Panel
Invoked by bytes(df) in py3 only.
Yields a bytestring in both py2/py3.
"""
encoding = com.get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __unicode__(self):
"""
Return a string representation for a particular Panel
Invoked by unicode(df) in py2 only. Yields a Unicode String in both py2/py3.
"""
class_name = str(self.__class__)
shape = self.shape
dims = 'Dimensions: %s' % ' x '.join(
["%d (%s)" % (s, a) for a, s in zip(self._AXIS_ORDERS, shape)])
def axis_pretty(a):
v = getattr(self, a)
if len(v) > 0:
return '%s axis: %s to %s' % (a.capitalize(), com.pprint_thing(v[0]), com.pprint_thing(v[-1]))
else:
return '%s axis: None' % a.capitalize()
output = '\n'.join(
[class_name, dims] + [axis_pretty(a) for a in self._AXIS_ORDERS])
return output
def __repr__(self):
"""
Return a string representation for a particular Panel
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __iter__(self):
return iter(getattr(self, self._info_axis))
def iteritems(self):
for h in getattr(self, self._info_axis):
yield h, self[h]
# Name that won't get automatically converted to items by 2to3. items is
# already in use for the first axis.
iterkv = iteritems
def _get_plane_axes(self, axis):
""" get my plane axes: these are already (as compared with higher level planes), as we are returning a DataFrame axes """
axis = self._get_axis_name(axis)
if axis == 'major_axis':
index = self.minor_axis
columns = self.items
if axis == 'minor_axis':
index = self.major_axis
columns = self.items
elif axis == 'items':
index = self.major_axis
columns = self.minor_axis
return index, columns
def _wrap_array(self, arr, axes, copy=False):
d = self._construct_axes_dict_from(self, axes, copy=copy)
return self._constructor(arr, **d)
fromDict = from_dict
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparsePanel
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
"""
from pandas.core.sparse import SparsePanel
frames = dict(self.iterkv())
return SparsePanel(frames, items=self.items,
major_axis=self.major_axis,
minor_axis=self.minor_axis,
default_kind=kind,
default_fill_value=fill_value)
def to_excel(self, path, na_rep=''):
"""
Write each DataFrame in Panel to a separate excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
na_rep : string, default ''
Missing data representation
"""
from pandas.io.parsers import ExcelWriter
writer = ExcelWriter(path)
for item, df in self.items():
name = str(item)
df.to_excel(writer, name, na_rep=na_rep)
writer.save()
# TODO: needed?
def keys(self):
return list(getattr(self, self._info_axis))
def _get_values(self):
self._consolidate_inplace()
return self._data.as_matrix()
values = property(fget=_get_values)
#----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args):
"""
Quickly retrieve single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
Returns
-------
value : scalar value
"""
# require an arg for each axis
assert(len(args) == self._AXIS_LEN)
# hm, two layers to the onion
frame = self._get_item_cache(args[0])
return frame.get_value(*args[1:])
def set_value(self, *args):
"""
Quickly set single value at (item, major, minor) location
Parameters
----------
item : item label (panel item)
major : major axis label (panel item row)
minor : minor axis label (panel item column)
value : scalar
Returns
-------
panel : Panel
If label combo is contained, will be reference to calling Panel,
otherwise a new object
"""
# require an arg for each axis and the value
assert(len(args) == self._AXIS_LEN + 1)
try:
frame = self._get_item_cache(args[0])
frame.set_value(*args[1:])
return self
except KeyError:
axes = self._expand_axes(args)
d = self._construct_axes_dict_from(self, axes, copy=False)
result = self.reindex(**d)
args = list(args)
likely_dtype, args[-1] = _infer_dtype_from_scalar(args[-1])
made_bigger = not np.array_equal(
axes[0], getattr(self, self._info_axis))
# how to make this logic simpler?
if made_bigger:
com._possibly_cast_item(result, args[0], likely_dtype)
return result.set_value(*args)
def _box_item_values(self, key, values):
d = self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:])
return self._constructor_sliced(values, **d)
def __getattr__(self, name):
"""After regular attribute access, try looking up the name of an item.
This allows simpler access to items for interactive use."""
if name in getattr(self, self._info_axis):
return self[name]
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def _slice(self, slobj, axis=0, raise_on_error=False):
new_data = self._data.get_slice(slobj, axis=axis, raise_on_error=raise_on_error)
return self._constructor(new_data)
def __setitem__(self, key, value):
shape = tuple(self.shape)
if isinstance(value, self._constructor_sliced):
value = value.reindex(
**self._construct_axes_dict_for_slice(self._AXIS_ORDERS[1:]))
mat = value.values
elif isinstance(value, np.ndarray):
assert(value.shape == shape[1:])
mat = np.asarray(value)
elif np.isscalar(value):
dtype, value = _infer_dtype_from_scalar(value)
mat = np.empty(shape[1:], dtype=dtype)
mat.fill(value)
else:
raise TypeError('Cannot set item of type: %s' % str(type(value)))
mat = mat.reshape(tuple([1]) + shape[1:])
NDFrame._set_item(self, key, mat)
def pop(self, item):
"""
Return item slice from panel and delete from panel
Parameters
----------
key : object
Must be contained in panel's items
Returns
-------
y : DataFrame
"""
return NDFrame.pop(self, item)
def __getstate__(self):
"Returned pickled representation of the panel"
return self._data
def __setstate__(self, state):
# old Panel pickle
if isinstance(state, BlockManager):
self._data = state
elif len(state) == 4: # pragma: no cover
self._unpickle_panel_compat(state)
else: # pragma: no cover
raise ValueError('unrecognized pickle')
self._item_cache = {}
def _unpickle_panel_compat(self, state): # pragma: no cover
"Unpickle the panel"
_unpickle = com._unpickle_array
vals, items, major, minor = state
items = _unpickle(items)
major = _unpickle(major)
minor = _unpickle(minor)
values = _unpickle(vals)
wp = Panel(values, items, major, minor)
self._data = wp._data
def conform(self, frame, axis='items'):
"""
Conform input DataFrame to align with chosen axis pair.
Parameters
----------
frame : DataFrame
axis : {'items', 'major', 'minor'}
Axis the input corresponds to. E.g., if axis='major', then
the frame's columns would be items, and the index would be
values of the minor axis
Returns
-------
DataFrame
"""
axes = self._get_plane_axes(axis)
return frame.reindex(**self._extract_axes_for_slice(self, axes))
def reindex(self, major=None, minor=None, method=None,
major_axis=None, minor_axis=None, copy=True, **kwargs):
"""
Conform panel to new axis or axes
Parameters
----------
major : Index or sequence, default None
Can also use 'major_axis' keyword
items : Index or sequence, default None
minor : Index or sequence, default None
Can also use 'minor_axis' keyword
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
Returns
-------
Panel (new object)
"""
result = self
major = _mut_exclusive(major, major_axis)
minor = _mut_exclusive(minor, minor_axis)
al = self._AXIS_LEN
# only allowing multi-index on Panel (and not > dims)
if (method is None and not self._is_mixed_type and al <= 3):
items = kwargs.get('items')
if com._count_not_none(items, major, minor) == 3:
try:
return self._reindex_multi(items, major, minor)
except:
pass
if major is not None:
result = result._reindex_axis(major, method, al - 2, copy)
if minor is not None:
result = result._reindex_axis(minor, method, al - 1, copy)
for i, a in enumerate(self._AXIS_ORDERS[0:al - 2]):
a = kwargs.get(a)
if a is not None:
result = result._reindex_axis(a, method, i, copy)
if result is self and copy:
raise ValueError('Must specify at least one axis')
return result
def _reindex_multi(self, items, major, minor):
a0, a1, a2 = len(items), len(major), len(minor)
values = self.values
new_values = np.empty((a0, a1, a2), dtype=values.dtype)
new_items, indexer0 = self.items.reindex(items)
new_major, indexer1 = self.major_axis.reindex(major)
new_minor, indexer2 = self.minor_axis.reindex(minor)
if indexer0 is None:
indexer0 = list(range(len(new_items)))
if indexer1 is None:
indexer1 = list(range(len(new_major)))
if indexer2 is None:
indexer2 = list(range(len(new_minor)))
for i, ind in enumerate(indexer0):
com.take_2d_multi(values[ind], (indexer1, indexer2),
out=new_values[i])
return Panel(new_values, items=new_items, major_axis=new_major,
minor_axis=new_minor)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True):
"""Conform Panel to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
copy=False
Parameters
----------
index : array-like, optional
New labels / index to conform to. Preferably an Index object to
avoid duplicating data
axis : {0, 1}
0 -> index (rows)
1 -> columns
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed DataFrame
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
copy : boolean, default True
Return a new object, even if the passed indexes are the same
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level
Returns
-------
reindexed : Panel
"""
self._consolidate_inplace()
return self._reindex_axis(labels, method, axis, copy)
def reindex_like(self, other, method=None):
""" return an object with matching indicies to myself
Parameters
----------
other : Panel
method : string or None
Returns
-------
reindexed : Panel
"""
d = other._construct_axes_dict(method=method)
return self.reindex(**d)
def dropna(self, axis=0, how='any'):
"""
Drop 2D from panel, holding passed axis constant
Parameters
----------
axis : int, default 0
Axis to hold constant. E.g. axis=1 will drop major_axis entries
having a certain amount of NA data
how : {'all', 'any'}, default 'any'
'any': one or more values are NA in the DataFrame along the
axis. For 'all' they all must be.
Returns
-------
dropped : Panel
"""
axis = self._get_axis_number(axis)
values = self.values
mask = com.notnull(values)
for ax in reversed(sorted(set(range(3)) - set([axis]))):
mask = mask.sum(ax)
per_slice = np.prod(values.shape[:axis] + values.shape[axis + 1:])
if how == 'all':
cond = mask > 0
else:
cond = mask == per_slice
new_ax = self._get_axis(axis)[cond]
return self.reindex_axis(new_ax, axis=axis)
def _combine(self, other, func, axis=0):
if isinstance(other, Panel):
return self._combine_panel(other, func)
elif isinstance(other, DataFrame):
return self._combine_frame(other, func, axis=axis)
elif np.isscalar(other):
return self._combine_const(other, func)
def _combine_const(self, other, func):
new_values = func(self.values, other)
d = self._construct_axes_dict()
return self._constructor(new_values, **d)
def _combine_frame(self, other, func, axis=0):
index, columns = self._get_plane_axes(axis)
axis = self._get_axis_number(axis)
other = other.reindex(index=index, columns=columns)
if axis == 0:
new_values = func(self.values, other.values)
elif axis == 1:
new_values = func(self.values.swapaxes(0, 1), other.values.T)
new_values = new_values.swapaxes(0, 1)
elif axis == 2:
new_values = func(self.values.swapaxes(0, 2), other.values)
new_values = new_values.swapaxes(0, 2)
return self._constructor(new_values, self.items, self.major_axis,
self.minor_axis)
def _combine_panel(self, other, func):
items = self.items + other.items
major = self.major_axis + other.major_axis
minor = self.minor_axis + other.minor_axis
# could check that everything's the same size, but forget it
this = self.reindex(items=items, major=major, minor=minor)
other = other.reindex(items=items, major=major, minor=minor)
result_values = func(this.values, other.values)
return self._constructor(result_values, items, major, minor)
def fillna(self, value=None, method=None):
"""
Fill NaN values using the specified method.
Member Series / TimeSeries are filled separately.
Parameters
----------
value : any kind (should be same type as array)
Value to use to fill holes (e.g. 0)
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default 'pad'
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
Returns
-------
y : DataFrame
See also
--------
DataFrame.reindex, DataFrame.asfreq
"""
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
result = {}
for col, s in self.iterkv():
result[col] = s.fillna(method=method, value=value)
return self._constructor.from_dict(result)
else:
if method is not None:
raise ValueError('cannot specify both a fill method and value')
new_data = self._data.fillna(value)
return self._constructor(new_data)
def ffill(self):
return self.fillna(method='ffill')
def bfill(self):
return self.fillna(method='bfill')
def major_xs(self, key, copy=True):
"""
Return slice of panel along major axis
Parameters
----------
key : object
Major axis label
copy : boolean, default False
Copy data
Returns
-------
y : DataFrame
index -> minor axis, columns -> items
"""
return self.xs(key, axis=self._AXIS_LEN - 2, copy=copy)
def minor_xs(self, key, copy=True):
"""
Return slice of panel along minor axis
Parameters
----------
key : object
Minor axis label
copy : boolean, default False
Copy data
Returns
-------
y : DataFrame
index -> major axis, columns -> items
"""
return self.xs(key, axis=self._AXIS_LEN - 1, copy=copy)
def xs(self, key, axis=1, copy=True):
"""
Return slice of panel along selected axis
Parameters
----------
key : object
Label
axis : {'items', 'major', 'minor'}, default 1/'major'
Returns
-------
y : ndim(self)-1
"""
axis = self._get_axis_number(axis)
if axis == 0:
data = self[key]
if copy:
data = data.copy()
return data
self._consolidate_inplace()
axis_number = self._get_axis_number(axis)
new_data = self._data.xs(key, axis=axis_number, copy=copy)
return self._constructor_sliced(new_data)
_xs = xs
def _ixs(self, i, axis=0):
# for compatibility with .ix indexing
# Won't work with hierarchical indexing yet
key = self._get_axis(axis)[i]
# xs cannot handle a non-scalar key, so just reindex here
if _is_list_like(key):
return self.reindex(**{ self._get_axis_name(axis) : key })
return self.xs(key, axis=axis)
def groupby(self, function, axis='major'):
"""
Group data on given axis, returning GroupBy object
Parameters
----------
function : callable
Mapping function for chosen axis
axis : {'major', 'minor', 'items'}, default 'major'
Returns
-------
grouped : PanelGroupBy
"""
from pandas.core.groupby import PanelGroupBy
axis = self._get_axis_number(axis)
return PanelGroupBy(self, function, axis=axis)
def swapaxes(self, axis1='major', axis2='minor', copy=True):
"""
Interchange axes and swap values axes appropriately
Returns
-------
y : Panel (new object)
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
raise ValueError('Cannot specify the same axis')
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k))
for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes)
def transpose(self, *args, **kwargs):
"""
Permute the dimensions of the Panel
Parameters
----------
items : int or one of {'items', 'major', 'minor'}
major : int or one of {'items', 'major', 'minor'}
minor : int or one of {'items', 'major', 'minor'}
copy : boolean, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
Returns
-------
y : Panel (new object)
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
if not a in kwargs:
try:
kwargs[a] = args.pop(0)
except (IndexError):
raise ValueError(
"not enough arguments specified to transpose!")
axes = [self._get_axis_number(kwargs[a]) for a in self._AXIS_ORDERS]
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError('Must specify %s unique axes' % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(
self, [self._get_axis(x) for x in axes])
new_values = self.values.transpose(tuple(axes))
if kwargs.get('copy') or (len(args) and args[-1]):
new_values = new_values.copy()
return self._constructor(new_values, **new_axes)
def to_frame(self, filter_observations=True):
"""
Transform wide format into long (stacked) format as DataFrame
Parameters
----------
filter_observations : boolean, default True
Drop (major, minor) pairs without a complete set of observations
across all the items
Returns
-------
y : DataFrame
"""
_, N, K = self.shape
if filter_observations:
mask = com.notnull(self.values).all(axis=0)
# size = mask.sum()
selector = mask.ravel()
else:
# size = N * K
selector = slice(None, None)
data = {}
for item in self.items:
data[item] = self[item].values.ravel()[selector]
major_labels = np.arange(N).repeat(K)[selector]
# Anyone think of a better way to do this? np.repeat does not
# do what I want
minor_labels = np.arange(K).reshape(1, K)[np.zeros(N, dtype=int)]
minor_labels = minor_labels.ravel()[selector]
maj_name = self.major_axis.name or 'major'
min_name = self.minor_axis.name or 'minor'
index = MultiIndex(levels=[self.major_axis, self.minor_axis],
labels=[major_labels, minor_labels],
names=[maj_name, min_name])
return DataFrame(data, index=index, columns=self.items)
to_long = deprecate('to_long', to_frame)
toLong = deprecate('toLong', to_frame)
def filter(self, items):
"""
Restrict items in panel to input list
Parameters
----------
items : sequence
Returns
-------
y : Panel
"""
intersection = self.items.intersection(items)
return self.reindex(items=intersection)
def apply(self, func, axis='major'):
"""
Apply
Parameters
----------
func : numpy function
Signature should match numpy.{sum, mean, var, std} etc.
axis : {'major', 'minor', 'items'}
fill_value : boolean, default True
Replace NaN values with specified first
Returns
-------
result : DataFrame or Panel
"""
i = self._get_axis_number(axis)
result = np.apply_along_axis(func, i, self.values)
return self._wrap_result(result, axis=axis)
def _reduce(self, op, axis=0, skipna=True):
axis_name = self._get_axis_name(axis)
axis_number = self._get_axis_number(axis_name)
f = lambda x: op(x, axis=axis_number, skipna=skipna)
result = f(self.values)
axes = self._get_plane_axes(axis_name)
if result.ndim == 2 and axis_name != self._info_axis:
result = result.T
return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes))
def _wrap_result(self, result, axis):
axis = self._get_axis_name(axis)
axes = self._get_plane_axes(axis)
if result.ndim == 2 and axis != self._info_axis:
result = result.T
# do we have reduced dimensionality?
if self.ndim == result.ndim:
return self._constructor(result, **self._construct_axes_dict())
elif self.ndim == result.ndim + 1:
return self._constructor_sliced(result, **self._extract_axes_for_slice(self, axes))
raise PandasError("invalid _wrap_result [self->%s] [result->%s]" %
(self.ndim, result.ndim))
def count(self, axis='major'):
"""
Return number of observations over requested axis.
Parameters
----------
axis : {'items', 'major', 'minor'} or {0, 1, 2}
Returns
-------
count : DataFrame
"""
i = self._get_axis_number(axis)
values = self.values
mask = np.isfinite(values)
result = mask.sum(axis=i)
return self._wrap_result(result, axis)
def shift(self, lags, axis='major'):
"""
Shift major or minor axis by specified number of leads/lags. Drops
periods right now compared with DataFrame.shift
Parameters
----------
lags : int
axis : {'major', 'minor'}
Returns
-------
shifted : Panel
"""
values = self.values
items = self.items
major_axis = self.major_axis
minor_axis = self.minor_axis
if lags > 0:
vslicer = slice(None, -lags)
islicer = slice(lags, None)
elif lags == 0:
vslicer = islicer = slice(None)
else:
vslicer = slice(-lags, None)
islicer = slice(None, lags)
axis = self._get_axis_name(axis)
if axis == 'major_axis':
values = values[:, vslicer, :]
major_axis = major_axis[islicer]
elif axis == 'minor_axis':
values = values[:, :, vslicer]
minor_axis = minor_axis[islicer]
else:
raise ValueError('Invalid axis')
return self._constructor(values, items=items, major_axis=major_axis,
minor_axis=minor_axis)
def truncate(self, before=None, after=None, axis='major'):
"""Function truncates a sorted Panel before and/or after some
particular values on the requested axis
Parameters
----------
before : date
Left boundary
after : date
Right boundary
axis : {'major', 'minor', 'items'}
Returns
-------
Panel
"""
axis = self._get_axis_name(axis)
index = self._get_axis(axis)
beg_slice, end_slice = index.slice_locs(before, after)
new_index = index[beg_slice:end_slice]
return self.reindex(**{axis: new_index})
def join(self, other, how='left', lsuffix='', rsuffix=''):
"""
Join items with other Panel either on major and minor axes column
Parameters
----------
other : Panel or list of Panels
Index should be similar to one of the columns in this one
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
Returns
-------
joined : Panel
"""
from pandas.tools.merge import concat
if isinstance(other, Panel):
join_major, join_minor = self._get_join_index(other, how)
this = self.reindex(major=join_major, minor=join_minor)
other = other.reindex(major=join_major, minor=join_minor)
merged_data = this._data.merge(other._data, lsuffix, rsuffix)
return self._constructor(merged_data)
else:
if lsuffix or rsuffix:
raise ValueError('Suffixes not supported when passing '
'multiple panels')
if how == 'left':
how = 'outer'
join_axes = [self.major_axis, self.minor_axis]
elif how == 'right':
raise ValueError('Right join not supported with multiple '
'panels')
else:
join_axes = None
return concat([self] + list(other), axis=0, join=how,
join_axes=join_axes, verify_integrity=True)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify Panel in place using non-NA values from passed
Panel, or object coercible to Panel. Aligns on items
Parameters
----------
other : Panel, or object coercible to Panel
join : How to join individual DataFrames
{'left', 'right', 'outer', 'inner'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling panel
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : bool
If True, will raise an error if a DataFrame and other both
contain data in the same place.
"""
if not isinstance(other, self._constructor):
other = self._constructor(other)
axis = self._info_axis
axis_values = getattr(self, axis)
other = other.reindex(**{axis: axis_values})
for frame in axis_values:
self[frame].update(other[frame], join, overwrite, filter_func,
raise_conflict)
def _get_join_index(self, other, how):
if how == 'left':
join_major, join_minor = self.major_axis, self.minor_axis
elif how == 'right':
join_major, join_minor = other.major_axis, other.minor_axis
elif how == 'inner':
join_major = self.major_axis.intersection(other.major_axis)
join_minor = self.minor_axis.intersection(other.minor_axis)
elif how == 'outer':
join_major = self.major_axis.union(other.major_axis)
join_minor = self.minor_axis.union(other.minor_axis)
return join_major, join_minor
# miscellaneous data creation
@staticmethod
def _extract_axes(self, data, axes, **kwargs):
""" return a list of the axis indicies """
return [self._extract_axis(self, data, axis=i, **kwargs) for i, a in enumerate(axes)]
@staticmethod
def _extract_axes_for_slice(self, axes):
""" return the slice dictionary for these axes """
return dict([(self._AXIS_SLICEMAP[i], a) for i, a in zip(self._AXIS_ORDERS[self._AXIS_LEN - len(axes):], axes)])
@staticmethod
def _prep_ndarray(self, values, copy=True):
if not isinstance(values, np.ndarray):
values = np.asarray(values)
# NumPy strings are a pain, convert to object
if issubclass(values.dtype.type, str):
values = np.array(values, dtype=object, copy=True)
else:
if copy:
values = values.copy()
assert(values.ndim == self._AXIS_LEN)
return values
@staticmethod
def _homogenize_dict(self, frames, intersect=True, dtype=None):
"""
Conform set of _constructor_sliced-like objects to either an intersection
of indices / columns or a union.
Parameters
----------
frames : dict
intersect : boolean, default True
Returns
-------
dict of aligned results & indices
"""
from pandas.util.compat import OrderedDict
result = dict()
if isinstance(frames, OrderedDict):  # caller may pass a dict or an OrderedDict; preserve the type
result = OrderedDict()
adj_frames = OrderedDict()
for k, v in frames.items():
if isinstance(v, dict):
adj_frames[k] = self._constructor_sliced(v)
else:
adj_frames[k] = v
axes = self._AXIS_ORDERS[1:]
axes_dict = dict([(a, ax) for a, ax in zip(axes, self._extract_axes(
self, adj_frames, axes, intersect=intersect))])
reindex_dict = dict(
[(self._AXIS_SLICEMAP[a], axes_dict[a]) for a in axes])
reindex_dict['copy'] = False
for key, frame in adj_frames.items():
if frame is not None:
result[key] = frame.reindex(**reindex_dict)
else:
result[key] = None
axes_dict['data'] = result
return axes_dict
@staticmethod
def _extract_axis(self, data, axis=0, intersect=False):
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_frames = False
for v in list(data.values()):
if isinstance(v, self._constructor_sliced):
have_frames = True
indexes.append(v._get_axis(axis))
elif v is not None:
have_raw_arrays = True
raw_lengths.append(v.shape[axis])
if have_frames:
index = _get_combined_index(indexes, intersect=intersect)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('ndarrays must match shape on axis %d' % axis)
if have_frames:
if lengths[0] != len(index):
raise AssertionError('Length of data and index must match')
else:
index = Index(np.arange(lengths[0]))
if index is None:
index = Index([])
return _ensure_index(index)
@classmethod
def _add_aggregate_operations(cls):
""" add the operations to the cls; evaluate the doc strings again """
# doc string substitutions
_agg_doc = """
Wrapper method for %s
Parameters
----------
other : """ + "%s or %s" % (cls._constructor_sliced.__name__, cls.__name__) + """
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "}" + """
Axis to broadcast over
Returns
-------
""" + cls.__name__ + "\n"
def _panel_arith_method(op, name):
@Substitution(op)
@Appender(_agg_doc)
def f(self, other, axis=0):
return self._combine(other, op, axis=axis)
f.__name__ = name
return f
cls.add = _panel_arith_method(operator.add, 'add')
cls.subtract = cls.sub = _panel_arith_method(operator.sub, 'subtract')
cls.multiply = cls.mul = _panel_arith_method(operator.mul, 'multiply')
try:
cls.divide = cls.div = _panel_arith_method(operator.div, 'divide')
except AttributeError: # pragma: no cover
# Python 3
cls.divide = cls.div = _panel_arith_method(
operator.truediv, 'divide')
_agg_doc = """
Return %(desc)s over requested axis
Parameters
----------
axis : {""" + ', '.join(cls._AXIS_ORDERS) + "} or {" + ', '.join([str(i) for i in range(cls._AXIS_LEN)]) + """}
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
%(outname)s : """ + cls._constructor_sliced.__name__ + "\n"
_na_info = """
NA/null values are %s.
If all values are NA, result will be NA"""
@Substitution(desc='sum', outname='sum')
@Appender(_agg_doc)
def sum(self, axis='major', skipna=True):
return self._reduce(nanops.nansum, axis=axis, skipna=skipna)
cls.sum = sum
@Substitution(desc='mean', outname='mean')
@Appender(_agg_doc)
def mean(self, axis='major', skipna=True):
return self._reduce(nanops.nanmean, axis=axis, skipna=skipna)
cls.mean = mean
@Substitution(desc='unbiased variance', outname='variance')
@Appender(_agg_doc)
def var(self, axis='major', skipna=True):
return self._reduce(nanops.nanvar, axis=axis, skipna=skipna)
cls.var = var
@Substitution(desc='unbiased standard deviation', outname='stdev')
@Appender(_agg_doc)
def std(self, axis='major', skipna=True):
return self.var(axis=axis, skipna=skipna).apply(np.sqrt)
cls.std = std
@Substitution(desc='unbiased skewness', outname='skew')
@Appender(_agg_doc)
def skew(self, axis='major', skipna=True):
return self._reduce(nanops.nanskew, axis=axis, skipna=skipna)
cls.skew = skew
@Substitution(desc='product', outname='prod')
@Appender(_agg_doc)
def prod(self, axis='major', skipna=True):
return self._reduce(nanops.nanprod, axis=axis, skipna=skipna)
cls.prod = prod
@Substitution(desc='compounded percentage', outname='compounded')
@Appender(_agg_doc)
def compound(self, axis='major', skipna=True):
return (1 + self).prod(axis=axis, skipna=skipna) - 1
cls.compound = compound
@Substitution(desc='median', outname='median')
@Appender(_agg_doc)
def median(self, axis='major', skipna=True):
return self._reduce(nanops.nanmedian, axis=axis, skipna=skipna)
cls.median = median
@Substitution(desc='maximum', outname='maximum')
@Appender(_agg_doc)
def max(self, axis='major', skipna=True):
return self._reduce(nanops.nanmax, axis=axis, skipna=skipna)
cls.max = max
@Substitution(desc='minimum', outname='minimum')
@Appender(_agg_doc)
def min(self, axis='major', skipna=True):
return self._reduce(nanops.nanmin, axis=axis, skipna=skipna)
cls.min = min
Panel._add_aggregate_operations()
WidePanel = Panel
LongPanel = DataFrame
def _monotonic(arr):
return not (arr[1:] < arr[:-1]).any()
def install_ipython_completers(): # pragma: no cover
"""Register the Panel type with IPython's tab completion machinery, so
that it knows about accessing column names as attributes."""
from IPython.utils.generics import complete_object
@complete_object.when_type(Panel)
def complete_dataframe(obj, prev_completions):
return prev_completions + [c for c in list(obj.keys())
if isinstance(c, str) and py3compat.isidentifier(c)]
# Importing IPython brings in about 200 modules, so we want to avoid it unless
# we're in IPython (when those modules are loaded anyway).
if "IPython" in sys.modules: # pragma: no cover
try:
install_ipython_completers()
except Exception:
pass
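# Illustrative usage sketch (added for this excerpt, not part of the original module):
# a Panel is typically built from a dict of DataFrames, one per item, e.g.
#
#   import numpy as np
#   from pandas import DataFrame, Panel
#   df = DataFrame(np.random.randn(4, 3), columns=['x', 'y', 'z'])
#   wp = Panel({'item1': df, 'item2': df * 2})    # items x major_axis x minor_axis
#   frame = wp.major_xs(wp.major_axis[0])         # DataFrame: index -> minor axis, columns -> items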
| 33.219242 | 129 | 0.574169 |
75e0f56e6d618a191e5680cda8b169179ac4901c | 445 | py | Python | Chapter03/multi_plot.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | ["MIT"] | null | null | null | Chapter03/multi_plot.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | ["MIT"] | null | null | null | Chapter03/multi_plot.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | ["MIT"] | 1 | 2021-02-04T06:56:18.000Z | 2021-02-04T06:56:18.000Z |
import plotly as py
import plotly.graph_objs as go
# ----------pre def
pyplt = py.offline.plot
x = list('ABCDEF')
trace1 = go.Scatter(
x=x,
y=[1.5, 1, 1.3, 0.7, 0.8, 0.9],
name='line'
)
trace2 = go.Bar(
x=x,
y=[1, 0.5, 0.7, -1.2, 0.3, 0.4],
name = 'bar'
)
data = [trace1, trace2]
layout = dict(title='Bar-Line Demo')
fig = dict(data=data, layout=layout)
pyplt(fig, filename='tmp/bar-line.html', show_link=False)  # pass fig so the layout title is applied
| 16.481481 | 58 | 0.58427 |
0689919610308dd0d732bac841c7b08afc742905 | 12,802 | py | Python | training/coach_restyle_psp.py | jscarlson/restyle-encoder | 8b0c97a6e44ad244efe7bd77f80d7201817c314c | ["MIT"] | null | null | null | training/coach_restyle_psp.py | jscarlson/restyle-encoder | 8b0c97a6e44ad244efe7bd77f80d7201817c314c | ["MIT"] | null | null | null | training/coach_restyle_psp.py | jscarlson/restyle-encoder | 8b0c97a6e44ad244efe7bd77f80d7201817c314c | ["MIT"] | null | null | null |
import os
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use('Agg')
import torch
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import torch.nn.functional as F
from utils import common, train_utils
from criteria import id_loss, w_norm, moco_loss
from configs import data_configs
from datasets.images_dataset import ImagesDataset
from criteria.lpips.lpips import LPIPS
from models.psp import pSp
from training.ranger import Ranger
from utils.wandb_utils import WBLogger
class Coach:
def __init__(self, opts):
self.opts = opts
self.global_step = 0
self.device = 'cuda:0'
self.opts.device = self.device
self.wb_logger = WBLogger(self.opts)
# Initialize network
self.net = pSp(self.opts).to(self.device)
# Estimate latent_avg via dense sampling if latent_avg is not available
if self.net.latent_avg is None:
self.net.latent_avg = self.net.decoder.mean_latent(int(1e5))[0].detach()
# get the image corresponding to the latent average
self.avg_image = self.net(self.net.latent_avg.unsqueeze(0),
input_code=True,
randomize_noise=False,
return_latents=False,
average_code=True)[0]
self.avg_image = self.avg_image.to(self.device).float().detach()
if self.opts.dataset_type == "cars_encode":
self.avg_image = self.avg_image[:, 32:224, :]
common.tensor2im(self.avg_image).save(os.path.join(self.opts.exp_dir, 'avg_image.jpg'))
# Initialize loss
if self.opts.id_lambda > 0 and self.opts.moco_lambda > 0:
raise ValueError('Both ID and MoCo loss have lambdas > 0! Please select only one to have non-zero lambda!')
self.mse_loss = nn.MSELoss().to(self.device).eval()
if self.opts.lpips_lambda > 0:
self.lpips_loss = LPIPS(net_type='alex').to(self.device).eval()
if self.opts.id_lambda > 0:
self.id_loss = id_loss.IDLoss().to(self.device).eval()
if self.opts.w_norm_lambda > 0:
self.w_norm_loss = w_norm.WNormLoss(start_from_latent_avg=self.opts.start_from_latent_avg)
if self.opts.moco_lambda > 0:
self.moco_loss = moco_loss.MocoLoss()
# Initialize optimizer
self.optimizer = self.configure_optimizers()
# Initialize dataset
self.train_dataset, self.test_dataset = self.configure_datasets()
self.train_dataloader = DataLoader(self.train_dataset,
batch_size=self.opts.batch_size,
shuffle=True,
num_workers=int(self.opts.workers),
drop_last=True)
self.test_dataloader = DataLoader(self.test_dataset,
batch_size=self.opts.test_batch_size,
shuffle=False,
num_workers=int(self.opts.test_workers),
drop_last=True)
# Initialize logger
log_dir = os.path.join(opts.exp_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
self.logger = SummaryWriter(log_dir=log_dir)
# Initialize checkpoint dir
self.checkpoint_dir = os.path.join(opts.exp_dir, 'checkpoints')
os.makedirs(self.checkpoint_dir, exist_ok=True)
self.best_val_loss = None
if self.opts.save_interval is None:
self.opts.save_interval = self.opts.max_steps
def perform_train_iteration_on_batch(self, x, y):
y_hat, latent = None, None
loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
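# Descriptive note (added comment): ReStyle-style iterative refinement. On the first pass the encoder
# sees the input concatenated with the average-latent image; on later passes it sees the input
# concatenated with its own previous reconstruction, and loss gradients accumulate across the steps
# before the single optimizer.step() in train().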
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
y_hat, latent = self.net.forward(x_input, latent=None, return_latents=True)
else:
y_hat_clone = y_hat.clone().detach().requires_grad_(True)
latent_clone = latent.clone().detach().requires_grad_(True)
x_input = torch.cat([x, y_hat_clone], dim=1)
y_hat, latent = self.net.forward(x_input, latent=latent_clone, return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
loss.backward()
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, loss_dict, id_logs
def train(self):
self.net.train()
while self.global_step < self.opts.max_steps:
for batch_idx, batch in enumerate(self.train_dataloader):
self.optimizer.zero_grad()
x, y = batch
x, y = x.to(self.device).float(), y.to(self.device).float()
y_hats, loss_dict, id_logs = self.perform_train_iteration_on_batch(x, y)
self.optimizer.step()
# Logging related
if self.global_step % self.opts.image_interval == 0 or (self.global_step < 1000 and self.global_step % 25 == 0):
self.parse_and_log_images(id_logs, x, y, y_hats, title='images/train')
if self.global_step % self.opts.board_interval == 0:
self.print_metrics(loss_dict, prefix='train')
self.log_metrics(loss_dict, prefix='train')
# Log images of first batch to wandb
if self.global_step % 1000 == 0:
self.wb_logger.log_images_to_wandb(x, y, y_hats, id_logs, prefix="train", step=self.global_step, opts=self.opts)
# Validation related
val_loss_dict = None
if self.global_step % self.opts.val_interval == 0 or self.global_step == self.opts.max_steps:
val_loss_dict = self.validate()
if val_loss_dict and (self.best_val_loss is None or val_loss_dict['loss'] < self.best_val_loss):
self.best_val_loss = val_loss_dict['loss']
self.checkpoint_me(val_loss_dict, is_best=True)
if self.global_step % self.opts.save_interval == 0 or self.global_step == self.opts.max_steps:
if val_loss_dict is not None:
self.checkpoint_me(val_loss_dict, is_best=False)
else:
self.checkpoint_me(loss_dict, is_best=False)
if self.global_step == self.opts.max_steps:
print('OMG, finished training!')
break
self.global_step += 1
def perform_val_iteration_on_batch(self, x, y):
y_hat, latent = None, None
cur_loss_dict, id_logs = None, None
y_hats = {idx: [] for idx in range(x.shape[0])}
for iter in range(self.opts.n_iters_per_batch):
if iter == 0:
avg_image_for_batch = self.avg_image.unsqueeze(0).repeat(x.shape[0], 1, 1, 1)
x_input = torch.cat([x, avg_image_for_batch], dim=1)
else:
x_input = torch.cat([x, y_hat], dim=1)
y_hat, latent = self.net.forward(x_input, latent=latent, return_latents=True)
if self.opts.dataset_type == "cars_encode":
y_hat = y_hat[:, :, 32:224, :]
loss, cur_loss_dict, id_logs = self.calc_loss(x, y, y_hat, latent)
# store intermediate outputs
for idx in range(x.shape[0]):
y_hats[idx].append([y_hat[idx], id_logs[idx]['diff_target']])
return y_hats, cur_loss_dict, id_logs
def validate(self):
self.net.eval()
agg_loss_dict = []
for batch_idx, batch in enumerate(self.test_dataloader):
x, y = batch
with torch.no_grad():
x, y = x.to(self.device).float(), y.to(self.device).float()
y_hats, cur_loss_dict, id_logs = self.perform_val_iteration_on_batch(x, y)
agg_loss_dict.append(cur_loss_dict)
# Logging related
self.parse_and_log_images(id_logs, x, y, y_hats, title='images/test', subscript='{:04d}'.format(batch_idx))
# Log images of first batch to wandb
self.wb_logger.log_images_to_wandb(x, y, y_hats, id_logs, prefix="test", step=self.global_step, opts=self.opts)
# For first step just do sanity test on small amount of data
if self.global_step == 0 and batch_idx >= 4:
self.net.train()
return None # Do not log, inaccurate in first batch
loss_dict = train_utils.aggregate_loss_dict(agg_loss_dict)
self.log_metrics(loss_dict, prefix='test')
self.print_metrics(loss_dict, prefix='test')
self.net.train()
return loss_dict
def checkpoint_me(self, loss_dict, is_best):
save_name = 'best_model.pt' if is_best else 'iteration_{}.pt'.format(self.global_step)
save_dict = self.__get_save_dict()
checkpoint_path = os.path.join(self.checkpoint_dir, save_name)
torch.save(save_dict, checkpoint_path)
with open(os.path.join(self.checkpoint_dir, 'timestamp.txt'), 'a') as f:
if is_best:
f.write('**Best**: Step - {}, Loss - {:.3f} \n{}\n'.format(self.global_step, self.best_val_loss, loss_dict))
else:
f.write('Step - {}, \n{}\n'.format(self.global_step, loss_dict))
def configure_optimizers(self):
params = list(self.net.encoder.parameters())
if self.opts.train_decoder:
params += list(self.net.decoder.parameters())
if self.opts.optim_name == 'adam':
optimizer = torch.optim.Adam(params, lr=self.opts.learning_rate)
else:
optimizer = Ranger(params, lr=self.opts.learning_rate)
return optimizer
def configure_datasets(self):
if self.opts.dataset_type not in data_configs.DATASETS.keys():
raise Exception('{} is not a valid dataset_type'.format(self.opts.dataset_type))
print('Loading dataset for {}'.format(self.opts.dataset_type))
dataset_args = data_configs.DATASETS[self.opts.dataset_type]
transforms_dict = dataset_args['transforms'](self.opts).get_transforms()
train_dataset = ImagesDataset(source_root=dataset_args['train_source_root'],
target_root=dataset_args['train_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_gt_train'],
opts=self.opts)
test_dataset = ImagesDataset(source_root=dataset_args['test_source_root'],
target_root=dataset_args['test_target_root'],
source_transform=transforms_dict['transform_source'],
target_transform=transforms_dict['transform_test'],
opts=self.opts)
self.wb_logger.log_dataset_wandb(train_dataset, dataset_name="Train")
self.wb_logger.log_dataset_wandb(test_dataset, dataset_name="Test")
print("Number of training samples: {}".format(len(train_dataset)))
print("Number of test samples: {}".format(len(test_dataset)))
return train_dataset, test_dataset
def calc_loss(self, x, y, y_hat, latent):
loss_dict = {}
loss = 0.0
id_logs = None
if self.opts.id_lambda > 0:
loss_id, sim_improvement, id_logs = self.id_loss(y_hat, y, x)
loss_dict['loss_id'] = float(loss_id)
loss_dict['id_improve'] = float(sim_improvement)
loss = loss_id * self.opts.id_lambda
if self.opts.l2_lambda > 0:
loss_l2 = F.mse_loss(y_hat, y)
loss_dict['loss_l2'] = float(loss_l2)
loss += loss_l2 * self.opts.l2_lambda
if self.opts.lpips_lambda > 0:
loss_lpips = self.lpips_loss(y_hat, y)
loss_dict['loss_lpips'] = float(loss_lpips)
loss += loss_lpips * self.opts.lpips_lambda
if self.opts.w_norm_lambda > 0:
loss_w_norm = self.w_norm_loss(latent, self.net.latent_avg)
loss_dict['loss_w_norm'] = float(loss_w_norm)
loss += loss_w_norm * self.opts.w_norm_lambda
if self.opts.moco_lambda > 0:
loss_moco, sim_improvement, id_logs = self.moco_loss(y_hat, y, x)
loss_dict['loss_moco'] = float(loss_moco)
loss_dict['id_improve'] = float(sim_improvement)
loss += loss_moco * self.opts.moco_lambda
loss_dict['loss'] = float(loss)
return loss, loss_dict, id_logs
def log_metrics(self, metrics_dict, prefix):
for key, value in metrics_dict.items():
self.logger.add_scalar('{}/{}'.format(prefix, key), value, self.global_step)
self.wb_logger.log(prefix, metrics_dict, self.global_step)
def print_metrics(self, metrics_dict, prefix):
print('Metrics for {}, step {}'.format(prefix, self.global_step))
for key, value in metrics_dict.items():
print('\t{} = '.format(key), value)
def parse_and_log_images(self, id_logs, x, y, y_hat, title, subscript=None, display_count=2):
im_data = []
for i in range(display_count):
if type(y_hat) == dict:
output_face = [
[common.tensor2im(y_hat[i][iter_idx][0]), y_hat[i][iter_idx][1]]
for iter_idx in range(len(y_hat[i]))
]
else:
output_face = [common.tensor2im(y_hat[i])]
cur_im_data = {
'input_face': common.tensor2im(x[i]),
'target_face': common.tensor2im(y[i]),
'output_face': output_face,
}
if id_logs is not None:
for key in id_logs[i]:
cur_im_data[key] = id_logs[i][key]
im_data.append(cur_im_data)
self.log_images(title, im_data=im_data, subscript=subscript)
def log_images(self, name, im_data, subscript=None, log_latest=False):
fig = common.vis_faces(im_data)
step = self.global_step
if log_latest:
step = 0
if subscript:
path = os.path.join(self.logger.log_dir, name, '{}_{:04d}.jpg'.format(subscript, step))
else:
path = os.path.join(self.logger.log_dir, name, '{:04d}.jpg'.format(step))
os.makedirs(os.path.dirname(path), exist_ok=True)
fig.savefig(path)
plt.close(fig)
def __get_save_dict(self):
save_dict = {
'state_dict': self.net.state_dict(),
'opts': vars(self.opts),
'latent_avg': self.net.latent_avg
}
return save_dict
| 37.652941
| 117
| 0.712467
|
a630dda8d40c9978d79ca686bc04c07c8e46e47d
| 526
|
py
|
Python
|
bardolph/controller/routine.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | null | null | null |
bardolph/controller/routine.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | 16
|
2020-06-15T11:04:10.000Z
|
2022-03-28T05:39:10.000Z
|
bardolph/controller/routine.py
|
al-fontes-jr/bardolph
|
209bba49765c729d8f1479903593043cef274aab
|
[
"Apache-2.0"
] | 1
|
2020-06-24T02:01:04.000Z
|
2020-06-24T02:01:04.000Z
|
class Routine:
def __init__(self, name, address=0):
self._name = name
self._address = address
self._params = []
@property
def name(self):
return self._name
@property
def params(self):
return self._params
def add_param(self, name):
self._params.append(name)
def has_param(self, name):
return name in self._params
def set_address(self, address):
self._address = address
def get_address(self):
return self._address
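# Illustrative usage sketch (hypothetical routine name, not from the original source):
# routine = Routine('all_on')
# routine.add_param('duration')
# assert routine.has_param('duration')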
| 20.230769
| 40
| 0.604563
|
41e1a86f0c0820c472ad13f5daab6036bc45179a
| 7,153
|
py
|
Python
|
backend/database/wrapper/stats/global_stats_wrapper.py
|
wildermuthn/DistributedReplays
|
7fe6ab9f30a47be033779309546bbc9d1343402b
|
[
"Apache-2.0"
] | null | null | null |
backend/database/wrapper/stats/global_stats_wrapper.py
|
wildermuthn/DistributedReplays
|
7fe6ab9f30a47be033779309546bbc9d1343402b
|
[
"Apache-2.0"
] | null | null | null |
backend/database/wrapper/stats/global_stats_wrapper.py
|
wildermuthn/DistributedReplays
|
7fe6ab9f30a47be033779309546bbc9d1343402b
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import sys
import os
from sqlalchemy import func
path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', '..'))
print(path)
sys.path.append(path)
from backend.blueprints.spa_api.errors.errors import CalculatedError
from backend.database.objects import PlayerGame, Game, Playlist
from backend.database.wrapper.query_filter_builder import QueryFilterBuilder
from backend.database.wrapper.rank_wrapper import get_rank_tier
from backend.database.wrapper.stats.shared_stats_wrapper import SharedStatsWrapper
from backend.utils.checks import ignore_filtering, is_local_dev
logger = logging.getLogger(__name__)
GLOBAL_STATS_PLAYLISTS = [Playlist.UNRANKED_DUELS,
Playlist.UNRANKED_DOUBLES,
Playlist.UNRANKED_STANDARD,
Playlist.UNRANKED_CHAOS,
Playlist.RANKED_DUELS,
Playlist.RANKED_DOUBLES,
Playlist.RANKED_SOLO_STANDARD,
Playlist.RANKED_STANDARD,
Playlist.RANKED_DROPSHOT,
Playlist.RANKED_HOOPS,
Playlist.RANKED_RUMBLE,
Playlist.RANKED_SNOW_DAY]
GLOBAL_STATS_PLAYLISTS = [int(p.value) for p in GLOBAL_STATS_PLAYLISTS]
class GlobalStatWrapper(SharedStatsWrapper):
"""
    A database wrapper for global stats. Acts on global stats in addition to per-player stats.
"""
def __init__(self):
super().__init__()
        # This object needs to be pooled per session so that only one is used at a time
self.base_query = QueryFilterBuilder().with_relative_start_time(
days_ago=self.get_timeframe()).with_safe_checking().sticky()
def get_global_stats(self, sess, with_rank=True):
"""
        :return: A dict keyed by playlist, mapping each stat field name to a list of {'mean', 'std'} entries, one per rank.
"""
if with_rank:
ranks = list(range(20))
else:
ranks = [0]
playlist_results = {}
for playlist in GLOBAL_STATS_PLAYLISTS:
results = {}
for column, q in zip(self.get_player_stat_list(), self.get_player_stat_query()):
column_results = []
# set the column result
self.base_query.clean().with_stat_query([PlayerGame.player, q.label('avg')])
for rank in ranks:
result = self._get_global_stats_result(self.base_query, playlist, rank, sess, with_rank=with_rank)
column_results.append({'mean': self.float_maybe(result[0]), 'std': self.float_maybe(result[1])})
results[column.get_field_name()] = column_results
playlist_results[playlist] = results
return playlist_results
def _get_global_stats_result(self, query, playlist, rank, session, with_rank=True):
if with_rank:
query = query.with_rank(rank)
if not ignore_filtering():
query.with_playlists([playlist])
query = query.build_query(session)
query = query.group_by(PlayerGame.player)
if ignore_filtering():
query = query.subquery()
else:
query = query.filter(PlayerGame.game != "").filter(PlayerGame.time_in_game > 0).having(
func.count(PlayerGame.player) > 5).subquery()
return session.query(func.avg(query.c.avg), func.stddev_samp(query.c.avg)).first()
def get_global_stats_by_rank(self, session, query_filter: QueryFilterBuilder, stats_query, stds_query,
player_rank=None, redis=None, ids=None, playlist=13):
"""
Returns the global stats based on the rank of a player.
Does modify the query_filter only setting rank.
:param session: Session
:param query_filter: a query filter.
:param stats_query: A list of global stats
:param stds_query: A list of global stats for standard deviations
        :param player_rank: The rank of the player the stats are associated with. Falls back to unranked if the rank is not found.
        :param redis: The local cache
        :return: A tuple (global_stats, global_stds).
"""
if ids is None:
# Set the correct rank index
if player_rank is not None:
if isinstance(player_rank, dict) or isinstance(player_rank, list):
rank_index = get_rank_tier(player_rank, playlist=playlist)
else:
rank_index = player_rank
else:
rank_index = 0
stat_list = self.get_player_stat_list()
# Check to see if we have redis available (it usually is)
if redis is not None:
stat_string = redis.get('global_stats')
# Check to see if the key exists and if so load it
if stat_string is not None:
stats_dict = json.loads(stat_string)
if playlist is not None:
playlist = str(playlist)
if playlist not in stats_dict:
raise CalculatedError(404, 'Playlist does not exist in global stats')
stats_dict = stats_dict[playlist]
global_stats = []
global_stds = []
for stat in stat_list:
if stat.get_field_name() in stats_dict:
global_stats.append(stats_dict[stat.get_field_name()][rank_index]['mean'])
global_stds.append(stats_dict[stat.get_field_name()][rank_index]['std'])
else:
global_stats.append(1)
global_stds.append(1)
return global_stats, global_stds
            if is_local_dev():
                rank_index = 0
                # get_global_stats() keys its results by playlist; select the requested playlist (assumed present).
                stats = self.get_global_stats(session, with_rank=False)[playlist]
                global_stats = [stats[stat.get_field_name()][rank_index]['mean'] for stat in stat_list]
                global_stds = [stats[stat.get_field_name()][rank_index]['std'] for stat in stat_list]
                return global_stats, global_stds
            raise CalculatedError(500, "Global stats unavailable or have not been calculated yet.")
else:
query_filter.clean().with_replay_ids(ids)
return (query_filter.with_stat_query(stats_query).build_query(session).first(),
query_filter.with_stat_query(stds_query).build_query(session).first())
@staticmethod
def get_timeframe():
"""Returns the number of days we accept old stats"""
try:
from flask import current_app
return current_app.config['STAT_DAY_LIMIT']
except:
return 30 * 6
if __name__ == '__main__':
from backend.database.startup import startup
engine, Session = startup()
sess = Session()
try:
result = GlobalStatWrapper().get_global_stats(sess)
print(result)
except KeyboardInterrupt:
sess.close()
finally: # result = engine.execute()
sess.close()
| 42.325444
| 118
| 0.603243
|
054090dffbc5be3069d9a0b53a5dc267a9433c5e
| 3,510
|
py
|
Python
|
pyzoo/zoo/examples/automl/plugin_model_demo.py
|
pinggao18/analytics-zoo
|
30a50e7f93770cb833e4ab99439d5210e2489e86
|
[
"Apache-2.0"
] | null | null | null |
pyzoo/zoo/examples/automl/plugin_model_demo.py
|
pinggao18/analytics-zoo
|
30a50e7f93770cb833e4ab99439d5210e2489e86
|
[
"Apache-2.0"
] | 1
|
2021-01-20T15:41:01.000Z
|
2021-01-20T15:41:01.000Z
|
pyzoo/zoo/examples/automl/plugin_model_demo.py
|
pinggao18/analytics-zoo
|
30a50e7f93770cb833e4ab99439d5210e2489e86
|
[
"Apache-2.0"
] | 1
|
2020-12-21T11:48:49.000Z
|
2020-12-21T11:48:49.000Z
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.automl.search import SearchEngineFactory
from zoo.automl.model import ModelBuilder
import torch
import torch.nn as nn
from zoo.automl.config.recipe import Recipe
from ray import tune
import pandas as pd
import numpy as np
from zoo.orca import init_orca_context
def model_creator(config):
"""Returns a torch.nn.Module object."""
return nn.Linear(1, config.get("hidden_size", 1))
def optimizer_creator(model, config):
"""Returns optimizer defined upon the model parameters."""
return torch.optim.SGD(model.parameters(), lr=config.get("lr", 1e-2))
def loss_creator(config):
return nn.MSELoss()
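# Illustrative note (a sketch of how the pieces fit together): the three creators above
# define a small linear model trained with SGD on an MSE loss; SimpleRecipe below tells
# the searcher to sample "lr" and "batch_size" for each trial.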
class SimpleRecipe(Recipe):
def __init__(self):
super().__init__()
self.num_samples = 2
def search_space(self, all_available_features):
return {
"lr": tune.uniform(0.001, 0.01),
"batch_size": tune.choice([32, 64])
}
def get_data():
def get_linear_data(a, b, size):
x = np.arange(0, 10, 10 / size, dtype=np.float32)
y = a*x + b
return x, y
train_x, train_y = get_linear_data(2, 5, 1000)
df = pd.DataFrame({'x': train_x, 'y': train_y})
val_x, val_y = get_linear_data(2, 5, 400)
val_df = pd.DataFrame({'x': val_x, 'y': val_y})
return df, val_df
if __name__ == "__main__":
    # 1. How to enable auto-tuning of a model defined through creator functions.
init_orca_context(init_ray_on_spark=True)
modelBuilder = ModelBuilder.from_pytorch(model_creator=model_creator,
optimizer_creator=optimizer_creator,
loss_creator=loss_creator)
searcher = SearchEngineFactory.create_engine(backend="ray",
logs_dir="~/zoo_automl_logs",
resources_per_trial={"cpu": 2},
name="demo")
    # Pass the input data, model builder and recipe into searcher.compile. Note that if the user
    # doesn't pass a feature transformer, the default identity feature transformer is used.
df, val_df = get_data()
searcher.compile(df,
modelBuilder,
recipe=SimpleRecipe(),
feature_cols=["x"],
target_col="y",
validation_df=val_df)
searcher.run()
best_trials = searcher.get_best_trials(k=1)
print(best_trials[0].config)
    # 2. You can also use the model builder with a fixed config.
model = modelBuilder.build(config={
"lr": 1e-2, # used in optimizer_creator
"batch_size": 32, # used in data_creator
})
val_result = model.fit_eval(x=df[["x"]],
y=df[["y"]],
validation_data=(val_df[["x"]], val_df["y"]),
epochs=1)
print(val_result)
| 33.75
| 100
| 0.611396
|
78b9a1633ea4c7e0d724b867a7057f5af2a35cf9
| 2,652
|
py
|
Python
|
interviewcake/greedy-algorithm/practices/apple_stocks.py
|
callmekungfu/daily
|
aacea65ba5d276ea54e171a59e9fd05365a3bf44
|
[
"MIT"
] | null | null | null |
interviewcake/greedy-algorithm/practices/apple_stocks.py
|
callmekungfu/daily
|
aacea65ba5d276ea54e171a59e9fd05365a3bf44
|
[
"MIT"
] | null | null | null |
interviewcake/greedy-algorithm/practices/apple_stocks.py
|
callmekungfu/daily
|
aacea65ba5d276ea54e171a59e9fd05365a3bf44
|
[
"MIT"
] | null | null | null |
'''
# Writing programming interview questions hasn't made me rich yet ...
so I might give up and start trading Apple stocks all day instead.
First, I wanna know how much money I could have made yesterday if I'd been trading Apple stocks all day.
So I grabbed Apple's stock prices from yesterday and put them in a list called stock_prices, where:
The indices are the time (in minutes) past trade opening time, which was 9:30am local time.
The values are the price (in US dollars) of one share of Apple stock at that time.
So if the stock cost $500 at 10:30am, that means stock_prices[60] = 500.
Write an efficient function that takes stock_prices and returns the best profit
I could have made from one purchase and one sale of one share of Apple stock yesterday.
For example:
stock_prices = [10, 7, 5, 8, 11, 9]
get_max_profit(stock_prices)
# Returns 6 (buying for $5 and selling for $11)
No "shorting"—you need to buy before you can sell. Also, you can't buy and sell in the same time step—at least 1 minute has to pass.
'''
# My Solution
def get_max_profit(stock_prices):
max_profit = stock_prices[1] - stock_prices[0]
lowest_price = stock_prices[0]
for price in stock_prices:
if price < lowest_price:
lowest_price = price
elif price - lowest_price > max_profit:
max_profit = price - lowest_price
return max_profit
# Given solution
def get_max_profit(stock_prices):
if len(stock_prices) < 2:
raise ValueError('Getting a profit requires at least 2 prices')
# We'll greedily update min_price and max_profit, so we initialize
# them to the first price and the first possible profit
min_price = stock_prices[0]
max_profit = stock_prices[1] - stock_prices[0]
# Start at the second (index 1) time
# We can't sell at the first time, since we must buy first,
# and we can't buy and sell at the same time!
# If we started at index 0, we'd try to buy *and* sell at time 0.
# This would give a profit of 0, which is a problem if our
# max_profit is supposed to be *negative*--we'd return 0.
for current_time in range(1, len(stock_prices)):
current_price = stock_prices[current_time]
# See what our profit would be if we bought at the
# min price and sold at the current price
potential_profit = current_price - min_price
# Update max_profit if we can do better
max_profit = max(max_profit, potential_profit)
# Update min_price so it's always
# the lowest price we've seen so far
min_price = min(min_price, current_price)
return max_profit
# Test code
stock_prices = [10, 8, 7, 6, 5, 4]
print(get_max_profit(stock_prices))
| 38.434783
| 132
| 0.725867
|
1075df8371410f31b25c674f9a1443a922c6ad95
| 2,395
|
py
|
Python
|
speech/setup.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
speech/setup.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
speech/setup.py
|
rodrigodias27/google-cloud-python
|
7d1161f70744c0dbbe67a3f472ea95667eaafe50
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
from setuptools import find_packages
from setuptools import setup
PACKAGE_ROOT = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(PACKAGE_ROOT, 'README.rst')) as file_obj:
README = file_obj.read()
# NOTE: This is duplicated throughout and we should try to
# consolidate.
SETUP_BASE = {
'author': 'Google Cloud Platform',
'author_email': 'googleapis-publisher@google.com',
'scripts': [],
'url': 'https://github.com/GoogleCloudPlatform/google-cloud-python',
'license': 'Apache 2.0',
'platforms': 'Posix; MacOS X; Windows',
'include_package_data': True,
'zip_safe': False,
'classifiers': [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet',
],
}
REQUIREMENTS = [
'google-cloud-core >= 0.25.0, < 0.26dev',
'google-gax >= 0.15.13, < 0.16dev',
'googleapis-common-protos[grpc] >= 1.5.2, < 2.0dev',
]
setup(
name='google-cloud-speech',
version='0.27.0',
description='Python Client for Google Cloud Speech',
long_description=README,
namespace_packages=[
'google',
'google.cloud',
'google.cloud.gapic',
'google.cloud.gapic.speech',
'google.cloud.proto',
'google.cloud.proto.speech',
],
packages=find_packages(exclude=('tests*',)),
install_requires=REQUIREMENTS,
**SETUP_BASE
)
| 31.103896
| 74
| 0.655532
|
4b7e58f3a5193b6c5409dbb2da2b428a17be2b53
| 239
|
py
|
Python
|
erri/python/lesson_26/tableau.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
erri/python/lesson_26/tableau.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | 16
|
2020-05-30T12:38:13.000Z
|
2022-02-19T09:23:31.000Z
|
erri/python/lesson_26/tableau.py
|
TGITS/programming-workouts
|
799e805ccf3fd0936ec8ac2417f7193b8e9bcb55
|
[
"MIT"
] | null | null | null |
def séparation_positif_negatif(tableau):
t_positif = []
t_negatif = []
for elt in tableau:
if elt < 0:
t_negatif.append(elt)
else:
t_positif.append(elt)
return t_positif, t_negatif
| 19.916667
| 40
| 0.58159
|
0ab78bf08db78f0b0c3a4fb5de8765a80b08a439
| 6,549
|
py
|
Python
|
tests/test_df_profiler.py
|
XD-DENG/Optimus
|
13e7b180f0970addae77cafe128bd2a93be138a2
|
[
"Apache-2.0"
] | 1
|
2020-09-22T13:04:37.000Z
|
2020-09-22T13:04:37.000Z
|
tests/test_df_profiler.py
|
rafaelang/Optimus
|
809088f41588c968b2e30210f98a494a497b07ff
|
[
"Apache-2.0"
] | null | null | null |
tests/test_df_profiler.py
|
rafaelang/Optimus
|
809088f41588c968b2e30210f98a494a497b07ff
|
[
"Apache-2.0"
] | null | null | null |
from pyspark.sql.types import *
from optimus import Optimus
from optimus.helpers.json import json_enconding
from optimus.helpers.functions import deep_sort
import unittest
from pyspark.ml.linalg import Vectors, VectorUDT, DenseVector
import numpy as np
nan = np.nan
import datetime
from pyspark.sql import functions as F
from optimus.profiler.profiler import Profiler
null = None
true = True
p = Profiler()
op = Optimus(master='local')
source_df=op.create.df([('names', StringType(), True),('height(ft)', ShortType(), True),('function', StringType(), True),('rank', ByteType(), True),('age', IntegerType(), True),('weight(t)', FloatType(), True),('japanese name', ArrayType(StringType(),True), True),('last position seen', StringType(), True),('date arrival', StringType(), True),('last date seen', StringType(), True),('attributes', ArrayType(FloatType(),True), True),('Date Type', DateType(), True),('timestamp', TimestampType(), True),('Cybertronian', BooleanType(), True),('function(binary)', BinaryType(), True),('NullType', NullType(), True)], [('Optimus', -28, 'Leader', 10, 5000000, 4.300000190734863, ['Inochi', 'Convoy'], '19.442735,-99.201111', '1980/04/10', '2016/09/10', [8.53439998626709, 4300.0], datetime.date(2016, 9, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Leader'), None), ('bumbl#ebéé ', 17, 'Espionage', 7, 5000000, 2.0, ['Bumble', 'Goldback'], '10.642707,-71.612534', '1980/04/10', '2015/08/10', [5.334000110626221, 2000.0], datetime.date(2015, 8, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Espionage'), None), ('ironhide&', 26, 'Security', 7, 5000000, 4.0, ['Roadbuster'], '37.789563,-122.400356', '1980/04/10', '2014/07/10', [7.924799919128418, 4000.0], datetime.date(2014, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Security'), None), ('Jazz', 13, 'First Lieutenant', 8, 5000000, 1.7999999523162842, ['Meister'], '33.670666,-117.841553', '1980/04/10', '2013/06/10', [3.962399959564209, 1800.0], datetime.date(2013, 6, 24), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'First Lieutenant'), None), ('Megatron', None, 'None', 10, 5000000, 5.699999809265137, ['Megatron'], None, '1980/04/10', '2012/05/10', [None, 5700.0], datetime.date(2012, 5, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'None'), None), ('Metroplex_)^$', 300, 'Battle Station', 8, 5000000, None, ['Metroflex'], None, '1980/04/10', '2011/04/10', [91.44000244140625, None], datetime.date(2011, 4, 10), datetime.datetime(2014, 6, 24, 0, 0), True, bytearray(b'Battle Station'), None), (None, None, None, None, None, None, None, None, None, None, None, None, None, None, None, None)])
class Test_df_profiler(unittest.TestCase):
maxDiff = None
@staticmethod
def test_columns_agg():
        actual_df = p.columns_agg(source_df, '*')
        actual_df = json_enconding(actual_df)
expected_value =json_enconding({'names': {'count_uniques': 5, 'min': 'Jazz', 'max': 'ironhide&', 'count_na': 1, 'stddev': None, 'kurtosis': None, 'mean': None, 'skewness': None, 'sum': None, 'variance': None, 'zeros': 0}, 'height(ft)': {'count_uniques': 5, 'min': -28, 'max': 300, 'count_na': 2, 'stddev': 132.66612, 'kurtosis': 0.13863, 'mean': 65.6, 'skewness': 1.4049, 'sum': 328, 'variance': 17600.3, 'zeros': 0, 'percentile': {'0.75': 26, '0.95': 300, '0.05': -28, '0.25': 13, '0.5': 17}, 'hist': [{'count': 4.0, 'lower': -28.0, 'upper': 54.0}, {'count': 0.0, 'lower': 54.0, 'upper': 136.0}, {'count': 0.0, 'lower': 136.0, 'upper': 218.0}, {'count': 0.0, 'lower': 218.0, 'upper': 300.0}]}, 'function': {'count_uniques': 6, 'min': 'Battle Station', 'max': 'Security', 'count_na': 1, 'stddev': None, 'kurtosis': None, 'mean': None, 'skewness': None, 'sum': None, 'variance': None, 'zeros': 0}, 'rank': {'count_uniques': 3, 'min': 7, 'max': 10, 'count_na': 1, 'stddev': 1.36626, 'kurtosis': -1.5, 'mean': 8.33333, 'skewness': 0.3818, 'sum': 50, 'variance': 1.86667, 'zeros': 0, 'percentile': {'0.75': 10, '0.95': 10, '0.05': 7, '0.25': 7, '0.5': 8}, 'hist': [{'count': 4.0, 'lower': 7.0, 'upper': 8.5}, {'count': 0.0, 'lower': 8.5, 'upper': 10.0}]}, 'age': {'count_uniques': 1, 'min': 5000000, 'max': 5000000, 'count_na': 1, 'stddev': 0.0, 'kurtosis': nan, 'mean': 5000000.0, 'skewness': nan, 'sum': 30000000, 'variance': 0.0, 'zeros': 0, 'percentile': {'0.75': 5000000, '0.95': 5000000, '0.05': 5000000, '0.25': 5000000, '0.5': 5000000}, 'hist': [{'count': 6, 'lower': 5000000, 'upper': 5000001}]}, 'weight(t)': {'count_uniques': 5, 'min': 1.8, 'max': 5.7, 'count_na': 2, 'stddev': 1.64712, 'kurtosis': -1.43641, 'mean': 3.56, 'skewness': 0.06521, 'sum': 17.8, 'variance': 2.713, 'zeros': 0, 'percentile': {'0.75': 4.300000190734863, '0.95': 5.699999809265137, '0.05': 1.7999999523162842, '0.25': 2.0, '0.5': 4.0}, 'hist': [{'count': 1.0, 'lower': 1.8, 'upper': 2.78}, {'count': 0.0, 'lower': 2.78, 'upper': 3.75}, {'count': 2.0, 'lower': 3.75, 'upper': 4.73}, {'count': 1.0, 'lower': 4.73, 'upper': 5.7}]}, 'japanese name': {'count_uniques': 6, 'min': ['Bumble', 'Goldback'], 'max': ['Roadbuster'], 'count_na': 1}, 'last position seen': {'count_uniques': 4, 'min': '10.642707,-71.612534', 'max': '37.789563,-122.400356', 'count_na': 3, 'stddev': None, 'kurtosis': None, 'mean': None, 'skewness': None, 'sum': None, 'variance': None, 'zeros': 0}, 'date arrival': {'count_uniques': 1, 'min': '1980/04/10', 'max': '1980/04/10', 'count_na': 1, 'stddev': None, 'kurtosis': None, 'mean': None, 'skewness': None, 'sum': None, 'variance': None, 'zeros': 0}, 'last date seen': {'count_uniques': 6, 'min': '2011/04/10', 'max': '2016/09/10', 'count_na': 1, 'stddev': None, 'kurtosis': None, 'mean': None, 'skewness': None, 'sum': None, 'variance': None, 'zeros': 0}, 'attributes': {'count_uniques': 6, 'min': [None, 5700.0], 'max': [91.44000244140625, None], 'count_na': 1}, 'Date Type': {'count_uniques': 6, 'min': datetime.date(2011, 4, 10), 'max': datetime.date(2016, 9, 10), 'count_na': 1}, 'timestamp': {'count_uniques': 1, 'min': datetime.datetime(2014, 6, 24, 0, 0), 'max': datetime.datetime(2014, 6, 24, 0, 0), 'count_na': 1}, 'Cybertronian': {'count_uniques': 1, 'min': 1, 'max': 1, 'count_na': 1}, 'function(binary)': {'count_uniques': 6, 'min': bytearray(b'Battle Station'), 'max': bytearray(b'Security'), 'count_na': 1}, 'NullType': {'count_uniques': 0, 'min': None, 'max': None, 'count_na': 7}, 'p_count_na': 100.0, 'p_count_uniques': 0.0, 'range': 
3.9000000000000004, 'median': 4.0, 'interquartile_range': 2.3000001907348633, 'coef_variation': 0.46267, 'mad': 1.7})
assert(expected_value == actual_df)
| 261.96
| 3,674
| 0.636128
|
e0aa9347ca7b1c4e39758c1da9173321b499d5cc
| 3,009
|
py
|
Python
|
tests/performance_benchmarking/island_benchmarks.py
|
nolanstr/bingo_multi_stage
|
7a88c4f5c59268d0612664be5864765db2edad51
|
[
"Apache-2.0"
] | null | null | null |
tests/performance_benchmarking/island_benchmarks.py
|
nolanstr/bingo_multi_stage
|
7a88c4f5c59268d0612664be5864765db2edad51
|
[
"Apache-2.0"
] | null | null | null |
tests/performance_benchmarking/island_benchmarks.py
|
nolanstr/bingo_multi_stage
|
7a88c4f5c59268d0612664be5864765db2edad51
|
[
"Apache-2.0"
] | null | null | null |
import timeit
import unittest.mock as mock
import numpy as np
from bingo.symbolic_regression.agraph.crossover import AGraphCrossover
from bingo.symbolic_regression.agraph.mutation import AGraphMutation
from bingo.symbolic_regression.agraph.generator import AGraphGenerator
from bingo.symbolic_regression.agraph.component_generator \
import ComponentGenerator
from bingo.symbolic_regression.explicit_regression import ExplicitRegression, \
ExplicitTrainingData
from bingo.evolutionary_algorithms.age_fitness import AgeFitnessEA
from bingo.evaluation.evaluation import Evaluation
from bingo.evolutionary_optimizers.island import Island
from bingo.local_optimizers.continuous_local_opt \
import ContinuousLocalOptimization
from benchmark_data import StatsPrinter
POP_SIZE = 128
STACK_SIZE = 64
MUTATION_PROBABILITY = 0.4
CROSSOVER_PROBABILITY = 0.4
NUM_POINTS = 100
START = -10
STOP = 10
ERROR_TOLERANCE = 10e-9
SEED = 20
def init_x_vals(start, stop, num_points):
return np.linspace(start, stop, num_points).reshape([-1, 1])
def equation_eval(x):
return x**2 + 3.5*x**3
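# Illustrative note: the island below regresses y = x**2 + 3.5*x**3 sampled at NUM_POINTS
# points in [START, STOP], and explicit_regression_benchmark() steps generations until the
# best fitness falls below ERROR_TOLERANCE.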
def init_island():
np.random.seed(15)
x = init_x_vals(START, STOP, NUM_POINTS)
y = equation_eval(x)
training_data = ExplicitTrainingData(x, y)
component_generator = ComponentGenerator(x.shape[1])
component_generator.add_operator(2)
component_generator.add_operator(3)
component_generator.add_operator(4)
crossover = AGraphCrossover()
mutation = AGraphMutation(component_generator)
agraph_generator = AGraphGenerator(STACK_SIZE, component_generator)
fitness = ExplicitRegression(training_data=training_data)
local_opt_fitness = ContinuousLocalOptimization(fitness, algorithm='lm')
evaluator = Evaluation(local_opt_fitness)
ea_algorithm = AgeFitnessEA(evaluator, agraph_generator, crossover,
mutation, MUTATION_PROBABILITY,
CROSSOVER_PROBABILITY, POP_SIZE)
island = Island(ea_algorithm, agraph_generator, POP_SIZE)
return island
TEST_ISLAND = init_island()
class IslandStatsPrinter(StatsPrinter):
def __init__(self):
super().__init__()
self._output = ["-"*24+":::: REGRESSION BENCHMARKS ::::" + "-"*23,
self._header_format_string.format("NAME", "MEAN",
"STD", "MIN", "MAX"),
"-"*78]
def explicit_regression_benchmark():
island = init_island()
while island.get_best_individual().fitness > ERROR_TOLERANCE:
island._execute_generational_step()
def do_benchmarking():
printer = IslandStatsPrinter()
printer.add_stats("Explicit Regression",
timeit.repeat(explicit_regression_benchmark,
number=4,
repeat=4))
printer.print()
if __name__ == "__main__":
do_benchmarking()
| 31.34375
| 79
| 0.691924
|
6806288512e88d9c4050366bae98ab17cdaf2c0c
| 2,346
|
py
|
Python
|
filter.py
|
jcomeauictx/mitmproxy
|
6d09f5dcad7becb40f2048b2678079b34ca3ea6b
|
[
"MIT"
] | null | null | null |
filter.py
|
jcomeauictx/mitmproxy
|
6d09f5dcad7becb40f2048b2678079b34ca3ea6b
|
[
"MIT"
] | null | null | null |
filter.py
|
jcomeauictx/mitmproxy
|
6d09f5dcad7becb40f2048b2678079b34ca3ea6b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
'https://stackoverflow.com/a/45044199/493161'
import os, time
LOGDIR = os.getenv('LOGDIR') or os.path.join('var', 'log', 'mitmproxy')
HOME = os.path.expanduser('~')
CONFDIR = os.getenv('CONFDIR') or os.path.join(HOME, '.mitmproxy')
def response(flow):
'''
Log response to a unique log for these host-port combinations
'''
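    # Illustrative sketch (hypothetical values): a request from 10.0.0.2 to example.com at
    # Unix time 1700000000 is appended to
    #   <LOGDIR>/traffic/example.com/10.0.0.2_example.com_1700000000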
clientfilter = []
filterfile = os.path.join(CONFDIR, 'clientfilter.txt')
if os.path.exists(filterfile):
with open(filterfile) as infile:
clientfilter = [line.rstrip() for line in infile.readlines()]
print(f'clientfilter: {clientfilter}')
fromaddr = flow.client_conn.address
fromhost = fromaddr[0]
fromport = str(fromaddr[1])
tohost = flow.request.host
if clientfilter and fromhost not in clientfilter:
print(f'***** KILLING CONNECTION FROM {fromhost} *****')
flow.kill()
toport = str(flow.request.port)
unixtime = str(int(time.time()))
logname = '_'.join((fromhost, tohost, unixtime))
logdir = os.path.join(LOGDIR, 'traffic', tohost)
os.makedirs(logdir, exist_ok=True)
logpath = os.path.join(logdir, logname)
with open(logpath, 'a') as logfile:
print(f'Request from {fromhost}:{fromport} to'
f' {tohost}:{toport}', file=logfile)
print(f'Headers:', file=logfile)
print(f'{flow.request.method} {flow.request.path}'
f' {flow.request.http_version}', file=logfile)
for k, v in flow.request.headers.items():
value=' '.join(v.split())
print(f'{k}: {value}', file=logfile)
print(file=logfile)
print('Request body:', file=logfile)
print(flow.request.content.decode(), file=logfile)
print(file=logfile)
print(f'Response from {tohost}:{toport} to'
f' {fromhost}:{fromport}', file=logfile)
print(f'Headers:', file=logfile)
reason = flow.response.reason or ''
print(f'{flow.response.http_version} {flow.response.status_code}'
f' {reason}', file=logfile)
for k, v in flow.response.headers.items():
value=' '.join(v.split())
print(f'{k}: {value}', file=logfile)
print(file=logfile)
print('Response payload:', file=logfile)
print(flow.response.content.decode(), file=logfile)
| 41.157895
| 73
| 0.618926
|
d496e5724c70bd6b2b270eda0302ccf1ea71827c
| 19,976
|
py
|
Python
|
dynamic_rest/viewsets.py
|
sagargp/dynamic-rest
|
116dccb5e3c32b2dd9009f0700d5fa5f2a187c23
|
[
"MIT"
] | null | null | null |
dynamic_rest/viewsets.py
|
sagargp/dynamic-rest
|
116dccb5e3c32b2dd9009f0700d5fa5f2a187c23
|
[
"MIT"
] | null | null | null |
dynamic_rest/viewsets.py
|
sagargp/dynamic-rest
|
116dccb5e3c32b2dd9009f0700d5fa5f2a187c23
|
[
"MIT"
] | 1
|
2018-10-22T20:55:42.000Z
|
2018-10-22T20:55:42.000Z
|
"""This module contains custom viewset classes."""
from django.core.exceptions import ObjectDoesNotExist
from django.http import QueryDict
from django.utils import six
from rest_framework import exceptions, status, viewsets
from rest_framework.exceptions import ValidationError
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.response import Response
from dynamic_rest.conf import settings
from dynamic_rest.filters import DynamicFilterBackend, DynamicSortingFilter
from dynamic_rest.metadata import DynamicMetadata
from dynamic_rest.pagination import DynamicPageNumberPagination
from dynamic_rest.processors import SideloadingProcessor
from dynamic_rest.utils import is_truthy
UPDATE_REQUEST_METHODS = ('PUT', 'PATCH', 'POST')
DELETE_REQUEST_METHOD = 'DELETE'
class QueryParams(QueryDict):
"""
    Extension of Django's QueryDict. Instantiated from a DRF Request
    object, it behaves as a mutable QueryDict subclass and adds methods that
    might be useful for our use case.
"""
def __init__(self, query_params, *args, **kwargs):
if hasattr(query_params, 'urlencode'):
query_string = query_params.urlencode()
else:
assert isinstance(
query_params,
(six.string_types, six.binary_type)
)
query_string = query_params
kwargs['mutable'] = True
super(QueryParams, self).__init__(query_string, *args, **kwargs)
def add(self, key, value):
"""
Method to accept a list of values and append to flat list.
QueryDict.appendlist(), if given a list, will append the list,
which creates nested lists. In most cases, we want to be able
to pass in a list (for convenience) but have it appended into
a flattened list.
TODO: Possibly throw an error if add() is used on a non-list param.
"""
if isinstance(value, list):
for val in value:
self.appendlist(key, val)
else:
self.appendlist(key, value)
class WithDynamicViewSetMixin(object):
"""A viewset that can support dynamic API features.
Attributes:
features: A list of features supported by the viewset.
meta: Extra data that is added to the response by the DynamicRenderer.
"""
DEBUG = 'debug'
SIDELOADING = 'sideloading'
INCLUDE = 'include[]'
EXCLUDE = 'exclude[]'
FILTER = 'filter{}'
SORT = 'sort[]'
PAGE = settings.PAGE_QUERY_PARAM
PER_PAGE = settings.PAGE_SIZE_QUERY_PARAM
# TODO: add support for `sort{}`
pagination_class = DynamicPageNumberPagination
metadata_class = DynamicMetadata
features = (
DEBUG,
INCLUDE,
EXCLUDE,
FILTER,
PAGE,
PER_PAGE,
SORT,
SIDELOADING
)
meta = None
filter_backends = (DynamicFilterBackend, DynamicSortingFilter)
def initialize_request(self, request, *args, **kargs):
"""
Override DRF initialize_request() method to swap request.GET
(which is aliased by request.query_params) with a mutable instance
of QueryParams, and to convert request MergeDict to a subclass of dict
for consistency (MergeDict is not a subclass of dict)
"""
def handle_encodings(request):
"""
WSGIRequest does not support Unicode values in the query string.
WSGIRequest handling has a history of drifting behavior between
combinations of Python versions, Django versions and DRF versions.
Django changed its QUERY_STRING handling here:
https://goo.gl/WThXo6. DRF 3.4.7 changed its behavior here:
https://goo.gl/0ojIIO.
"""
try:
return QueryParams(request.GET)
except UnicodeEncodeError:
pass
s = request.environ.get('QUERY_STRING', '')
try:
s = s.encode('utf-8')
except UnicodeDecodeError:
pass
return QueryParams(s)
request.GET = handle_encodings(request)
request = super(WithDynamicViewSetMixin, self).initialize_request(
request, *args, **kargs
)
try:
# Django<1.9, DRF<3.2
# MergeDict doesn't have the same API as dict.
# Django has deprecated MergeDict and DRF is moving away from
            # using it - thus, we're comfortable replacing it with a QueryDict.
# This will allow the data property to have normal dict methods.
from django.utils.datastructures import MergeDict
if isinstance(request._full_data, MergeDict):
data_as_dict = request.data.dicts[0]
for d in request.data.dicts[1:]:
data_as_dict.update(d)
request._full_data = data_as_dict
except:
pass
return request
def get_renderers(self):
"""Optionally block Browsable API rendering. """
renderers = super(WithDynamicViewSetMixin, self).get_renderers()
if settings.ENABLE_BROWSABLE_API is False:
return [
r for r in renderers if not isinstance(r, BrowsableAPIRenderer)
]
else:
return renderers
def get_request_feature(self, name):
"""Parses the request for a particular feature.
Arguments:
name: A feature name.
Returns:
A feature parsed from the URL if the feature is supported, or None.
"""
if '[]' in name:
# array-type
return self.request.query_params.getlist(
name) if name in self.features else None
elif '{}' in name:
# object-type (keys are not consistent)
return self._extract_object_params(
name) if name in self.features else {}
else:
# single-type
return self.request.query_params.get(
name) if name in self.features else None
def _extract_object_params(self, name):
"""
Extract object params, return as dict
"""
params = self.request.query_params.lists()
params_map = {}
prefix = name[:-1]
offset = len(prefix)
for name, value in params:
if name.startswith(prefix):
if name.endswith('}'):
name = name[offset:-1]
elif name.endswith('}[]'):
# strip off trailing []
# this fixes an Ember queryparams issue
name = name[offset:-3]
else:
# malformed argument like:
# filter{foo=bar
raise exceptions.ParseError(
'"%s" is not a well-formed filter key.' % name
)
else:
continue
params_map[name] = value
return params_map
def get_queryset(self, queryset=None):
"""
Returns a queryset for this request.
Arguments:
queryset: Optional root-level queryset.
"""
serializer = self.get_serializer()
return getattr(self, 'queryset', serializer.Meta.model.objects.all())
def get_request_fields(self):
"""Parses the INCLUDE and EXCLUDE features.
Extracts the dynamic field features from the request parameters
into a field map that can be passed to a serializer.
Returns:
A nested dict mapping serializer keys to
True (include) or False (exclude).
"""
if hasattr(self, '_request_fields'):
return self._request_fields
include_fields = self.get_request_feature(self.INCLUDE)
exclude_fields = self.get_request_feature(self.EXCLUDE)
request_fields = {}
        for fields, include in (
(include_fields, True),
(exclude_fields, False)):
if fields is None:
continue
for field in fields:
field_segments = field.split('.')
num_segments = len(field_segments)
current_fields = request_fields
for i, segment in enumerate(field_segments):
last = i == num_segments - 1
if segment:
if last:
current_fields[segment] = include
else:
if segment not in current_fields:
current_fields[segment] = {}
current_fields = current_fields[segment]
elif not last:
# empty segment must be the last segment
raise exceptions.ParseError(
'"%s" is not a valid field.' %
field
)
self._request_fields = request_fields
return request_fields
def get_request_debug(self):
debug = self.get_request_feature(self.DEBUG)
return is_truthy(debug) if debug is not None else None
def get_request_sideloading(self):
sideloading = self.get_request_feature(self.SIDELOADING)
return is_truthy(sideloading) if sideloading is not None else None
def is_update(self):
if (
self.request and
self.request.method.upper() in UPDATE_REQUEST_METHODS
):
return True
else:
return False
def is_delete(self):
if (
self.request and
self.request.method.upper() == DELETE_REQUEST_METHOD
):
return True
else:
return False
def get_serializer(self, *args, **kwargs):
if 'request_fields' not in kwargs:
kwargs['request_fields'] = self.get_request_fields()
if 'sideloading' not in kwargs:
kwargs['sideloading'] = self.get_request_sideloading()
if 'debug' not in kwargs:
kwargs['debug'] = self.get_request_debug()
if 'envelope' not in kwargs:
kwargs['envelope'] = True
if self.is_update():
kwargs['include_fields'] = '*'
return super(
WithDynamicViewSetMixin, self
).get_serializer(
*args, **kwargs
)
def paginate_queryset(self, *args, **kwargs):
if self.PAGE in self.features:
# make sure pagination is enabled
if (
self.PER_PAGE not in self.features and
self.PER_PAGE in self.request.query_params
):
# remove per_page if it is disabled
self.request.query_params[self.PER_PAGE] = None
return super(
WithDynamicViewSetMixin, self
).paginate_queryset(
*args, **kwargs
)
return None
def _prefix_inex_params(self, request, feature, prefix):
values = self.get_request_feature(feature)
if not values:
return
del request.query_params[feature]
request.query_params.add(
feature,
[prefix + val for val in values]
)
def list_related(self, request, pk=None, field_name=None):
"""Fetch related object(s), as if sideloaded (used to support
link objects).
This method gets mapped to `/<resource>/<pk>/<field_name>/` by
DynamicRouter for all DynamicRelationField fields. Generally,
this method probably shouldn't be overridden.
An alternative implementation would be to generate reverse queries.
For an exploration of that approach, see:
https://gist.github.com/ryochiji/54687d675978c7d96503
"""
        # Explicitly disable support for filtering. Applying filters to this
# endpoint would require us to pass through sideload filters, which
# can have unintended consequences when applied asynchronously.
if self.get_request_feature(self.FILTER):
raise ValidationError(
'Filtering is not enabled on relation endpoints.'
)
# Prefix include/exclude filters with field_name so it's scoped to
# the parent object.
field_prefix = field_name + '.'
self._prefix_inex_params(request, self.INCLUDE, field_prefix)
self._prefix_inex_params(request, self.EXCLUDE, field_prefix)
# Filter for parent object, include related field.
self.request.query_params.add('filter{pk}', pk)
self.request.query_params.add(self.INCLUDE, field_prefix)
# Get serializer and field.
serializer = self.get_serializer()
field = serializer.fields.get(field_name)
if field is None:
raise ValidationError('Unknown field: "%s".' % field_name)
# Query for root object, with related field prefetched
queryset = self.get_queryset()
queryset = self.filter_queryset(queryset)
obj = queryset.first()
if not obj:
return Response("Not found", status=404)
# Serialize the related data. Use the field's serializer to ensure
# it's configured identically to the sideload case.
serializer = field.get_serializer(envelope=True)
try:
# TODO(ryo): Probably should use field.get_attribute() but that
# seems to break a bunch of things. Investigate later.
serializer.instance = getattr(obj, field.source)
except ObjectDoesNotExist:
# See:
# http://jsonapi.org/format/#fetching-relationships-responses-404
# This is a case where the "link URL exists but the relationship
# is empty" and therefore must return a 200.
return Response({}, status=200)
return Response(serializer.data)
def get_extra_filters(self, request):
# Override this method to enable addition of extra filters
# (i.e., a Q()) so custom filters can be added to the queryset without
# running into https://code.djangoproject.com/ticket/18437
# which, without this, would mean that filters added to the queryset
# after this is called may not behave as expected.
return None
class DynamicModelViewSet(WithDynamicViewSetMixin, viewsets.ModelViewSet):
ENABLE_BULK_PARTIAL_CREATION = settings.ENABLE_BULK_PARTIAL_CREATION
ENABLE_BULK_UPDATE = settings.ENABLE_BULK_UPDATE
def _get_bulk_payload(self, request):
plural_name = self.get_serializer_class().get_plural_name()
if isinstance(request.data, list):
return request.data
elif plural_name in request.data and len(request.data) == 1:
return request.data[plural_name]
return None
def _bulk_update(self, data, partial=False):
# Restrict the update to the filtered queryset.
serializer = self.get_serializer(
self.filter_queryset(self.get_queryset()),
data=data,
many=True,
partial=partial
)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return Response(serializer.data, status=status.HTTP_200_OK)
def update(self, request, *args, **kwargs):
"""Either update a single or many model instances. Use list to
indicate bulk update.
Examples:
PATCH /dogs/1/
{
'fur': 'white'
}
PATCH /dogs/
{
'dogs': [
{'id': 1, 'fur': 'white'},
{'id': 2, 'fur': 'black'},
{'id': 3, 'fur': 'yellow'}
]
}
PATCH /dogs/?filter{fur.contains}=brown
[
{'id': 3, 'fur': 'gold'}
]
"""
if self.ENABLE_BULK_UPDATE:
partial = 'partial' in kwargs
bulk_payload = self._get_bulk_payload(request)
if bulk_payload:
return self._bulk_update(bulk_payload, partial)
return super(DynamicModelViewSet, self).update(request, *args,
**kwargs)
def _create_many(self, data):
items = []
errors = []
result = {}
serializers = []
for entry in data:
serializer = self.get_serializer(data=entry)
try:
serializer.is_valid(raise_exception=True)
except exceptions.ValidationError as e:
errors.append({
'detail': str(e),
'source': entry
})
else:
if self.ENABLE_BULK_PARTIAL_CREATION:
self.perform_create(serializer)
items.append(
serializer.to_representation(serializer.instance))
else:
serializers.append(serializer)
if not self.ENABLE_BULK_PARTIAL_CREATION and not errors:
for serializer in serializers:
self.perform_create(serializer)
items.append(
serializer.to_representation(serializer.instance))
# Populate serialized data to the result.
result = SideloadingProcessor(
self.get_serializer(),
items
).data
# Include errors if any.
if errors:
result['errors'] = errors
code = (status.HTTP_201_CREATED if not errors else
status.HTTP_400_BAD_REQUEST)
return Response(result, status=code)
def create(self, request, *args, **kwargs):
"""
Either create a single or many model instances in bulk
using the Serializer's many=True ability from Django REST >= 2.2.5.
The data can be represented by the serializer name (single or plural
forms), dict or list.
Examples:
POST /dogs/
{
"name": "Fido",
"age": 2
}
POST /dogs/
{
"dog": {
"name": "Lucky",
"age": 3
}
}
POST /dogs/
{
"dogs": [
{"name": "Fido", "age": 2},
{"name": "Lucky", "age": 3}
]
}
POST /dogs/
[
{"name": "Fido", "age": 2},
{"name": "Lucky", "age": 3}
]
"""
bulk_payload = self._get_bulk_payload(request)
if bulk_payload:
return self._create_many(bulk_payload)
return super(DynamicModelViewSet, self).create(
request, *args, **kwargs)
def _destroy_many(self, data):
instances = self.get_queryset().filter(
id__in=[d['id'] for d in data]
).distinct()
for instance in instances:
self.check_object_permissions(self.request, instance)
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def destroy(self, request, *args, **kwargs):
"""
Either delete a single or many model instances in bulk
DELETE /dogs/
{
"dogs": [
{"id": 1},
{"id": 2}
]
}
DELETE /dogs/
[
{"id": 1},
{"id": 2}
]
"""
bulk_payload = self._get_bulk_payload(request)
if bulk_payload:
return self._destroy_many(bulk_payload)
lookup_url_kwarg = self.lookup_url_kwarg or self.lookup_field
if lookup_url_kwarg not in kwargs:
# assume that it is a poorly formatted bulk request
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED)
return super(DynamicModelViewSet, self).destroy(
request, *args, **kwargs
)
| 34.560554
| 79
| 0.579095
|
e7b927fe8c62b162f5d2ea94d699fcae7b414a28
| 4,901
|
py
|
Python
|
optimus/engines/functions.py
|
Pcosmin/Optimus
|
ef3306d1b752bbfb1959ddb9103786acb8e9b9ba
|
[
"Apache-2.0"
] | 1
|
2020-09-22T13:04:37.000Z
|
2020-09-22T13:04:37.000Z
|
optimus/engines/functions.py
|
rafaelang/Optimus
|
809088f41588c968b2e30210f98a494a497b07ff
|
[
"Apache-2.0"
] | null | null | null |
optimus/engines/functions.py
|
rafaelang/Optimus
|
809088f41588c968b2e30210f98a494a497b07ff
|
[
"Apache-2.0"
] | null | null | null |
# Aggregations
from optimus.engines.base.functions import Functions
def min(series):
return Functions.min(series)
def max(series):
return Functions.max(series)
def kurtosis(series):
return series.functions.kurtosis()
# return Functions.kurtosis(series)
def skew(series):
return series.functions.skew()
    # return Functions.skew(series)
def mean(series):
return Functions.mean(series)
def mad(series, *args):
return Functions.mad(series, *args)
def mode(series):
return Functions.mode(series)
def std(series):
return Functions.std(series)
def sum(series):
return Functions.sum(series)
def var(series):
return Functions.var(series)
# Math
def abs(series):
return Functions.abs(series)
def exp(series):
return series.functions.exp()
def sqrt(series):
return series.functions.sqrt()
def mod(series, other):
return Functions.mod(series, other)
def pow(series, other):
return Functions.pow(series, other)
def floor(series):
return series.functions.floor()
def ceil(series):
return series.functions.ceil()
def round(series, decimals):
return Functions.round(series, decimals)
def range(series):
return series.functions.range()
def radians(series):
return series.functions.radian()
def degrees(series):
return series.functions.degrees()
def ln(series):
return series.functions.ln()
def log(series):
return series.functions.log()
# Trigonometrics
def sin(series):
return series.functions.sin()
def cos(series):
return series.functions.cos()
def tan(series):
return series.functions.tan()
def asin(series):
return series.functions.asin()
def acos(series):
return series.functions.acos()
def atan(series):
return series.functions.atan()
def sinh(series):
return series.functions.sinh()
def cosh(series):
return series.functions.cosh()
def tanh(series):
return series.functions.tanh()
def asinh(series):
return series.functions.asinh()
def acosh(series):
return series.functions.acosh()
def atanh(series):
return series.functions.atanh()
# strings
def lower(series):
return Functions.lower(series)
def upper(series):
return Functions.upper(series)
def extract(series):
return Functions.extract(series)
def slice(series):
return Functions.slice(series)
def percentile(series, *args):
return Functions.percentile(series, *args)
def proper(series):
return Functions.proper(series)
def trim(series):
return Functions.trim(series)
def remove_white_spaces(series):
return Functions.remove_white_spaces(series)
def len(series):
return Functions.len(series)
def find(series):
return Functions.find(series)
def rfind(series):
return Functions.rfind(series)
def left(series):
return Functions.left(series)
def right(series):
return Functions.right(series)
def starts_with(series):
return Functions.starts_with(series)
def ends_with(series):
return Functions.ends_with(series)
def char(series):
return Functions.char(series)
def unicode(series):
return Functions.unicode(series)
def exact(series):
return Functions.exact(series)
# dates
def date_format(series, current_format=None, output_format=None):
return series.functions.date_format(current_format=current_format, output_format=output_format)
def year(series, format):
return Functions.year(series, format)
def month(series, format):
return Functions.month(series, format)
def day(series, format):
return Functions.day(series, format)
def hour(series, format):
return Functions.hour(series, format)
def minute(series, format):
return Functions.minute(series, format)
def second(series, format):
return Functions.second(series, format)
def weekday(series, format):
return Functions.weekday(series, format)
def years_between(series, date_format):
return series.functions.years_between(date_format)
# other
def count_na(series):
return Functions.count_na(series)
def count_zeros(series):
return Functions.count_zeros(series)
def count_uniques(series, *args):
return Functions.count_uniques(series, *args)
def unique(series, *args):
return Functions.unique(series, *args)
def replace_string(series, *args):
return series.functions.replace_string(*args)
def replace_words(series, *args):
return Functions.replace_words(series, *args)
def replace_match(series, *args):
return Functions.replace_match(series, *args)
def remove_special_chars(series, *args):
return series.functions.remove_special_chars()
# return Functions.remove_special_chars(series, *args)
def remove_accents(series):
return series.functions.remove_accents()
def clip(series, lower_bound, upper_bound):
return series.functions.clip(lower_bound, upper_bound)
| 16.282392
| 99
| 0.725566
|
c8f6dc79e3b8698a8966084a2852180d5ee19050
| 9,072
|
py
|
Python
|
mlks/runner/run_http.py
|
ddsky/keras-machine-learning-framework
|
049ee78984f2f61165ff67506b0ced74b2928be9
|
[
"MIT"
] | 1
|
2020-11-10T09:00:54.000Z
|
2020-11-10T09:00:54.000Z
|
mlks/runner/run_http.py
|
ddsky/keras-machine-learning-framework
|
049ee78984f2f61165ff67506b0ced74b2928be9
|
[
"MIT"
] | null | null | null |
mlks/runner/run_http.py
|
ddsky/keras-machine-learning-framework
|
049ee78984f2f61165ff67506b0ced74b2928be9
|
[
"MIT"
] | null | null | null |
# Machine Learning Keras Suite
#
# The base http server for debugging purposes
#
# Author: Björn Hempel <bjoern@hempel.li>
# Date: 13.10.2019
# Web: https://github.com/bjoern-hempel/machine-learning-keras-suite
#
# LICENSE
#
# MIT License
#
# Copyright (c) 2019 Björn Hempel <bjoern@hempel.li>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ssl
import click
import os
import sys
from http.server import HTTPServer
from mlks.http.simple_http_request_handler import SimpleHTTPRequestHandler
from mlks.helper.filesystem import get_root_project_path, get_formatted_file_size, get_changed_date, get_database
class HttpRunner:
@staticmethod
def POST_prediction_hook(argument, upload_data):
# only flower and food models are allowed at the moment
if argument not in ['flower', 'food']:
raise AssertionError('Unsupported model type "%s".' % argument)
# get file to evaluate
evaluation_file = upload_data['upload_path']
evaluation_file_web = upload_data['upload_path_web']
prediction_array = HttpRunner.get_fake_prediction_array(argument)
print(prediction_array)
prediction_class = prediction_array['prediction_class']
prediction_accuracy = prediction_array['prediction_accuracy']
prediction_overview_array = prediction_array['prediction_array']
return_value = {
'evaluated_file': evaluation_file,
'graph_file': evaluation_file,
'evaluated_file_web': evaluation_file_web,
'graph_file_web': evaluation_file_web,
'prediction_overview_array': prediction_overview_array,
'prediction_class': prediction_class,
'prediction_accuracy': prediction_accuracy,
'prediction_time': 0.1
}
return return_value
@staticmethod
def GET_prediction_get_model_hook(argument):
return HttpRunner.get_model_data(argument)
@staticmethod
def POST_prediction_get_model_hook(argument):
return HttpRunner.get_model_data(argument)
@staticmethod
def get_model_data(argument):
if argument not in SimpleHTTPRequestHandler.allowed_model_types:
raise AssertionError('Unknown model type "%s"' % argument)
model_path = 'C:/Users/bjoern/data/processed/flower_10/flower_10_1.inceptionv3.best.17-0.95.h5'
model_name = os.path.basename(model_path)
model_size = get_formatted_file_size(model_path) if os.path.isfile(model_path) else '121.12 MB'
model_classes = 12
model_learning_epochs = 20
model_date = get_changed_date(model_path) if os.path.isfile(model_path) else '2019-10-20T11:54:25.125386+00:00'
model_version = '1.02'
return {
'model_name': model_name,
'model_size': model_size,
'model_classes': model_classes,
'model_learning_epochs': model_learning_epochs,
'model_date': model_date,
'model_version': model_version
}
@staticmethod
@click.command()
@click.option('--data-path', '-d', required=True, type=str)
@click.option('--port', '-p', required=False, type=int, default=8080, show_default=True)
@click.option('--port-ssl', '-p', required=False, type=int, default=4443, show_default=True)
@click.option('--bind_ip', '-i', required=False, type=str, default='0.0.0.0', show_default=True)
def run(data_path, port, port_ssl, bind_ip):
"""This scripts starts a simple demo http service for testing purpose."""
try:
SimpleHTTPRequestHandler.set_hook('POST_prediction', {
'lambda': HttpRunner.POST_prediction_hook,
'arguments': []
})
SimpleHTTPRequestHandler.set_hook('POST_prediction_get_model', {
'lambda': HttpRunner.POST_prediction_get_model_hook,
'arguments': []
})
SimpleHTTPRequestHandler.set_hook('GET_prediction_get_model', {
'lambda': HttpRunner.GET_prediction_get_model_hook,
'arguments': []
})
SimpleHTTPRequestHandler.set_property('root_data_path', data_path)
SimpleHTTPRequestHandler.set_property('root_data_path_web', '/')
SimpleHTTPRequestHandler.set_property('root_project_path', get_root_project_path())
use_ssl = False
port = port_ssl if use_ssl else port
httpd = HTTPServer((bind_ip, port), SimpleHTTPRequestHandler)
print('Web server started on %s:%d...' % (bind_ip, port))
# activate ssl (openssl req -newkey rsa:2048 -new -nodes -keyout key.pem -out csr.pem)
if use_ssl:
httpd.socket = ssl.wrap_socket(
httpd.socket,
keyfile='./key.pem',
certfile='./csr.pem',
server_side=True
)
httpd.serve_forever()
except KeyboardInterrupt:
print('^C received, shutting down the web server')
httpd.socket.close()
@staticmethod
def get_fake_prediction_array(model_type):
prediction_array = {
'flower': {
'prediction_class': 'dahlia',
'prediction_accuracy': 94.21,
'prediction_array': [
{
'class_name': 'dahlia',
'predicted_value': 0.9421
},
{
'class_name': 'sunflower',
'predicted_value': 0.0309
},
{
'class_name': 'rose',
'predicted_value': 0.0162
},
{
'class_name': 'coneflower',
'predicted_value': 0.0084
},
{
'class_name': 'daisy',
'predicted_value': 0.0010
}
]
},
'food': {
'prediction_class': 'pizza',
'prediction_accuracy': 94.21,
'prediction_array': [
{
'class_name': 'pizza',
'predicted_value': 0.9421
},
{
'class_name': 'burger',
'predicted_value': 0.0309
},
{
'class_name': 'salad',
'predicted_value': 0.0162
},
{
'class_name': 'brownies',
'predicted_value': 0.0084
},
{
'class_name': 'martini_on_the_rock',
'predicted_value': 0.0011
},
{
'class_name': 'dogs',
'predicted_value': 0.0010
},
{
'class_name': 'dog',
'predicted_value': 0.0009
},
{
'class_name': 'hot dogs',
'predicted_value': 0.0008
},
{
'class_name': 'hot dog',
'predicted_value': 0.0007
},
{
'class_name': 'hotdog',
'predicted_value': 0.0006
},
{
'class_name': 'cola',
'predicted_value': 0.0006
}
]
}
}
if model_type not in prediction_array:
return []
return prediction_array[model_type]
def run():
http_runner = HttpRunner()
http_runner.run()
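# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of exercising the debug hook above directly, without
# starting the HTTP server. The file paths are hypothetical placeholders;
# only the 'upload_path'/'upload_path_web' keys and the 'flower'/'food'
# model names are taken from the code above.
def _demo_prediction_hook():
    fake_upload = {
        'upload_path': '/tmp/example.jpg',          # hypothetical local path
        'upload_path_web': '/uploads/example.jpg',  # hypothetical web path
    }
    result = HttpRunner.POST_prediction_hook('flower', fake_upload)
    print(result['prediction_class'], result['prediction_accuracy'])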
| 37.958159
| 119
| 0.554563
|
0da0f34900c917f7b6f4a901b36a3959b02d4254
| 41,966
|
py
|
Python
|
src/python/pants/engine/internals/graph.py
|
g-cassie/pants
|
b73fe2816c62d265975122234013c08df9f9f4dd
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/graph.py
|
g-cassie/pants
|
b73fe2816c62d265975122234013c08df9f9f4dd
|
[
"Apache-2.0"
] | null | null | null |
src/python/pants/engine/internals/graph.py
|
g-cassie/pants
|
b73fe2816c62d265975122234013c08df9f9f4dd
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import functools
import itertools
import logging
import os.path
from dataclasses import dataclass
from pathlib import PurePath
from typing import Dict, Iterable, List, NamedTuple, Optional, Sequence, Set, Tuple, Type
from pants.base.exceptions import ResolveError
from pants.base.specs import (
AddressSpecs,
AscendantAddresses,
FilesystemLiteralSpec,
FilesystemSpecs,
Specs,
)
from pants.engine.addresses import (
Address,
Addresses,
AddressInput,
BuildFileAddress,
UnparsedAddressInputs,
)
from pants.engine.collection import Collection
from pants.engine.fs import (
EMPTY_SNAPSHOT,
Digest,
MergeDigests,
PathGlobs,
Paths,
Snapshot,
SpecsSnapshot,
)
from pants.engine.internals.target_adaptor import TargetAdaptor
from pants.engine.rules import Get, MultiGet, collect_rules, rule
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
FieldSet,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
GeneratedSources,
GenerateSourcesRequest,
HydratedSources,
HydrateSourcesRequest,
InferDependenciesRequest,
InferredDependencies,
InjectDependenciesRequest,
InjectedDependencies,
NoApplicableTargetsBehavior,
RegisteredTargetTypes,
SecondaryOwnerMixin,
Sources,
SourcesPaths,
SourcesPathsRequest,
SpecialCasedDependencies,
Subtargets,
Target,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
Targets,
TransitiveTargets,
TransitiveTargetsRequest,
UnexpandedTargets,
UnrecognizedTargetTypeException,
WrappedTarget,
generate_subtarget,
generate_subtarget_address,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import GlobalOptions, OwnersNotFoundBehavior
from pants.source.filespec import matches_filespec
from pants.util.docutil import doc_url
from pants.util.logging import LogLevel
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------------------------------------
# Address -> Target(s)
# -----------------------------------------------------------------------------------------------
@rule
async def resolve_unexpanded_targets(addresses: Addresses) -> UnexpandedTargets:
wrapped_targets = await MultiGet(Get(WrappedTarget, Address, a) for a in addresses)
return UnexpandedTargets(wrapped_target.target for wrapped_target in wrapped_targets)
@rule
async def generate_subtargets(address: Address) -> Subtargets:
if address.is_file_target:
raise ValueError(f"Cannot generate file Targets for a file Address: {address}")
wrapped_build_target = await Get(WrappedTarget, Address, address)
build_target = wrapped_build_target.target
if not build_target.has_field(Dependencies) or not build_target.has_field(Sources):
# If a target type does not support dependencies, we do not split it, as that would prevent
# the BUILD target from depending on its splits.
return Subtargets(build_target, ())
# Generate a subtarget per source.
paths = await Get(SourcesPaths, SourcesPathsRequest(build_target[Sources]))
wrapped_subtargets = await MultiGet(
Get(
WrappedTarget,
Address,
generate_subtarget_address(address, full_file_name=subtarget_file),
)
for subtarget_file in paths.files
)
return Subtargets(build_target, tuple(wt.target for wt in wrapped_subtargets))
@rule
async def resolve_target(
address: Address,
registered_target_types: RegisteredTargetTypes,
union_membership: UnionMembership,
) -> WrappedTarget:
if address.is_file_target:
build_target = await Get(WrappedTarget, Address, address.maybe_convert_to_build_target())
subtarget = generate_subtarget(
build_target.target, full_file_name=address.filename, union_membership=union_membership
)
return WrappedTarget(subtarget)
target_adaptor = await Get(TargetAdaptor, Address, address)
target_type = registered_target_types.aliases_to_types.get(target_adaptor.type_alias, None)
if target_type is None:
raise UnrecognizedTargetTypeException(
target_adaptor.type_alias, registered_target_types, address=address
)
target = target_type(target_adaptor.kwargs, address, union_membership=union_membership)
return WrappedTarget(target)
@rule
async def resolve_targets(targets: UnexpandedTargets) -> Targets:
# Split out and expand any BUILD targets.
other_targets = []
build_targets = []
for target in targets:
if not target.address.is_file_target:
build_targets.append(target)
else:
other_targets.append(target)
build_targets_subtargets = await MultiGet(
Get(Subtargets, Address, bt.address) for bt in build_targets
)
# Zip the subtargets back to the BUILD targets and replace them.
# NB: If a target had no subtargets, we use the original.
expanded_targets = OrderedSet(other_targets)
expanded_targets.update(
target
for subtargets in build_targets_subtargets
for target in (subtargets.subtargets if subtargets.subtargets else (subtargets.base,))
)
return Targets(expanded_targets)
# -----------------------------------------------------------------------------------------------
# TransitiveTargets
# -----------------------------------------------------------------------------------------------
class CycleException(Exception):
def __init__(self, subject: Address, path: Tuple[Address, ...]) -> None:
path_string = "\n".join((f"-> {a}" if a == subject else f" {a}") for a in path)
super().__init__(
f"The dependency graph contained a cycle:\n{path_string}\n\nTo fix this, first verify "
"if your code has an actual import cycle. If it does, you likely need to re-architect "
"your code to avoid the cycle.\n\nIf there is no cycle in your code, then you may need "
"to use more granular targets. Split up the problematic targets into smaller targets "
"with more granular `sources` fields so that you can adjust the `dependencies` fields "
"to avoid introducing a cycle.\n\nAlternatively, use Python dependency inference "
"(`--python-infer-imports`), rather than explicit `dependencies`. Pants will infer "
"dependencies on specific files, rather than entire targets. This extra precision "
"means that you will only have cycles if your code actually does have cycles in it."
)
self.subject = subject
self.path = path
def _detect_cycles(
roots: Tuple[Address, ...], dependency_mapping: Dict[Address, Tuple[Address, ...]]
) -> None:
path_stack: OrderedSet[Address] = OrderedSet()
visited: Set[Address] = set()
def maybe_report_cycle(address: Address) -> None:
# NB: File-level dependencies are cycle tolerant.
if address.is_file_target or address not in path_stack:
return
# The path of the cycle is shorter than the entire path to the cycle: if the suffix of
# the path representing the cycle contains a file dep, it is ignored.
in_cycle = False
for path_address in path_stack:
if in_cycle and path_address.is_file_target:
# There is a file address inside the cycle: do not report it.
return
elif in_cycle:
# Not a file address.
continue
else:
# We're entering the suffix of the path that contains the cycle if we've reached
# the address in question.
in_cycle = path_address == address
# If we did not break out early, it's because there were no file addresses in the cycle.
raise CycleException(address, (*path_stack, address))
def visit(address: Address):
if address in visited:
maybe_report_cycle(address)
return
path_stack.add(address)
visited.add(address)
for dep_address in dependency_mapping[address]:
visit(dep_address)
path_stack.remove(address)
for root in roots:
visit(root)
if path_stack:
raise AssertionError(
f"The stack of visited nodes should have been empty at the end of recursion, "
f"but it still contained: {path_stack}"
)
@rule(desc="Resolve transitive targets")
async def transitive_targets(request: TransitiveTargetsRequest) -> TransitiveTargets:
"""Find all the targets transitively depended upon by the target roots.
This uses iteration, rather than recursion, so that we can tolerate dependency cycles. Unlike a
traditional BFS algorithm, we batch each round of traversals via `MultiGet` for improved
performance / concurrency.
"""
roots_as_targets = await Get(Targets, Addresses(request.roots))
visited: OrderedSet[Target] = OrderedSet()
queued = FrozenOrderedSet(roots_as_targets)
dependency_mapping: Dict[Address, Tuple[Address, ...]] = {}
while queued:
direct_dependencies = await MultiGet(
Get(
Targets,
DependenciesRequest(
tgt.get(Dependencies),
include_special_cased_deps=request.include_special_cased_deps,
),
)
for tgt in queued
)
dependency_mapping.update(
zip(
(t.address for t in queued),
(tuple(t.address for t in deps) for deps in direct_dependencies),
)
)
queued = FrozenOrderedSet(itertools.chain.from_iterable(direct_dependencies)).difference(
visited
)
visited.update(queued)
# NB: We use `roots_as_targets` to get the root addresses, rather than `request.roots`. This
# is because expanding from the `Addresses` -> `Targets` may have resulted in generated
# subtargets being used, so we need to use `roots_as_targets` to have this expansion.
_detect_cycles(tuple(t.address for t in roots_as_targets), dependency_mapping)
# Apply any transitive excludes (`!!` ignores).
transitive_excludes: FrozenOrderedSet[Target] = FrozenOrderedSet()
unevaluated_transitive_excludes = []
for t in (*roots_as_targets, *visited):
unparsed = t.get(Dependencies).unevaluated_transitive_excludes
if unparsed.values:
unevaluated_transitive_excludes.append(unparsed)
if unevaluated_transitive_excludes:
nested_transitive_excludes = await MultiGet(
Get(Targets, UnparsedAddressInputs, unparsed)
for unparsed in unevaluated_transitive_excludes
)
transitive_excludes = FrozenOrderedSet(
itertools.chain.from_iterable(excludes for excludes in nested_transitive_excludes)
)
return TransitiveTargets(
tuple(roots_as_targets), FrozenOrderedSet(visited.difference(transitive_excludes))
)
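# --- Illustrative sketch (not part of the original module) ---
# A minimal, engine-free illustration of the batched traversal described in
# the docstring above: each round expands the whole frontier at once (the
# stand-in for the batched `MultiGet`), and cycles are tolerated because
# nodes already visited are never re-queued. `deps_of` is a hypothetical
# callable, not a Pants API.
def _batched_bfs_sketch(roots, deps_of):
    dependency_mapping = {}
    visited = set(roots)
    frontier = set(roots)
    while frontier:
        # One "round": resolve dependencies for the entire frontier at once.
        round_deps = {node: tuple(deps_of(node)) for node in frontier}
        dependency_mapping.update(round_deps)
        discovered = {dep for deps in round_deps.values() for dep in deps}
        frontier = discovered - visited  # back-edges (cycles) are skipped here
        visited.update(frontier)
    return dependency_mapping
# Example: _batched_bfs_sketch(['a'], lambda n: {'a': ['b'], 'b': ['a']}.get(n, ()))
# returns {'a': ('b',), 'b': ('a',)} even though a <-> b form a cycle.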
# -----------------------------------------------------------------------------------------------
# Find the owners of a file
# -----------------------------------------------------------------------------------------------
class InvalidOwnersOfArgs(Exception):
pass
@dataclass(frozen=True)
class OwnersRequest:
"""A request for the owners of a set of file paths."""
sources: Tuple[str, ...]
owners_not_found_behavior: OwnersNotFoundBehavior = OwnersNotFoundBehavior.ignore
class Owners(Collection[Address]):
pass
@rule(desc="Find which targets own certain files")
async def find_owners(owners_request: OwnersRequest) -> Owners:
# Determine which of the sources are live and which are deleted.
sources_paths = await Get(Paths, PathGlobs(owners_request.sources))
live_files = FrozenOrderedSet(sources_paths.files)
deleted_files = FrozenOrderedSet(s for s in owners_request.sources if s not in live_files)
live_dirs = FrozenOrderedSet(os.path.dirname(s) for s in live_files)
deleted_dirs = FrozenOrderedSet(os.path.dirname(s) for s in deleted_files)
# Walk up the buildroot looking for targets that would conceivably claim changed sources.
# For live files, we use expanded Targets, which have file level precision but which are
# only created for existing files. For deleted files we use UnexpandedTargets, which have
# the original declared glob.
live_candidate_specs = tuple(AscendantAddresses(directory=d) for d in live_dirs)
deleted_candidate_specs = tuple(AscendantAddresses(directory=d) for d in deleted_dirs)
live_candidate_tgts, deleted_candidate_tgts = await MultiGet(
Get(Targets, AddressSpecs(live_candidate_specs)),
Get(UnexpandedTargets, AddressSpecs(deleted_candidate_specs)),
)
matching_addresses: OrderedSet[Address] = OrderedSet()
unmatched_sources = set(owners_request.sources)
for live in (True, False):
candidate_tgts: Sequence[Target]
if live:
candidate_tgts = live_candidate_tgts
sources_set = live_files
else:
candidate_tgts = deleted_candidate_tgts
sources_set = deleted_files
build_file_addresses = await MultiGet(
Get(BuildFileAddress, Address, tgt.address) for tgt in candidate_tgts
)
for candidate_tgt, bfa in zip(candidate_tgts, build_file_addresses):
matching_files = set(
matches_filespec(candidate_tgt.get(Sources).filespec, paths=sources_set)
)
# Also consider secondary ownership, meaning it's not a `Sources` field with primary
# ownership, but the target still should match the file. We can't use `tgt.get()`
# because this is a mixin, and there technically may be >1 field.
secondary_owner_fields = tuple(
field # type: ignore[misc]
for field in candidate_tgt.field_values.values()
if isinstance(field, SecondaryOwnerMixin)
)
for secondary_owner_field in secondary_owner_fields:
matching_files.update(
matches_filespec(secondary_owner_field.filespec, paths=sources_set)
)
if not matching_files and bfa.rel_path not in sources_set:
continue
unmatched_sources -= matching_files
matching_addresses.add(candidate_tgt.address)
if (
unmatched_sources
and owners_request.owners_not_found_behavior != OwnersNotFoundBehavior.ignore
):
_log_or_raise_unmatched_owners(
[PurePath(path) for path in unmatched_sources], owners_request.owners_not_found_behavior
)
return Owners(matching_addresses)
# -----------------------------------------------------------------------------------------------
# Specs -> Addresses
# -----------------------------------------------------------------------------------------------
def _log_or_raise_unmatched_owners(
file_paths: Sequence[PurePath],
owners_not_found_behavior: OwnersNotFoundBehavior,
ignore_option: Optional[str] = None,
) -> None:
option_msg = (
f"\n\nIf you would like to ignore un-owned files, please pass `{ignore_option}`."
if ignore_option
else ""
)
if len(file_paths) == 1:
prefix = (
f"No owning targets could be found for the file `{file_paths[0]}`.\n\n"
f"Please check that there is a BUILD file in the parent directory "
f"{file_paths[0].parent} with a target whose `sources` field includes the file."
)
else:
prefix = (
f"No owning targets could be found for the files {sorted(map(str, file_paths))}`.\n\n"
f"Please check that there are BUILD files in each file's parent directory with a "
f"target whose `sources` field includes the file."
)
msg = (
f"{prefix} See {doc_url('targets')} for more information on target definitions."
f"\n\nYou may want to run `./pants tailor` to autogenerate your BUILD files. See "
f"{doc_url('create-initial-build-files')}.{option_msg}"
)
if owners_not_found_behavior == OwnersNotFoundBehavior.warn:
logger.warning(msg)
else:
raise ResolveError(msg)
@rule
async def addresses_from_filesystem_specs(
filesystem_specs: FilesystemSpecs, global_options: GlobalOptions
) -> Addresses:
"""Find the owner(s) for each FilesystemSpec.
Every returned address will be a generated subtarget, meaning that each address will have
exactly one file in its `sources` field.
"""
owners_not_found_behavior = global_options.options.owners_not_found_behavior
paths_per_include = await MultiGet(
Get(
Paths,
PathGlobs,
filesystem_specs.path_globs_for_spec(
spec, owners_not_found_behavior.to_glob_match_error_behavior()
),
)
for spec in filesystem_specs.includes
)
owners_per_include = await MultiGet(
Get(Owners, OwnersRequest(sources=paths.files)) for paths in paths_per_include
)
addresses: Set[Address] = set()
for spec, owners in zip(filesystem_specs.includes, owners_per_include):
if (
owners_not_found_behavior != OwnersNotFoundBehavior.ignore
and isinstance(spec, FilesystemLiteralSpec)
and not owners
):
_log_or_raise_unmatched_owners(
[PurePath(str(spec))],
global_options.options.owners_not_found_behavior,
ignore_option="--owners-not-found-behavior=ignore",
)
addresses.update(owners)
return Addresses(sorted(addresses))
@rule(desc="Find targets from input specs", level=LogLevel.DEBUG)
async def resolve_addresses_from_specs(specs: Specs) -> Addresses:
from_address_specs, from_filesystem_specs = await MultiGet(
Get(Addresses, AddressSpecs, specs.address_specs),
Get(Addresses, FilesystemSpecs, specs.filesystem_specs),
)
# We use a set to dedupe because it's possible to have the same address from both an address
# and filesystem spec.
return Addresses(sorted({*from_address_specs, *from_filesystem_specs}))
# -----------------------------------------------------------------------------------------------
# SourcesSnapshot
# -----------------------------------------------------------------------------------------------
@rule(desc="Find all sources from input specs", level=LogLevel.DEBUG)
async def resolve_specs_snapshot(specs: Specs, global_options: GlobalOptions) -> SpecsSnapshot:
"""Resolve all files matching the given specs.
Address specs will use their `Sources` field, and Filesystem specs will use whatever args were
given. Filesystem specs may safely refer to files with no owning target.
"""
targets = await Get(Targets, AddressSpecs, specs.address_specs)
all_hydrated_sources = await MultiGet(
Get(HydratedSources, HydrateSourcesRequest(tgt[Sources]))
for tgt in targets
if tgt.has_field(Sources)
)
filesystem_specs_digest = (
await Get(
Digest,
PathGlobs,
specs.filesystem_specs.to_path_globs(
global_options.options.owners_not_found_behavior.to_glob_match_error_behavior()
),
)
if specs.filesystem_specs
else None
)
# NB: We merge into a single snapshot to avoid the same files being duplicated if they were
# covered both by address specs and filesystem specs.
digests = [hydrated_sources.snapshot.digest for hydrated_sources in all_hydrated_sources]
if filesystem_specs_digest:
digests.append(filesystem_specs_digest)
result = await Get(Snapshot, MergeDigests(digests))
return SpecsSnapshot(result)
# -----------------------------------------------------------------------------------------------
# Resolve the Sources field
# -----------------------------------------------------------------------------------------------
class AmbiguousCodegenImplementationsException(Exception):
"""Exception for when there are multiple codegen implementations and it is ambiguous which to
use."""
def __init__(
self,
generators: Iterable[Type["GenerateSourcesRequest"]],
*,
for_sources_types: Iterable[Type["Sources"]],
) -> None:
bulleted_list_sep = "\n * "
all_same_generator_paths = (
len(set((generator.input, generator.output) for generator in generators)) == 1
)
example_generator = list(generators)[0]
input = example_generator.input.__name__
if all_same_generator_paths:
output = example_generator.output.__name__
possible_generators = sorted(generator.__name__ for generator in generators)
super().__init__(
f"Multiple of the registered code generators can generate {output} from {input}. "
"It is ambiguous which implementation to use.\n\nPossible implementations:"
f"{bulleted_list_sep}{bulleted_list_sep.join(possible_generators)}"
)
else:
possible_output_types = sorted(
generator.output.__name__
for generator in generators
if issubclass(generator.output, tuple(for_sources_types))
)
possible_generators_with_output = [
f"{generator.__name__} -> {generator.output.__name__}"
for generator in sorted(generators, key=lambda generator: generator.output.__name__)
]
super().__init__(
f"Multiple of the registered code generators can generate one of "
f"{possible_output_types} from {input}. It is ambiguous which implementation to "
f"use. This can happen when the call site requests too many different output types "
f"from the same original protocol sources.\n\nPossible implementations with their "
f"output type: {bulleted_list_sep}"
f"{bulleted_list_sep.join(possible_generators_with_output)}"
)
@rule(desc="Hydrate the `sources` field")
async def hydrate_sources(
request: HydrateSourcesRequest,
global_options: GlobalOptions,
union_membership: UnionMembership,
) -> HydratedSources:
sources_field = request.field
# First, find if there are any code generators for the input `sources_field`. This will be used
# to determine if the sources_field is valid or not.
# We could alternatively use `sources_field.can_generate()`, but we want to error if there are
# 2+ generators due to ambiguity.
generate_request_types = union_membership.get(GenerateSourcesRequest)
relevant_generate_request_types = [
generate_request_type
for generate_request_type in generate_request_types
if isinstance(sources_field, generate_request_type.input)
and issubclass(generate_request_type.output, request.for_sources_types)
]
if request.enable_codegen and len(relevant_generate_request_types) > 1:
raise AmbiguousCodegenImplementationsException(
relevant_generate_request_types, for_sources_types=request.for_sources_types
)
generate_request_type = next(iter(relevant_generate_request_types), None)
# Now, determine if any of the `for_sources_types` may be used, either because the
# sources_field is a direct subclass or can be generated into one of the valid types.
def compatible_with_sources_field(valid_type: Type[Sources]) -> bool:
is_instance = isinstance(sources_field, valid_type)
can_be_generated = (
request.enable_codegen
and generate_request_type is not None
and issubclass(generate_request_type.output, valid_type)
)
return is_instance or can_be_generated
sources_type = next(
(
valid_type
for valid_type in request.for_sources_types
if compatible_with_sources_field(valid_type)
),
None,
)
if sources_type is None:
return HydratedSources(EMPTY_SNAPSHOT, sources_field.filespec, sources_type=None)
# Now, hydrate the `globs`. Even if we are going to use codegen, we will need the original
# protocol sources to be hydrated.
path_globs = sources_field.path_globs(global_options.options.files_not_found_behavior)
snapshot = await Get(Snapshot, PathGlobs, path_globs)
sources_field.validate_resolved_files(snapshot.files)
# Finally, return if codegen is not in use; otherwise, run the relevant code generator.
if not request.enable_codegen or generate_request_type is None:
return HydratedSources(snapshot, sources_field.filespec, sources_type=sources_type)
wrapped_protocol_target = await Get(WrappedTarget, Address, sources_field.address)
generated_sources = await Get(
GeneratedSources,
GenerateSourcesRequest,
generate_request_type(snapshot, wrapped_protocol_target.target),
)
return HydratedSources(
generated_sources.snapshot, sources_field.filespec, sources_type=sources_type
)
@rule(desc="Resolve `sources` field file names")
async def resolve_source_paths(
request: SourcesPathsRequest, global_options: GlobalOptions
) -> SourcesPaths:
sources_field = request.field
path_globs = sources_field.path_globs(global_options.options.files_not_found_behavior)
paths = await Get(Paths, PathGlobs, path_globs)
sources_field.validate_resolved_files(paths.files)
return SourcesPaths(files=paths.files, dirs=paths.dirs)
# -----------------------------------------------------------------------------------------------
# Resolve addresses, including the Dependencies field
# -----------------------------------------------------------------------------------------------
class ParsedDependencies(NamedTuple):
addresses: List[AddressInput]
ignored_addresses: List[AddressInput]
class TransitiveExcludesNotSupportedError(ValueError):
def __init__(
self,
*,
bad_value: str,
address: Address,
registered_target_types: Sequence[Type[Target]],
union_membership: UnionMembership,
) -> None:
applicable_target_types = sorted(
target_type.alias
for target_type in registered_target_types
if (
target_type.class_has_field(Dependencies, union_membership=union_membership)
and target_type.class_get_field(
Dependencies, union_membership=union_membership
).supports_transitive_excludes
)
)
super().__init__(
f"Bad value '{bad_value}' in the `dependencies` field for {address}. "
"Transitive excludes with `!!` are not supported for this target type. Did you mean "
"to use a single `!` for a direct exclude?\n\nTransitive excludes work with these "
f"target types: {applicable_target_types}"
)
@rule
async def determine_explicitly_provided_dependencies(
request: DependenciesRequest,
union_membership: UnionMembership,
registered_target_types: RegisteredTargetTypes,
global_options: GlobalOptions,
) -> ExplicitlyProvidedDependencies:
parse = functools.partial(
AddressInput.parse,
relative_to=request.field.address.spec_path,
subproject_roots=global_options.options.subproject_roots,
)
addresses: List[AddressInput] = []
ignored_addresses: List[AddressInput] = []
for v in request.field.value or ():
is_ignore = v.startswith("!")
if is_ignore:
# Check if it's a transitive exclude, rather than a direct exclude.
if v.startswith("!!"):
if not request.field.supports_transitive_excludes:
raise TransitiveExcludesNotSupportedError(
bad_value=v,
address=request.field.address,
registered_target_types=registered_target_types.types,
union_membership=union_membership,
)
v = v[2:]
else:
v = v[1:]
result = parse(v)
if is_ignore:
ignored_addresses.append(result)
else:
addresses.append(result)
parsed_includes = await MultiGet(Get(Address, AddressInput, ai) for ai in addresses)
parsed_ignores = await MultiGet(Get(Address, AddressInput, ai) for ai in ignored_addresses)
return ExplicitlyProvidedDependencies(
FrozenOrderedSet(sorted(parsed_includes)), FrozenOrderedSet(sorted(parsed_ignores))
)
@rule(desc="Resolve direct dependencies")
async def resolve_dependencies(
request: DependenciesRequest, union_membership: UnionMembership, global_options: GlobalOptions
) -> Addresses:
explicitly_provided = await Get(ExplicitlyProvidedDependencies, DependenciesRequest, request)
# Inject any dependencies. This is determined by the `request.field` class. For example, if
# there is a rule to inject for FortranDependencies, then FortranDependencies and any subclass
# of FortranDependencies will use that rule.
inject_request_types = union_membership.get(InjectDependenciesRequest)
injected = await MultiGet(
Get(InjectedDependencies, InjectDependenciesRequest, inject_request_type(request.field))
for inject_request_type in inject_request_types
if isinstance(request.field, inject_request_type.inject_for)
)
inference_request_types = union_membership.get(InferDependenciesRequest)
inferred: Tuple[InferredDependencies, ...] = ()
if inference_request_types:
# Dependency inference is solely determined by the `Sources` field for a Target, so we
# re-resolve the original target to inspect its `Sources` field, if any.
wrapped_tgt = await Get(WrappedTarget, Address, request.field.address)
sources_field = wrapped_tgt.target.get(Sources)
relevant_inference_request_types = [
inference_request_type
for inference_request_type in inference_request_types
if isinstance(sources_field, inference_request_type.infer_from)
]
inferred = await MultiGet(
Get(
InferredDependencies,
InferDependenciesRequest,
inference_request_type(sources_field),
)
for inference_request_type in relevant_inference_request_types
)
# If this is a BUILD target, or no dependency inference implementation can infer dependencies on
# a file address's sibling files, then we inject dependencies on all the BUILD target's
# generated subtargets.
subtarget_addresses: Tuple[Address, ...] = ()
no_sibling_file_deps_inferrable = not inferred or all(
inferred_deps.sibling_dependencies_inferrable is False for inferred_deps in inferred
)
if not request.field.address.is_file_target or no_sibling_file_deps_inferrable:
subtargets = await Get(
Subtargets, Address, request.field.address.maybe_convert_to_build_target()
)
subtarget_addresses = tuple(
t.address for t in subtargets.subtargets if t.address != request.field.address
)
# If the target has `SpecialCasedDependencies`, such as the `archive` target having
# `files` and `packages` fields, then we possibly include those too. We don't want to always
# include those dependencies because they should often be excluded from the result due to
# being handled elsewhere in the calling code.
special_cased: Tuple[Address, ...] = ()
if request.include_special_cased_deps:
wrapped_tgt = await Get(WrappedTarget, Address, request.field.address)
# Unlike normal, we don't use `tgt.get()` because there may be >1 subclass of
# SpecialCasedDependencies.
special_cased_fields = tuple(
field
for field in wrapped_tgt.target.field_values.values()
if isinstance(field, SpecialCasedDependencies)
)
# We can't use the normal `Get(Addresses, UnparsedAddressInputs)` due to a graph cycle.
special_cased = await MultiGet(
Get(
Address,
AddressInput,
AddressInput.parse(
addr,
relative_to=request.field.address.spec_path,
subproject_roots=global_options.options.subproject_roots,
),
)
for special_cased_field in special_cased_fields
for addr in special_cased_field.to_unparsed_address_inputs().values
)
result = {
addr
for addr in (
*subtarget_addresses,
*explicitly_provided.includes,
*itertools.chain.from_iterable(injected),
*itertools.chain.from_iterable(inferred),
*special_cased,
)
if addr not in explicitly_provided.ignores
}
return Addresses(sorted(result))
@rule(desc="Resolve addresses")
async def resolve_unparsed_address_inputs(
request: UnparsedAddressInputs, global_options: GlobalOptions
) -> Addresses:
addresses = await MultiGet(
Get(
Address,
AddressInput,
AddressInput.parse(
v,
relative_to=request.relative_to,
subproject_roots=global_options.options.subproject_roots,
),
)
for v in request.values
)
return Addresses(addresses)
# -----------------------------------------------------------------------------------------------
# Find applicable field sets
# -----------------------------------------------------------------------------------------------
class NoApplicableTargetsException(Exception):
def __init__(
self,
targets: Iterable[Target],
specs: Specs,
union_membership: UnionMembership,
*,
applicable_target_types: Iterable[Type[Target]],
goal_description: str,
) -> None:
applicable_target_aliases = sorted(
{target_type.alias for target_type in applicable_target_types}
)
inapplicable_target_aliases = sorted({tgt.alias for tgt in targets})
bulleted_list_sep = "\n * "
msg = (
"No applicable files or targets matched."
if inapplicable_target_aliases
else "No files or targets specified."
)
msg += (
f" {goal_description.capitalize()} works "
f"with these target types:\n{bulleted_list_sep}"
f"{bulleted_list_sep.join(applicable_target_aliases)}\n\n"
)
# Explain what was specified, if relevant.
if inapplicable_target_aliases:
if bool(specs.filesystem_specs) and bool(specs.address_specs):
specs_description = " files and targets with "
elif bool(specs.filesystem_specs):
specs_description = " files with "
elif bool(specs.address_specs):
specs_description = " targets with "
else:
specs_description = " "
msg += (
f"However, you only specified{specs_description}these target types:\n"
f"{bulleted_list_sep}{bulleted_list_sep.join(inapplicable_target_aliases)}\n\n"
)
# Add a remedy.
#
# We sometimes suggest using `./pants filedeps` to find applicable files. However, this
# command only works if at least one of the targets has a Sources field.
#
# NB: Even with the "secondary owners" mechanism - used by target types like `pex_binary`
# and `python_awslambda` to still work with file args - those targets will not show the
# associated files when using filedeps.
filedeps_goal_works = any(
tgt.class_has_field(Sources, union_membership) for tgt in applicable_target_types
)
pants_filter_command = (
f"./pants filter --target-type={','.join(applicable_target_aliases)} ::"
)
remedy = (
f"Please specify relevant files and/or targets. Run `{pants_filter_command}` to "
"find all applicable targets in your project"
)
if filedeps_goal_works:
remedy += (
f", or run `{pants_filter_command} | xargs ./pants filedeps` to find all "
"applicable files."
)
else:
remedy += "."
msg += remedy
super().__init__(msg)
@classmethod
def create_from_field_sets(
cls,
targets: Iterable[Target],
specs: Specs,
union_membership: UnionMembership,
registered_target_types: RegisteredTargetTypes,
*,
field_set_types: Iterable[type[FieldSet]],
goal_description: str,
) -> NoApplicableTargetsException:
applicable_target_types = {
target_type
for field_set_type in field_set_types
for target_type in field_set_type.applicable_target_types(
registered_target_types.types, union_membership
)
}
return cls(
targets,
specs,
union_membership,
applicable_target_types=applicable_target_types,
goal_description=goal_description,
)
class TooManyTargetsException(Exception):
def __init__(self, targets: Iterable[Target], *, goal_description: str) -> None:
bulleted_list_sep = "\n * "
addresses = sorted(tgt.address.spec for tgt in targets)
super().__init__(
f"{goal_description.capitalize()} only works with one valid target, but was given "
f"multiple valid targets:{bulleted_list_sep}{bulleted_list_sep.join(addresses)}\n\n"
"Please select one of these targets to run."
)
class AmbiguousImplementationsException(Exception):
"""A target has multiple valid FieldSets, but a goal expects there to be one FieldSet."""
def __init__(
self,
target: Target,
field_sets: Iterable[FieldSet],
*,
goal_description: str,
) -> None:
# TODO: improve this error message. A better error message would explain to users how they
# can resolve the issue.
possible_field_sets_types = sorted(field_set.__class__.__name__ for field_set in field_sets)
bulleted_list_sep = "\n * "
super().__init__(
f"Multiple of the registered implementations for {goal_description} work for "
f"{target.address} (target type {repr(target.alias)}). It is ambiguous which "
"implementation to use.\n\nPossible implementations:"
f"{bulleted_list_sep}{bulleted_list_sep.join(possible_field_sets_types)}"
)
@rule
async def find_valid_field_sets_for_target_roots(
request: TargetRootsToFieldSetsRequest,
specs: Specs,
union_membership: UnionMembership,
registered_target_types: RegisteredTargetTypes,
) -> TargetRootsToFieldSets:
# NB: This must be in an `await Get`, rather than the rule signature, to avoid a rule graph
# issue.
targets = await Get(Targets, Specs, specs)
field_sets_per_target = await Get(
FieldSetsPerTarget, FieldSetsPerTargetRequest(request.field_set_superclass, targets)
)
targets_to_applicable_field_sets = {}
for tgt, field_sets in zip(targets, field_sets_per_target.collection):
if field_sets:
targets_to_applicable_field_sets[tgt] = field_sets
# Possibly warn or error if no targets were applicable.
if not targets_to_applicable_field_sets:
no_applicable_exception = NoApplicableTargetsException.create_from_field_sets(
targets,
specs,
union_membership,
registered_target_types,
field_set_types=union_membership[request.field_set_superclass],
goal_description=request.goal_description,
)
if request.no_applicable_targets_behavior == NoApplicableTargetsBehavior.error:
raise no_applicable_exception
if request.no_applicable_targets_behavior == NoApplicableTargetsBehavior.warn:
logger.warning(str(no_applicable_exception))
result = TargetRootsToFieldSets(targets_to_applicable_field_sets)
if not request.expect_single_field_set:
return result
if len(result.targets) > 1:
raise TooManyTargetsException(result.targets, goal_description=request.goal_description)
if len(result.field_sets) > 1:
raise AmbiguousImplementationsException(
result.targets[0], result.field_sets, goal_description=request.goal_description
)
return result
@rule
def find_valid_field_sets(
request: FieldSetsPerTargetRequest, union_membership: UnionMembership
) -> FieldSetsPerTarget:
field_set_types = union_membership.get(request.field_set_superclass)
return FieldSetsPerTarget(
(
field_set_type.create(target)
for field_set_type in field_set_types
if field_set_type.is_applicable(target)
)
for target in request.targets
)
def rules():
return collect_rules()
| 40.351923
| 100
| 0.654315
|
beb95d0398c0bb7d1bc8a17754c715dd4dc6da5f
| 112
|
py
|
Python
|
.history/ClassFiles/Control Flow/Elif_20210101185036.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/Control Flow/Elif_20210101185036.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/ClassFiles/Control Flow/Elif_20210101185036.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
''' elif Statements
Used to check multiple expressions for True conditions.
if(Condition1):
'''
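# A minimal sketch (not part of the original snippet) completing the idea
# above: `elif` lets a single if-statement check several conditions in order,
# and only the first branch whose condition is True runs.
temperature = 25  # hypothetical example value
if temperature > 30:
    print("hot")
elif temperature > 20:
    print("warm")   # this branch runs for 25
elif temperature > 10:
    print("mild")
else:
    print("cold")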
| 8.615385
| 55
| 0.651786
|
4899933dab6ffbb06a04fa8b8ab65777d7398302
| 381
|
py
|
Python
|
raspsocket.py
|
2dadsgn/smart-vase-sensor-raspberry
|
54051dec478dd94d3a4e04772a1a50c842b687ff
|
[
"Apache-2.0"
] | null | null | null |
raspsocket.py
|
2dadsgn/smart-vase-sensor-raspberry
|
54051dec478dd94d3a4e04772a1a50c842b687ff
|
[
"Apache-2.0"
] | null | null | null |
raspsocket.py
|
2dadsgn/smart-vase-sensor-raspberry
|
54051dec478dd94d3a4e04772a1a50c842b687ff
|
[
"Apache-2.0"
] | null | null | null |
import socket
import json
def invio_dati(dati, ip):
# connect to the server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,1238))
# convert to a JSON object
# send the data
s.send(bytes("#123abc", "utf-8"))
s.send(bytes(dati, "utf-8"))
# confirmation message for a successful connection
msg = s.recv(1024)
print(msg.decode("utf-8"))
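# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of calling invio_dati(). Since the function sends `dati`
# as-is, the caller is assumed to serialize it first (which is presumably why
# `json` is imported above). The field names and IP address are hypothetical.
if __name__ == '__main__':
    readings = {'temperature': 21.5, 'humidity': 48}   # hypothetical payload
    invio_dati(json.dumps(readings), '192.168.1.10')   # hypothetical server IP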
| 18.142857
| 57
| 0.645669
|
d119ccb372ef7486083ae82040aab93bfc5fe853
| 927
|
py
|
Python
|
test/test_item_number_type_dto.py
|
Dangl-IT/avacloud-client-python
|
66f555096bbbc87d02d02e4e2dfb0c6accb18f95
|
[
"RSA-MD"
] | 1
|
2019-01-12T18:10:24.000Z
|
2019-01-12T18:10:24.000Z
|
test/test_item_number_type_dto.py
|
Dangl-IT/avacloud-client-python
|
66f555096bbbc87d02d02e4e2dfb0c6accb18f95
|
[
"RSA-MD"
] | null | null | null |
test/test_item_number_type_dto.py
|
Dangl-IT/avacloud-client-python
|
66f555096bbbc87d02d02e4e2dfb0c6accb18f95
|
[
"RSA-MD"
] | null | null | null |
# coding: utf-8
"""
AVACloud API 1.17.3
AVACloud API specification # noqa: E501
OpenAPI spec version: 1.17.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import avacloud_client_python
from avacloud_client_python.models.item_number_type_dto import ItemNumberTypeDto # noqa: E501
from avacloud_client_python.rest import ApiException
class TestItemNumberTypeDto(unittest.TestCase):
"""ItemNumberTypeDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testItemNumberTypeDto(self):
"""Test ItemNumberTypeDto"""
# FIXME: construct object with mandatory attributes with example values
# model = avacloud_client_python.models.item_number_type_dto.ItemNumberTypeDto() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 22.609756
| 102
| 0.719525
|
275a94d0640c159ccd4012eaf0de555538e3df06
| 2,096
|
py
|
Python
|
docs/conf.py
|
euanrussano/tinyshop
|
263280258e55351c833064889ee847ce137dd4d3
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
euanrussano/tinyshop
|
263280258e55351c833064889ee847ce137dd4d3
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
euanrussano/tinyshop
|
263280258e55351c833064889ee847ce137dd4d3
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
import django
sys.path.insert(0, os.path.abspath('..'))
os.environ['DJANGO_SETTINGS_MODULE'] = 'tinyshop.settings'
django.setup()
# -- Project information -----------------------------------------------------
project = 'Tiny Shop'
copyright = '2021, Euan Russano'
author = 'Euan Russano'
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [ 'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| 34.933333
| 79
| 0.670324
|
bb33ffa3044b171376a992e241fd21a6a52096c8
| 1,020
|
py
|
Python
|
linkit-project/linkit/urls.py
|
OIrabor24/Reddit-Clone-RestAPI
|
001660c2523244b411804cab70f0c92e2228de65
|
[
"MIT"
] | null | null | null |
linkit-project/linkit/urls.py
|
OIrabor24/Reddit-Clone-RestAPI
|
001660c2523244b411804cab70f0c92e2228de65
|
[
"MIT"
] | null | null | null |
linkit-project/linkit/urls.py
|
OIrabor24/Reddit-Clone-RestAPI
|
001660c2523244b411804cab70f0c92e2228de65
|
[
"MIT"
] | null | null | null |
"""linkit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from posts import views
urlpatterns = [
path('admin/', admin.site.urls),
path('api/posts', views.PostList.as_view()),
path('api/posts/<int:pk>', views.PostRetrieveDestroy.as_view()),
path('api/posts/<int:pk>/vote', views.VoteCreate.as_view()),
path('api-auth/', include('rest_framework.urls')),
]
| 39.230769
| 77
| 0.701961
|
2f228cde794b127e59c3fca3d5ac0602bcca1644
| 396
|
py
|
Python
|
exercicios/ex028.py
|
RaquelBotelhoof/Python-curso-em-video
|
919b2f44e85647c096c6b734c991635f1bfd1af9
|
[
"MIT"
] | null | null | null |
exercicios/ex028.py
|
RaquelBotelhoof/Python-curso-em-video
|
919b2f44e85647c096c6b734c991635f1bfd1af9
|
[
"MIT"
] | null | null | null |
exercicios/ex028.py
|
RaquelBotelhoof/Python-curso-em-video
|
919b2f44e85647c096c6b734c991635f1bfd1af9
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep
computador = randint(0, 5)
print('-=-'*20)
print('I am thinking of a number from 0 to 5. Try to guess it... ')
print('-=-'*20)
jogador = int(input('Which number did I think of? '))
print('PROCESSING...')
sleep(3)
if jogador == computador:
print('You got it right, you BEAT me!! Congratulations')
else:
print('I WON. The number was {} '.format(computador))
| 24.75
| 62
| 0.676768
|
8674f3643a69b6982cdbd02d05135198ce509c1f
| 5,671
|
py
|
Python
|
tools_webrtc/version_updater/update_version.py
|
jickykung/webrtc_src
|
a18cad9c099d8fe7fd5ac91d4adfd468a1ac95e7
|
[
"BSD-3-Clause"
] | null | null | null |
tools_webrtc/version_updater/update_version.py
|
jickykung/webrtc_src
|
a18cad9c099d8fe7fd5ac91d4adfd468a1ac95e7
|
[
"BSD-3-Clause"
] | null | null | null |
tools_webrtc/version_updater/update_version.py
|
jickykung/webrtc_src
|
a18cad9c099d8fe7fd5ac91d4adfd468a1ac95e7
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Script to auto-update the WebRTC source version in call/version.cc"""
import argparse
import datetime
import logging
import os
import re
import subprocess
import sys
def FindSrcDirPath():
"""Returns the abs path to the src/ dir of the project."""
src_dir = os.path.dirname(os.path.abspath(__file__))
while os.path.basename(src_dir) != 'src':
src_dir = os.path.normpath(os.path.join(src_dir, os.pardir))
return src_dir
UPDATE_BRANCH_NAME = 'webrtc_version_update'
CHECKOUT_SRC_DIR = FindSrcDirPath()
def _RemovePreviousUpdateBranch():
active_branch, branches = _GetBranches()
if active_branch == UPDATE_BRANCH_NAME:
active_branch = 'master'
if UPDATE_BRANCH_NAME in branches:
logging.info('Removing previous update branch (%s)',
UPDATE_BRANCH_NAME)
subprocess.check_call(['git', 'checkout', active_branch])
subprocess.check_call(['git', 'branch', '-D', UPDATE_BRANCH_NAME])
logging.info('No branch to remove')
def _GetLastAuthor():
"""Returns a string with the author of the last commit."""
author = subprocess.check_output(['git', 'log',
'-1',
'--pretty=format:%an'])
return author.decode('utf-8').strip()
def _GetBranches():
"""Returns a tuple (active, branches).
'active' is a string with name of the currently active branch, while
'branches' is the list of all branches.
"""
lines = subprocess.check_output(['git', 'branch']).splitlines()
branches = []
active = ''
for line in lines:
if '*' in line:
# The assumption is that the first char will always be the '*'.
active = line[1:].strip()
branches.append(active)
else:
branch = line.strip()
if branch:
branches.append(branch)
return active, branches
def _CreateUpdateBranch():
logging.info('Creating update branch: %s', UPDATE_BRANCH_NAME)
subprocess.check_call(['git', 'checkout', '-b', UPDATE_BRANCH_NAME])
def _UpdateWebRTCVersion(filename):
with open(filename) as f:
content = f.read()
d = datetime.datetime.utcnow()
# pylint: disable=line-too-long
new_content = re.sub(
r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}',
r'WebRTC source stamp %02d-%02d-%02dT%02d:%02d:%02d' % (d.year,
d.month,
d.day,
d.hour,
d.minute,
d.second),
content,
flags=re.MULTILINE)
# pylint: enable=line-too-long
with open(filename, 'w') as f:
f.write(new_content)
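# --- Illustrative sketch (not part of the original script) ---
# A small demonstration of the substitution performed above, applied to a
# made-up line instead of the real call/version.cc contents.
def _demo_version_stamp_update():
    sample = '// WebRTC source stamp 2020-01-01T00:00:00'
    updated = re.sub(
        r'WebRTC source stamp [0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}',
        'WebRTC source stamp 2020-06-15T12:30:00',  # a fixed stamp for the demo
        sample)
    print(updated)  # -> '// WebRTC source stamp 2020-06-15T12:30:00'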
def _IsTreeClean():
stdout = subprocess.check_output(['git', 'status', '--porcelain'])
if len(stdout) == 0:
return True
return False
def _LocalCommit():
logging.info('Committing changes locally.')
d = datetime.datetime.utcnow()
git_author = subprocess.check_output(['git', 'config',
'user.email']).strip()
commit_msg = ('Update WebRTC code version (%02d-%02d-%02dT%02d:%02d:%02d).'
'\n\nBug: None')
commit_msg = commit_msg % (d.year, d.month, d.day, d.hour, d.minute,
d.second)
subprocess.check_call(['git', 'add', '--update', '.'])
subprocess.check_call(['git', 'commit', '-m', commit_msg])
def _UploadCL(commit_queue_mode):
"""Upload the committed changes as a changelist to Gerrit.
commit_queue_mode:
- 2: Submit to commit queue.
- 1: Run trybots but do not submit to CQ.
- 0: Skip CQ, upload only.
"""
cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks',
'--cc=""', '--bypass-watchlist']
if commit_queue_mode >= 2:
logging.info('Sending the CL to the CQ...')
cmd.extend(['-o', 'label=Bot-Commit+1'])
cmd.extend(['-o', 'label=Commit-Queue+2'])
elif commit_queue_mode >= 1:
logging.info('Starting CQ dry run...')
cmd.extend(['-o', 'label=Commit-Queue+1'])
subprocess.check_call(cmd)
def main():
logging.basicConfig(level=logging.INFO)
p = argparse.ArgumentParser()
p.add_argument('--clean',
action='store_true',
default=False,
help='Removes any previous local update branch.')
opts = p.parse_args()
if opts.clean:
_RemovePreviousUpdateBranch()
if _GetLastAuthor() == 'webrtc-version-updater':
logging.info('Last commit is a version change, skipping CL.')
return 0
version_filename = os.path.join(CHECKOUT_SRC_DIR, 'call', 'version.cc')
_CreateUpdateBranch()
_UpdateWebRTCVersion(version_filename)
if _IsTreeClean():
logging.info('No WebRTC version change detected, skipping CL.')
else:
_LocalCommit()
logging.info('Uploading CL...')
_UploadCL(2)
return 0
if __name__ == '__main__':
sys.exit(main())
| 33.358824
| 85
| 0.585787
|
a9b590618c1fefa51d964c6ae70708512d20a395
| 1,169
|
py
|
Python
|
django-payments-instamojo/setup.py
|
FadedCoder/Teeshood
|
4d0659ba1ece52ca9579582165fadc095770ad37
|
[
"BSD-3-Clause"
] | null | null | null |
django-payments-instamojo/setup.py
|
FadedCoder/Teeshood
|
4d0659ba1ece52ca9579582165fadc095770ad37
|
[
"BSD-3-Clause"
] | null | null | null |
django-payments-instamojo/setup.py
|
FadedCoder/Teeshood
|
4d0659ba1ece52ca9579582165fadc095770ad37
|
[
"BSD-3-Clause"
] | 1
|
2021-12-02T18:49:31.000Z
|
2021-12-02T18:49:31.000Z
|
from setuptools import setup
setup(name='django-payments-instamojo',
version='0.1.3',
description='Instamojo Provider for django-payments',
url='http://github.com/FadedCoder/django-payments-instamojo',
author='Soham Sen',
author_email='contact@sohamsen.me',
maintainer='Soham Sen',
license='BSD License',
packages=['django_payments_instamojo'],
long_description=open('README.md').read(),
include_package_data=True,
zip_safe=False,
keywords='django,payments,ecommerce,saleor,delivery',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| 38.966667
| 81
| 0.620188
|
280fffebf95edfd9a8b494a76e7782865780262f
| 6,741
|
py
|
Python
|
custom_jinja_filters.py
|
276793422/attack-website
|
9be8abbb8d77c3c73243cae2689f1af9725d19f9
|
[
"Apache-2.0"
] | 327
|
2018-11-22T07:01:30.000Z
|
2022-03-30T09:25:02.000Z
|
custom_jinja_filters.py
|
276793422/attack-website
|
9be8abbb8d77c3c73243cae2689f1af9725d19f9
|
[
"Apache-2.0"
] | 239
|
2019-02-11T13:47:44.000Z
|
2022-03-30T06:20:47.000Z
|
custom_jinja_filters.py
|
276793422/attack-website
|
9be8abbb8d77c3c73243cae2689f1af9725d19f9
|
[
"Apache-2.0"
] | 111
|
2019-01-25T12:58:56.000Z
|
2022-03-30T09:25:04.000Z
|
#!/usr/bin/env python
import json
import markdown
import re
import os
from modules import site_config
# Template for HTML references inside of STIX data
reference_marker_template = ("<span onclick=scrollToRef('scite-{}') "
"id=\"scite-ref-{}-a\" class=\"scite"
"-citeref-number\" "
"data-reference=\"{}\"><sup><a href=\"{}\" "
"target=\"_blank\" data-hasqtip=\"{}\" "
"aria-describedby=\"qtip-{}\">[{}]</a></sup></span>")
reference_marker_template_no_url = ("<span onclick=scrollToRef('scite-{}') "
"id=\"scite-ref-{}-a\" "
"class=\"scite-citeref-number\" "
"data-reference=\"{}\">"
"<sup>[{}]</sup></span>")
# Pelican settings global variable
pelican_settings = {}
pelican_settings_f = os.path.join(site_config.data_directory, "pelican_settings.json")
with open(pelican_settings_f, "r", encoding='utf8') as json_f:
pelican_settings = json.load(json_f)
# Custom Jinja Filters
def remove_whitespace(word):
return ''.join(word.split(" "))
def escape_spaces(word):
return '%20'.join(word.split(" "))
def clean_path(path):
    """ strip index.html from the end of a path and ensure it has leading and trailing slashes """
path = path.split("index.html")[0]
if not path.startswith("/"): path = "/" + path
if not path.endswith("/"): path += "/"
return path
def flatten_tree(root):
""" get a flattened tree of the "paths" of all children of a tree of objects.
used in sidenav
"""
ret = []
if root["path"]: ret.append(root["path"])
for child in root["children"]:
ret = ret + flatten_tree(child)
return ret
def clean_stix_data(data):
""" Clean stix data from unwanted characters """
return data.replace("\n", "")\
.replace("{", "{{")\
.replace("}", "}}")\
.replace("”","\"")\
.replace("“","\"")
def get_citations(data):
""" Given a description, find all of the citations """
    p = re.compile(r'\(Citation: (.*?)\)')
return p.findall(data)
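# Illustrative example for get_citations (hypothetical citation name):
#   get_citations("APT1 sent spearphishing emails.(Citation: Mandiant APT1)")
#   returns ['Mandiant APT1']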
def get_html_citation(citations, citation_name):
    """ Given a citation name and the citations dict, return the HTML
        reference marker for that citation, assigning it the next
        reference number if it has not been numbered yet
    """
global reference_marker_template
global reference_marker_template_no_url
citation = citations.get(citation_name)
reference_html = ""
if citation:
ref_number = None
if citation.get('number'):
ref_number = citation['number']
else:
ref_number = citations['current_number'] + 1
citations['current_number'] = ref_number
citation['number'] = ref_number
if not citation.get('url'):
reference_html = reference_marker_template_no_url.format(ref_number,ref_number,citation_name,ref_number)
else:
reference_html = reference_marker_template.format(ref_number,ref_number,citation_name,citation['url'],ref_number - 1, ref_number - 1, ref_number)
return reference_html
def update_citations(data, citations):
    """ Given a data string and the citations dict, replace each
        (Citation: name) marker in the string with its HTML reference marker
    """
citation_template = "(Citation: {})"
citation_names = get_citations(data)
for citation_name in citation_names:
replace_string = get_html_citation(citations, citation_name)
if replace_string:
data = data.replace(citation_template.format(citation_name), replace_string)
return data
def remove_citations(data):
""" Remove citations from strings """
# Get citations names to remove from string
citation_names = get_citations(data)
for citation_name in citation_names:
data = data.replace("(Citation: " + citation_name + ")","")
return data
def filter_urls(data):
    """Replace absolute attack.mitre.org URLs with site-relative paths"""
if not pelican_settings["no_stix_link_replacement"]:
if "https://attack.mitre.org/groups/" in data:
data = data.replace(
"https://attack.mitre.org/groups/", "/groups/")
if "https://attack.mitre.org/software/" in data:
data = data.replace(
"https://attack.mitre.org/software/", "/software/")
if "https://attack.mitre.org/techniques/" in data:
data = data.replace(
"https://attack.mitre.org/techniques/", "/techniques/")
if "https://attack.mitre.org/technique/" in data:
data = data.replace(
"https://attack.mitre.org/technique/", "/techniques/")
return data
def stixToHTML(data, citations, firstParagraphOnly):
""" Clean output of STIX content.
params:
data (required, string), the STIX description to format
citations (optional, object), if not None, add citation markers to the data.
firstParagraphOnly (optional, boolean), if true, only return the first paragraph of the data in question.
"""
# Replace data from markdown format
data = markdown.markdown(data)
# Replace url links
data = filter_urls(data)
# Get first paragraph from data
if firstParagraphOnly:
data = data.split('</p>')[0] + '</p>'
if data.startswith("<p>") and data.endswith("</p>"):
data = data[3:-4]
if citations:
# Update citations
data = update_citations(data, citations)
else:
# Remove citations
data = remove_citations(data)
data = clean_stix_data(data)
return data
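# Illustrative call of stixToHTML (hypothetical description text): with
# citations=None the (Citation: ...) markers are stripped, e.g.
#   stixToHTML("Adversaries may phish. (Citation: Example Ref)", None, True)
# returns the first paragraph of the rendered text without the citation marker.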
current_version_permalink = None
def permalink(link):
"""convert from a link to a permalink of that link, e.g /x/y => /versions/v6/x/y
uses data/versions.json's current object to determine what the current version is for the permalink
"""
global current_version_permalink
# load the current version permalink
if not current_version_permalink:
with open(os.path.join("data", "versions.json"), "r", encoding='utf8') as f:
currentVersion = json.load(f)["current"]
current_version_permalink = currentVersion["path"] if "path" in currentVersion else currentVersion["name"].split(".")[0]
current_version_permalink = "/versions/" + current_version_permalink
# remove index.html from the end
link = link.split("index.html")[0] if link.endswith("index.html") else link
# strip index.html from path
return current_version_permalink + "/" + link
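# Illustrative example (assuming data/versions.json's current entry resolves to
# "v6"): permalink("groups/G0001/index.html") returns "/versions/v6/groups/G0001/".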
| 35.293194
| 157
| 0.612965
|
b3faf9b915462d3f0505f23278314f9f82ba3cd2
| 772
|
py
|
Python
|
habitacion_main/urls.py
|
RamirezJosue/habitacion-service
|
5e67e7401c5a1515856165d3879c9472d8f486d9
|
[
"MIT"
] | null | null | null |
habitacion_main/urls.py
|
RamirezJosue/habitacion-service
|
5e67e7401c5a1515856165d3879c9472d8f486d9
|
[
"MIT"
] | null | null | null |
habitacion_main/urls.py
|
RamirezJosue/habitacion-service
|
5e67e7401c5a1515856165d3879c9472d8f486d9
|
[
"MIT"
] | null | null | null |
"""habitacion_main URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| 35.090909
| 79
| 0.707254
|
0abdcdfd5207f83bcde2d6b9e5f082ea55ebbfeb
| 3,403
|
py
|
Python
|
intersight/models/storage_local_disk.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/storage_local_disk.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
intersight/models/storage_local_disk.py
|
gumpcraca/intersight-python
|
780e6703c739f329084beacbbf2ad7a6a2e59b2b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Intersight REST API
This is Intersight REST API
OpenAPI spec version: 1.0.9-255
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class StorageLocalDisk(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'slot_number': 'int'
}
attribute_map = {
'slot_number': 'SlotNumber'
}
def __init__(self, slot_number=None):
"""
StorageLocalDisk - a model defined in Swagger
"""
self._slot_number = None
if slot_number is not None:
self.slot_number = slot_number
@property
def slot_number(self):
"""
Gets the slot_number of this StorageLocalDisk.
Specifies the slot number of the disk to be referenced. As this is a policy object, this slot number may or may not be valid depending on the number of disks in the associated server
:return: The slot_number of this StorageLocalDisk.
:rtype: int
"""
return self._slot_number
@slot_number.setter
def slot_number(self, slot_number):
"""
Sets the slot_number of this StorageLocalDisk.
Specifies the slot number of the disk to be referenced. As this is a policy object, this slot number may or may not be valid depending on the number of disks in the associated server
:param slot_number: The slot_number of this StorageLocalDisk.
:type: int
"""
self._slot_number = slot_number
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, StorageLocalDisk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 27.007937
| 193
| 0.566559
|
85ffa2f1e82b35e8538c7ac057df575cce10608a
| 7,883
|
py
|
Python
|
wbb/__main__.py
|
Noobie-uwu/itsuki-bot
|
019a5b5c872f426ac0d8154c7a215ea7880727c9
|
[
"MIT"
] | null | null | null |
wbb/__main__.py
|
Noobie-uwu/itsuki-bot
|
019a5b5c872f426ac0d8154c7a215ea7880727c9
|
[
"MIT"
] | null | null | null |
wbb/__main__.py
|
Noobie-uwu/itsuki-bot
|
019a5b5c872f426ac0d8154c7a215ea7880727c9
|
[
"MIT"
] | 1
|
2022-02-03T08:01:00.000Z
|
2022-02-03T08:01:00.000Z
|
"""
MIT License
Copyright (c) 2021 TheHamkerCat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import importlib
import re
import uvloop
from pyrogram import filters, idle
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
from wbb import BOT_NAME, BOT_USERNAME, aiohttpsession, app
from wbb.modules import ALL_MODULES
from wbb.modules.sudoers import bot_sys_stats
from wbb.utils import paginate_modules
loop = asyncio.get_event_loop()
HELPABLE = {}
async def start_bot():
global COMMANDS_COUNT
for module in ALL_MODULES:
imported_module = importlib.import_module("wbb.modules." + module)
if (
hasattr(imported_module, "__MODULE__")
and imported_module.__MODULE__
):
imported_module.__MODULE__ = imported_module.__MODULE__
if (
hasattr(imported_module, "__HELP__")
and imported_module.__HELP__
):
HELPABLE[imported_module.__MODULE__.lower()] = imported_module
bot_modules = ""
j = 1
for i in ALL_MODULES:
if j == 4:
bot_modules += "|{:<15}|\n".format(i)
j = 0
else:
bot_modules += "|{:<15}".format(i)
j += 1
print("+===============================================================+")
print("| WBB |")
print("+===============+===============+===============+===============+")
print(bot_modules)
print("+===============+===============+===============+===============+")
print(f"[INFO]: BOT STARTED AS {BOT_NAME}!")
await idle()
await aiohttpsession.close()
@app.on_message(filters.command(["help", "start"]))
async def help_command(_, message):
if message.chat.type != "private":
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Help",
url=f"t.me/{BOT_USERNAME}?start=help",
),
InlineKeyboardButton(
text="Support", url="t.me/quintessential_support"
),
],
]
)
await message.reply("Yo Hello there!", reply_markup=keyboard)
return
keyboard = InlineKeyboardMarkup(
[
[
InlineKeyboardButton(
text="Commands", callback_data="bot_commands"
),
InlineKeyboardButton(
text="Repo 🛠",
url="https://github.com/thehamkercat/WilliamButcherBot",
),
],
[
InlineKeyboardButton(
text="System Stats 🖥", callback_data="stats_callback"
),
InlineKeyboardButton(text="Support", url="t.me/quintessential_support"),
],
[
InlineKeyboardButton(
text="Add Me To Your Group",
url=f"http://t.me/{BOT_USERNAME}?startgroup=new",
)
],
]
)
    await message.reply(
        "Hey there! My name is Itsuki Nakano[.](https://telegra.ph/file/fcd62d551e72a68142733.jpg) I can manage your group with lots of useful features; feel free to add me to your group.",
reply_markup=keyboard,
)
async def help_parser(name, keyboard=None):
if not keyboard:
keyboard = InlineKeyboardMarkup(paginate_modules(0, HELPABLE, "help"))
return (
"""Hello {first_name}! My name is Itsuki Nakano!
I'm a group management bot with some useful features.
You can choose an option below by clicking a button.
You can also ask anything in the Support Group.
General commands are:
- /start: Start the bot
- /help: Give this message""".format(
first_name=name,
bot_name=BOT_NAME,
),
keyboard,
)
@app.on_callback_query(filters.regex("bot_commands"))
async def commands_callbacc(_, CallbackQuery):
text, keyboard = await help_parser(CallbackQuery.from_user.mention)
await app.send_message(
CallbackQuery.message.chat.id, text=text, reply_markup=keyboard
)
await CallbackQuery.message.delete()
@app.on_callback_query(filters.regex("stats_callback"))
async def stats_callbacc(_, CallbackQuery):
text = await bot_sys_stats()
await app.answer_callback_query(CallbackQuery.id, text, show_alert=True)
@app.on_callback_query(filters.regex(r"help_(.*?)"))
async def help_button(client, query):
mod_match = re.match(r"help_module\((.+?)\)", query.data)
prev_match = re.match(r"help_prev\((.+?)\)", query.data)
next_match = re.match(r"help_next\((.+?)\)", query.data)
back_match = re.match(r"help_back", query.data)
create_match = re.match(r"help_create", query.data)
top_text = f"""
Hello {query.from_user.first_name}! My name is {BOT_NAME}!
I'm a group management bot with some useful features.
You can choose an option below by clicking a button.
You can also ask anything in the Support Group.
General commands are:
- /start: Start the bot
- /help: Give this message
"""
if mod_match:
module = mod_match.group(1)
text = (
"{} **{}**:\n".format(
"Here is the help for", HELPABLE[module].__MODULE__
)
+ HELPABLE[module].__HELP__
)
await query.message.edit(
text=text,
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton("back", callback_data="help_back")]]
),
disable_web_page_preview=True,
)
elif prev_match:
curr_page = int(prev_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(curr_page - 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif next_match:
next_page = int(next_match.group(1))
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(next_page + 1, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif back_match:
await query.message.edit(
text=top_text,
reply_markup=InlineKeyboardMarkup(
paginate_modules(0, HELPABLE, "help")
),
disable_web_page_preview=True,
)
elif create_match:
text, keyboard = await help_parser(query)
await query.message.edit(
text=text, reply_markup=keyboard, disable_web_page_preview=True
)
return await client.answer_callback_query(query.id)
if __name__ == "__main__":
uvloop.install()
loop.run_until_complete(start_bot())
| 33.832618
| 190
| 0.60104
|
1a2a0a5ce7fdc44f8f8b3f74bd19927cc6a9331f
| 577
|
py
|
Python
|
ch02/question_5.py
|
dhrey112/IntroToPython_Deitel
|
7ecbff931d05c467ad64da0bd829f79fedf729ba
|
[
"MIT"
] | null | null | null |
ch02/question_5.py
|
dhrey112/IntroToPython_Deitel
|
7ecbff931d05c467ad64da0bd829f79fedf729ba
|
[
"MIT"
] | null | null | null |
ch02/question_5.py
|
dhrey112/IntroToPython_Deitel
|
7ecbff931d05c467ad64da0bd829f79fedf729ba
|
[
"MIT"
] | null | null | null |
# TODO: 5. (Circle Area, Diameter and Circumference) For a circle of radius 2,
# display the diameter, circumference and area. Use the value 3.14159 for π.
# Use the following formulas (r is the radius): diameter = 2r, circumference
# = 2πr and area = πr². [In a later chapter, we’ll introduce Python’s math
# module which contains a higher-precision representation of π.]
pi = 3.14159
radius = 2
diameter = 2 * radius
circumference = 2 * radius * pi
area = pi * (radius ** 2)
print('Diameter =', diameter)
print('Circumference =', circumference)
print('Area =', area)
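# Expected output (follows directly from the formulas above):
#   Diameter = 4
#   Circumference = 12.56636
#   Area = 12.56636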
| 32.055556
| 78
| 0.707106
|
bd5334171211e832df27ed93c383fb929905b971
| 16,290
|
py
|
Python
|
tests/components/homekit/test_type_media_players.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 2
|
2021-09-13T21:44:02.000Z
|
2021-12-17T21:20:51.000Z
|
tests/components/homekit/test_type_media_players.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 5
|
2021-02-08T20:55:25.000Z
|
2022-03-12T00:51:18.000Z
|
tests/components/homekit/test_type_media_players.py
|
twrecked/core
|
d3ae8a938cdea9b6e0d443c91c37ac3dbbd459ab
|
[
"Apache-2.0"
] | 2
|
2020-11-04T07:40:01.000Z
|
2021-09-13T21:44:03.000Z
|
"""Test different accessory types: Media Players."""
from homeassistant.components.homekit.const import (
ATTR_VALUE,
CONF_FEATURE_LIST,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
)
from homeassistant.components.homekit.type_media_players import (
MediaPlayer,
TelevisionMediaPlayer,
)
from homeassistant.components.media_player import DEVICE_CLASS_TV
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN,
)
from homeassistant.const import (
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
EVENT_HOMEASSISTANT_START,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
)
from homeassistant.core import CoreState
from homeassistant.helpers import entity_registry
from tests.common import async_mock_service
async def test_media_player_set_state(hass, hk_driver, events):
"""Test if accessory and HA are updated accordingly."""
config = {
CONF_FEATURE_LIST: {
FEATURE_ON_OFF: None,
FEATURE_PLAY_PAUSE: None,
FEATURE_PLAY_STOP: None,
FEATURE_TOGGLE_MUTE: None,
}
}
entity_id = "media_player.test"
hass.states.async_set(
entity_id,
None,
{ATTR_SUPPORTED_FEATURES: 20873, ATTR_MEDIA_VOLUME_MUTED: False},
)
await hass.async_block_till_done()
acc = MediaPlayer(hass, hk_driver, "MediaPlayer", entity_id, 2, config)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 8 # Switch
assert acc.chars[FEATURE_ON_OFF].value is False
assert acc.chars[FEATURE_PLAY_PAUSE].value is False
assert acc.chars[FEATURE_PLAY_STOP].value is False
assert acc.chars[FEATURE_TOGGLE_MUTE].value is False
hass.states.async_set(entity_id, STATE_ON, {ATTR_MEDIA_VOLUME_MUTED: True})
await hass.async_block_till_done()
assert acc.chars[FEATURE_ON_OFF].value is True
assert acc.chars[FEATURE_TOGGLE_MUTE].value is True
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
assert acc.chars[FEATURE_ON_OFF].value is False
hass.states.async_set(entity_id, STATE_ON)
await hass.async_block_till_done()
assert acc.chars[FEATURE_ON_OFF].value is True
hass.states.async_set(entity_id, STATE_STANDBY)
await hass.async_block_till_done()
assert acc.chars[FEATURE_ON_OFF].value is False
hass.states.async_set(entity_id, STATE_PLAYING)
await hass.async_block_till_done()
assert acc.chars[FEATURE_PLAY_PAUSE].value is True
assert acc.chars[FEATURE_PLAY_STOP].value is True
hass.states.async_set(entity_id, STATE_PAUSED)
await hass.async_block_till_done()
assert acc.chars[FEATURE_PLAY_PAUSE].value is False
hass.states.async_set(entity_id, STATE_IDLE)
await hass.async_block_till_done()
assert acc.chars[FEATURE_PLAY_STOP].value is False
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
call_media_play = async_mock_service(hass, DOMAIN, "media_play")
call_media_pause = async_mock_service(hass, DOMAIN, "media_pause")
call_media_stop = async_mock_service(hass, DOMAIN, "media_stop")
call_toggle_mute = async_mock_service(hass, DOMAIN, "volume_mute")
await hass.async_add_executor_job(
acc.chars[FEATURE_ON_OFF].client_update_value, True
)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_ON_OFF].client_update_value, False
)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_PLAY_PAUSE].client_update_value, True
)
await hass.async_block_till_done()
assert call_media_play
assert call_media_play[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 3
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_PLAY_PAUSE].client_update_value, False
)
await hass.async_block_till_done()
assert call_media_pause
assert call_media_pause[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 4
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_PLAY_STOP].client_update_value, True
)
await hass.async_block_till_done()
assert call_media_play
assert call_media_play[1].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 5
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_PLAY_STOP].client_update_value, False
)
await hass.async_block_till_done()
assert call_media_stop
assert call_media_stop[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 6
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_TOGGLE_MUTE].client_update_value, True
)
await hass.async_block_till_done()
assert call_toggle_mute
assert call_toggle_mute[0].data[ATTR_ENTITY_ID] == entity_id
assert call_toggle_mute[0].data[ATTR_MEDIA_VOLUME_MUTED] is True
assert len(events) == 7
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(
acc.chars[FEATURE_TOGGLE_MUTE].client_update_value, False
)
await hass.async_block_till_done()
assert call_toggle_mute
assert call_toggle_mute[1].data[ATTR_ENTITY_ID] == entity_id
assert call_toggle_mute[1].data[ATTR_MEDIA_VOLUME_MUTED] is False
assert len(events) == 8
assert events[-1].data[ATTR_VALUE] is None
async def test_media_player_television(hass, hk_driver, events, caplog):
"""Test if television accessory and HA are updated accordingly."""
entity_id = "media_player.television"
# Supports 'select_source', 'volume_step', 'turn_on', 'turn_off',
# 'volume_mute', 'volume_set', 'pause'
hass.states.async_set(
entity_id,
None,
{
ATTR_DEVICE_CLASS: DEVICE_CLASS_TV,
ATTR_SUPPORTED_FEATURES: 3469,
ATTR_MEDIA_VOLUME_MUTED: False,
ATTR_INPUT_SOURCE_LIST: ["HDMI 1", "HDMI 2", "HDMI 3", "HDMI 4"],
},
)
await hass.async_block_till_done()
acc = TelevisionMediaPlayer(hass, hk_driver, "MediaPlayer", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 31 # Television
assert acc.char_active.value == 0
assert acc.char_remote_key.value == 0
assert acc.char_input_source.value == 0
assert acc.char_mute.value is False
hass.states.async_set(entity_id, STATE_ON, {ATTR_MEDIA_VOLUME_MUTED: True})
await hass.async_block_till_done()
assert acc.char_active.value == 1
assert acc.char_mute.value is True
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
assert acc.char_active.value == 0
hass.states.async_set(entity_id, STATE_ON)
await hass.async_block_till_done()
assert acc.char_active.value == 1
hass.states.async_set(entity_id, STATE_STANDBY)
await hass.async_block_till_done()
assert acc.char_active.value == 0
hass.states.async_set(entity_id, STATE_ON, {ATTR_INPUT_SOURCE: "HDMI 2"})
await hass.async_block_till_done()
assert acc.char_input_source.value == 1
hass.states.async_set(entity_id, STATE_ON, {ATTR_INPUT_SOURCE: "HDMI 3"})
await hass.async_block_till_done()
assert acc.char_input_source.value == 2
hass.states.async_set(entity_id, STATE_ON, {ATTR_INPUT_SOURCE: "HDMI 5"})
await hass.async_block_till_done()
assert acc.char_input_source.value == 0
assert caplog.records[-2].levelname == "WARNING"
# Set from HomeKit
call_turn_on = async_mock_service(hass, DOMAIN, "turn_on")
call_turn_off = async_mock_service(hass, DOMAIN, "turn_off")
call_media_play = async_mock_service(hass, DOMAIN, "media_play")
call_media_pause = async_mock_service(hass, DOMAIN, "media_pause")
call_media_play_pause = async_mock_service(hass, DOMAIN, "media_play_pause")
call_toggle_mute = async_mock_service(hass, DOMAIN, "volume_mute")
call_select_source = async_mock_service(hass, DOMAIN, "select_source")
call_volume_up = async_mock_service(hass, DOMAIN, "volume_up")
call_volume_down = async_mock_service(hass, DOMAIN, "volume_down")
call_volume_set = async_mock_service(hass, DOMAIN, "volume_set")
await hass.async_add_executor_job(acc.char_active.client_update_value, 1)
await hass.async_block_till_done()
assert call_turn_on
assert call_turn_on[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_active.client_update_value, 0)
await hass.async_block_till_done()
assert call_turn_off
assert call_turn_off[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_remote_key.client_update_value, 11)
await hass.async_block_till_done()
assert call_media_play_pause
assert call_media_play_pause[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 3
assert events[-1].data[ATTR_VALUE] is None
hass.states.async_set(entity_id, STATE_PLAYING)
await hass.async_block_till_done()
await hass.async_add_executor_job(acc.char_remote_key.client_update_value, 11)
await hass.async_block_till_done()
assert call_media_pause
assert call_media_pause[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 4
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_remote_key.client_update_value, 10)
await hass.async_block_till_done()
assert len(events) == 4
assert events[-1].data[ATTR_VALUE] is None
hass.states.async_set(entity_id, STATE_PAUSED)
await hass.async_block_till_done()
await hass.async_add_executor_job(acc.char_remote_key.client_update_value, 11)
await hass.async_block_till_done()
assert call_media_play
assert call_media_play[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 5
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_mute.client_update_value, True)
await hass.async_block_till_done()
assert call_toggle_mute
assert call_toggle_mute[0].data[ATTR_ENTITY_ID] == entity_id
assert call_toggle_mute[0].data[ATTR_MEDIA_VOLUME_MUTED] is True
assert len(events) == 6
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_mute.client_update_value, False)
await hass.async_block_till_done()
assert call_toggle_mute
assert call_toggle_mute[1].data[ATTR_ENTITY_ID] == entity_id
assert call_toggle_mute[1].data[ATTR_MEDIA_VOLUME_MUTED] is False
assert len(events) == 7
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_input_source.client_update_value, 1)
await hass.async_block_till_done()
assert call_select_source
assert call_select_source[0].data[ATTR_ENTITY_ID] == entity_id
assert call_select_source[0].data[ATTR_INPUT_SOURCE] == "HDMI 2"
assert len(events) == 8
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_volume_selector.client_update_value, 0)
await hass.async_block_till_done()
assert call_volume_up
assert call_volume_up[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 9
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_volume_selector.client_update_value, 1)
await hass.async_block_till_done()
assert call_volume_down
assert call_volume_down[0].data[ATTR_ENTITY_ID] == entity_id
assert len(events) == 10
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_volume.client_update_value, 20)
await hass.async_block_till_done()
assert call_volume_set[0]
assert call_volume_set[0].data[ATTR_ENTITY_ID] == entity_id
assert call_volume_set[0].data[ATTR_MEDIA_VOLUME_LEVEL] == 20
assert len(events) == 11
assert events[-1].data[ATTR_VALUE] is None
async def test_media_player_television_basic(hass, hk_driver, events, caplog):
"""Test if basic television accessory and HA are updated accordingly."""
entity_id = "media_player.television"
# Supports turn_on', 'turn_off'
hass.states.async_set(
entity_id,
None,
{ATTR_DEVICE_CLASS: DEVICE_CLASS_TV, ATTR_SUPPORTED_FEATURES: 384},
)
await hass.async_block_till_done()
acc = TelevisionMediaPlayer(hass, hk_driver, "MediaPlayer", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.chars_tv == []
assert acc.chars_speaker == []
assert acc.support_select_source is False
hass.states.async_set(entity_id, STATE_ON, {ATTR_MEDIA_VOLUME_MUTED: True})
await hass.async_block_till_done()
assert acc.char_active.value == 1
hass.states.async_set(entity_id, STATE_OFF)
await hass.async_block_till_done()
assert acc.char_active.value == 0
hass.states.async_set(entity_id, STATE_ON, {ATTR_INPUT_SOURCE: "HDMI 3"})
await hass.async_block_till_done()
assert acc.char_active.value == 1
assert not caplog.messages or "Error" not in caplog.messages[-1]
async def test_media_player_television_supports_source_select_no_sources(
hass, hk_driver, events, caplog
):
    """Test a basic TV that supports source select but is missing a source list."""
entity_id = "media_player.television"
# Supports turn_on', 'turn_off'
hass.states.async_set(
entity_id,
None,
{ATTR_DEVICE_CLASS: DEVICE_CLASS_TV, ATTR_SUPPORTED_FEATURES: 3469},
)
await hass.async_block_till_done()
acc = TelevisionMediaPlayer(hass, hk_driver, "MediaPlayer", entity_id, 2, None)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.support_select_source is False
async def test_tv_restore(hass, hk_driver, events):
    """Test setting up an entity from state in the entity registry."""
hass.state = CoreState.not_running
registry = await entity_registry.async_get_registry(hass)
registry.async_get_or_create(
"media_player",
"generic",
"1234",
suggested_object_id="simple",
device_class=DEVICE_CLASS_TV,
)
registry.async_get_or_create(
"media_player",
"generic",
"9012",
suggested_object_id="all_info_set",
capabilities={
ATTR_INPUT_SOURCE_LIST: ["HDMI 1", "HDMI 2", "HDMI 3", "HDMI 4"],
},
supported_features=3469,
device_class=DEVICE_CLASS_TV,
)
hass.bus.async_fire(EVENT_HOMEASSISTANT_START, {})
await hass.async_block_till_done()
acc = TelevisionMediaPlayer(
hass, hk_driver, "MediaPlayer", "media_player.simple", 2, None
)
assert acc.category == 31
assert acc.chars_tv == []
assert acc.chars_speaker == []
assert acc.support_select_source is False
assert not hasattr(acc, "char_input_source")
acc = TelevisionMediaPlayer(
hass, hk_driver, "MediaPlayer", "media_player.all_info_set", 2, None
)
assert acc.category == 31
assert acc.chars_tv == ["RemoteKey"]
assert acc.chars_speaker == [
"Name",
"Active",
"VolumeControlType",
"VolumeSelector",
"Volume",
]
assert acc.support_select_source is True
assert acc.char_input_source is not None
| 36.119734
| 86
| 0.728668
|
96a110795685bc9ebd8d91200db74ff19c4486ec
| 1,133
|
py
|
Python
|
setup.py
|
armgilles/jitenshea
|
65c04dfda5b10e93d7f0674e96bfa91ed42d0755
|
[
"MIT"
] | null | null | null |
setup.py
|
armgilles/jitenshea
|
65c04dfda5b10e93d7f0674e96bfa91ed42d0755
|
[
"MIT"
] | null | null | null |
setup.py
|
armgilles/jitenshea
|
65c04dfda5b10e93d7f0674e96bfa91ed42d0755
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import setuptools
with open("README.md") as fobj:
LONG_DESCRIPTION = fobj.read()
INSTALL_REQUIRES = ["pandas", "requests", "psycopg2-binary", "luigi", 'sqlalchemy',
'lxml', 'xgboost', 'daiquiri', 'flask-restplus', 'sh',
'scikit-learn', 'tables']
setuptools.setup(
name='jitenshea',
version='0.1',
license='BSD',
url='https://github.com/garaud/jitenshea',
packages=setuptools.find_packages(),
include_package_data=True,
install_requires=INSTALL_REQUIRES,
extras_require={'dev': ['pytest', 'pytest-sugar', 'ipython', 'ipdb']},
author="Damien Garaud",
author_email='damien.garaud@gmail.com',
description="Bicycle-sharing data analysis",
long_description=LONG_DESCRIPTION,
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Topic :: Scientific/Engineering",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
]
)
| 29.815789
| 83
| 0.622242
|
036a47d8cb38e5cb916e0b23dd2ea6071f8dffc7
| 16,683
|
py
|
Python
|
analysis/dimensionalty_sim/neurons.py
|
htem/cb2_project_analysis
|
a677cbadc7e3bf0074975a94ed1d06b4801899c0
|
[
"MIT"
] | null | null | null |
analysis/dimensionalty_sim/neurons.py
|
htem/cb2_project_analysis
|
a677cbadc7e3bf0074975a94ed1d06b4801899c0
|
[
"MIT"
] | null | null | null |
analysis/dimensionalty_sim/neurons.py
|
htem/cb2_project_analysis
|
a677cbadc7e3bf0074975a94ed1d06b4801899c0
|
[
"MIT"
] | null | null | null |
import random
import math
import copy
import numpy as np
import logging
import collections
logger = logging.getLogger(__name__)
class GranuleCell():
def __init__(
self,
# num_mfs,
# num_dendrite,
claws,
claw_weights=None,
# act_threshold,
# grc_act_on_failure_rate,
# grc_act_off_failure_rate,
# max_weight=255,
# act_lv=0.01,
):
self.activations = []
self.inputs = []
# self.act_threshold = act_threshold
# self.max_weight = max_weight
# self.output_weight = int(max_weight/2)
# self.grc_act_on_failure_rate = grc_act_on_failure_rate
# self.grc_act_off_failure_rate = grc_act_off_failure_rate
# if act_threshold < 1:
# act_threshold = act_threshold*num_dendrite
# self.act_threshold = act_threshold
self.claws = claws
# while len(self.claws) < num_dendrite:
# mf_id = random.randint(0, num_mfs-1)
# if mf_id not in self.claws:
# self.claws.append(mf_id)
# self.claws.sort()
# print(self.claws)
self.claws.sort()
if claw_weights:
self.claw_weights = claw_weights
assert False, "Untested"
else:
self.claw_weights = [1]*len(self.claws)
self.activated = False
self.act_lv_scale = 1
self.broken = False
# self.act_lv = act_lv
def activate(
self, pattern,
# grc_act_off_failure_rate=None,
):
if self.broken:
self.activations.append(0)
self.activated = False
return False
sum = 0.0
for i, claw in enumerate(self.claws):
sum += pattern[claw]
self.inputs.append(sum)
if sum >= self.act_lv_scale:
activated = True
self.activations.append(1)
else:
activated = False
self.activations.append(0)
self.activated = activated
return activated
def train(self, input_mfs, output):
act = False
act = self.activate(input_mfs, grc_act_off_failure_rate=0)
if act:
if output:
self.output_weight = min(
self.output_weight+1, self.max_weight)
else:
self.output_weight = max(
self.output_weight-1, 0)
def reset(self):
self.inputs = []
self.activations = []
def calibrate_activation_level(self, act_lv):
self.inputs.sort()
idx = int((1-act_lv)*len(self.inputs))
if self.inputs[idx] == 0:
# print(self.inputs)
print(self.claws)
# scale = 1.0 / self.inputs[idx]
# self.act_lv_scale = scale
self.act_lv_scale = self.inputs[idx]
class MossyFiber():
def __init__(self, mf_id):
self.mf_id = mf_id
self.activations = []
pass
def reset(self):
self.activations = []
def activate(self, pattern):
self.activations.append(pattern[self.mf_id])
class Simulation():
def __init__(
self,
input_graph,
# num_grc=None,
# num_mfs=None,
# num_dendrite=None,
# grc_act_threshold=None,
# grc_act_on_failure_rate=0,
# grc_act_off_failure_rate=0,
# max_synapse_weight=255,
# min_train_it=15000,
min_eval_it=5000,
# default_input_noise=0.05,
# default_decoder_error_margin=0.10,
# n_evaluate_sampling=1,
# evaluate_sampling_majority=False,
):
self.num_mfs = len(input_graph.mfs)
self.num_grcs = len(input_graph.grcs)
self.min_eval_it = min_eval_it
self.init_mfs()
self.init_grcs(input_graph)
self.failure_rate = None
def reset(self):
for grc in self.grcs:
grc.reset()
for mf in self.mfs:
mf.reset()
# random.seed(0)
def init_mfs(self):
self.mfs = []
for i in range(self.num_mfs):
self.mfs.append(MossyFiber(mf_id=i))
def init_grcs(self, input_graph):
self.grcs = []
mapping = {}
counter = 0
for mf_id, mf in input_graph.mfs.items():
mapping[mf_id] = counter
counter += 1
for grc_id, grc in input_graph.grcs.items():
claws = [mapping[mf_id] for mf_id, _ in grc.edges]
self.grcs.append(
GranuleCell(
claws=claws,
)
)
def set_failure_rate(self, failure_rate, seed):
random.seed(seed)
for grc in self.grcs:
grc.broken = True if random.random() < failure_rate else False
def generate_patterns(
self,
count,
type='random',
# independent_noise=0,
):
patterns = []
# outputs = []
pattern_len = self.num_mfs
for i in range(count):
if type == 'random':
b = [None]*pattern_len
for k in range(pattern_len):
b[k] = random.random()
elif type == 'gaussian':
mu, sigma = 0.5, 0.2 # mean and standard deviation
b = np.random.normal(mu, sigma, pattern_len)
output = random.randint(0, 1)
# outputs.append(output)
patterns.append((b, output))
return patterns
def add_input_noise(cls, pattern, input_noise, scaled_noise=False):
if input_noise > 0:
pattern = copy.deepcopy(pattern)
if scaled_noise:
p0 = 1-input_noise
for i in range(len(pattern)):
r = random.random()
pattern[i] = pattern[i]*p0 + r*input_noise
else:
for i in range(len(pattern)):
if random.random() < input_noise:
pattern[i] = random.random()
return pattern
def train(
self,
patterns,
n_iteration=None,
# input_noise=None,
seed=0
):
if n_iteration is None:
n_iteration = len(patterns)*10
# if n_iteration < self.min_train_it:
# n_iteration = self.min_train_it
# if input_noise is None:
# input_noise = self.default_input_noise
# stats
activated_grcs = 0
random.seed(seed)
for i in range(n_iteration):
# print(patterns[random.randint(0, len(patterns)-1)])
ind = random.randint(0, len(patterns)-1)
# print(ind)
# print(patterns[ind])
pattern, output = patterns[ind]
pattern = self.add_input_noise(pattern, input_noise)
for grc in self.grcs:
grc.train(pattern, output)
if grc.activated:
activated_grcs += 1
# if i % 1000 == 0:
# print(f'{i}..')
activated_grcs_level = activated_grcs / len(self.grcs) / n_iteration
logger.debug(f'activated_grcs_level: {activated_grcs_level} ({activated_grcs / n_iteration} grcs out of {len(self.grcs)})')
def encode(self, input_pattern, out_array=None):
if out_array is None:
out_array = np.empty(len(self.grcs), dtype=np.uint8)
for i, grc in enumerate(self.grcs):
if grc.activate(input_pattern):
out_array[i] = 1
else:
out_array[i] = 0
return out_array
def evaluate(
self,
patterns,
n_iteration=None,
no_random=False,
# input_noise=None,
# decoder_error_margin=None,
seed=0,
calibrate_activation_level=False,
# output_act_lv=False,
):
if n_iteration is None:
n_iteration = 10*len(patterns)
n_iteration = max(self.min_eval_it, n_iteration)
if no_random:
n_iteration = len(patterns)
self.reset()
# for grc in self.grcs[0:20]:
# print(f'len: {len(grc.claws)}, scale: {grc.act_lv_scale:.2f}')
random.seed(seed)
for i in range(n_iteration):
if no_random:
pattern, output = patterns[i]
else:
pattern, output = patterns[random.randint(0, len(patterns)-1)]
self.set_mfs_pattern(pattern)
for grc in self.grcs:
act = grc.activate(pattern)
if calibrate_activation_level is not False:
self.calibrate_grc_activation_level(calibrate_activation_level)
return
def print_grc_weights(self, count=200):
weights = []
for i, grc in enumerate(self.grcs):
weights.append(grc.output_weight)
if i > count:
break
print(weights)
def set_mfs_pattern(self, pattern):
for mf in self.mfs:
mf.activate(pattern)
def get_mfs_activities(self):
# ret = []
# for mf in self.mfs:
# ret.append(mf.activations)
for mf in self.mfs:
xlen = len(self.mfs)
ylen = len(mf.activations)
break
ret = np.empty((ylen, xlen), dtype=np.float32)
for i, mf in enumerate(self.mfs):
for j, val in enumerate(mf.activations):
ret[j][i] = val
return ret
def get_grc_activities(self):
# ret = []
# for grc in self.grcs:
# ret.append(grc.activations)
# return ret
for mf in self.grcs:
xlen = len(self.grcs)
ylen = len(mf.activations)
break
ret = np.empty((ylen, xlen), dtype=np.uint8)
for i, mf in enumerate(self.grcs):
for j, val in enumerate(mf.activations):
ret[j][i] = val
return ret
def calibrate_grc_activation_level(self, act_lv=None):
if act_lv is None:
act_lv = self.act_lv
for grc in self.grcs:
grc.calibrate_activation_level(act_lv)
def add_noise_patterns(
self, patterns, prob, n, seed=None, scaled_noise=False):
if seed is not None:
random.seed(seed)
out_arr = []
for pattern_output in patterns:
# print(pattern_output)
pattern, output = pattern_output
for i in range(n):
new_pattern = self.add_input_noise(pattern, prob, scaled_noise)
out_arr.append((new_pattern, output))
return out_arr
def print_grc_act_lv_scale(self):
# scales = []
# for grc in self.grcs:
# scales.append(grc.act_lv_scale)
# print(scales)
print([grc.act_lv_scale for grc in self.grcs])
from collections import defaultdict
import itertools
def count_redundancy(g):
pos = 0
grcs_claws = []
mf_to_grcs = defaultdict(set)
for grc_id, dendrite_count in enumerate(g.dendrite_counts):
claws = []
for j in range(dendrite_count):
mf_id = g.dendrite_mf_map[pos]
pos += 1
claws.append(mf_id)
mf_to_grcs[mf_id].add(grc_id)
grcs_claws.append(set(claws))
nshares = defaultdict(int)
for mf_id, grcs in mf_to_grcs.items():
for pair in itertools.combinations(grcs, 2):
nshare = len(grcs_claws[pair[0]] & grcs_claws[pair[1]])
nshares[nshare] += 1
for n in sorted(nshares.keys()):
print(f'{n}: {nshares[n]/len(g.dendrite_counts)}')
# count_redundancy(sim_lite)
def generate_binary_patterns(pattern_len, count, f):
patterns = []
# np.random.seed(seed)
# random.seed(seed)
threshold = int(pattern_len*f+0.5)
base = np.zeros(pattern_len, dtype=np.uint8)
base[0:threshold] = 1
for i in range(count):
np.random.shuffle(base)
b = base.copy()
output = random.randint(0, 1)
patterns.append((b, output))
return patterns
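# Illustrative example: generate_binary_patterns(pattern_len=4, count=3, f=0.5)
# returns 3 (pattern, output) tuples, each pattern a shuffled 0/1 vector with
# int(4*0.5 + 0.5) == 2 active entries and output a random 0/1 label.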
# def add_input_noise(pattern, input_noise):
# if input_noise > 0:
# pattern = copy.deepcopy(pattern)
# for i in range(len(pattern)):
# if random.random() < input_noise:
# # pattern[i] = not pattern[i]
# pattern[i] = random.random()
# return pattern
# def add_noise_binary_patterns(pattern, prob, f=None, n=1, seed=0):
# if f is None:
# f = pattern.sum() / len(pattern)
# ones = []
# zeros = []
# for i, b in enumerate(pattern):
# if b:
# ones.append(i)
# else:
# zeros.append(i)
# ones = np.array(ones, dtype=np.uint32)
# zeros = np.array(zeros, dtype=np.uint32)
# ret = []
# num_flips = int(prob*f*len(pattern)+.5)
# for i in range(n):
# new_pat = pattern.copy()
# np.random.shuffle(ones)
# for j in range(num_flips):
# new_pat[ones[j]] = 0
# np.random.shuffle(zeros)
# for j in range(num_flips):
# new_pat[zeros[j]] = 1
# ret.append(new_pat)
# return ret
def add_noise_binary_patterns(pattern, prob, f=None, n=1, seed=0):
if f is None:
f = pattern.sum() / len(pattern)
ret = []
for i in range(n):
noisy_pattern = copy.deepcopy(pattern)
for i in range(len(noisy_pattern)):
if random.random() < prob:
r = random.random()
if r < f:
noisy_pattern[i] = 1
else:
noisy_pattern[i] = 0
ret.append(noisy_pattern)
return ret
def generate_random_pattern(pattern_len, type='random'):
b = [None]*pattern_len
for k in range(pattern_len):
b[k] = random.random()
return b
def make_noisy_patterns_float(
patterns, prob, n, seed=None, scaled_noise=False, signal_mask=None):
if signal_mask:
assert not scaled_noise
if seed is not None:
random.seed(seed)
out_arr = []
for pattern_output in patterns:
# print(pattern_output)
pattern, output = pattern_output
for i in range(n):
new_pattern = add_input_noise_float(pattern, prob, scaled_noise, signal_mask)
out_arr.append((new_pattern, output))
return out_arr
def add_input_noise_float(
pattern, input_noise, scaled_noise=False, signal_mask=None):
if input_noise > 0:
pattern = copy.deepcopy(pattern)
if scaled_noise:
p0 = 1-input_noise
for i in range(len(pattern)):
r = random.random()
pattern[i] = pattern[i]*p0 + r*input_noise
elif signal_mask:
for i in range(len(pattern)):
if not signal_mask[i]:
if random.random() < input_noise:
pattern[i] = random.random()
else:
for i in range(len(pattern)):
if random.random() < input_noise:
pattern[i] = random.random()
return pattern
def add_noise_to_core_patterns(
patterns, prob, n,
seed=None):
if seed:
random.seed(seed)
np.random.seed(seed)
out_arr = []
pattern_len = len(patterns[0][0])
assert pattern_len <= 65535
noise_mask_len = int(prob*pattern_len+0.5)
noise_mask = np.zeros(pattern_len, dtype=np.uint8)
noise_mask[0:noise_mask_len] = 1
for pattern_output in patterns:
pattern, output = pattern_output
np.random.shuffle(noise_mask)
for i in range(n):
random_pat = copy.deepcopy(pattern)
for j in range(pattern_len):
if noise_mask[j]:
random_pat[j] = random.random()
out_arr.append((random_pat, output))
return out_arr
# def add_noise_to_core_patterns(
# patterns, prob, n,
# seed=0):
# random.seed(seed)
# np.random.seed(seed)
# out_arr = []
# pattern_len = len(patterns[0][0])
# core_len = int(pattern_len*(1-prob))
# random_core_indices = [k for k in range(pattern_len)]
# assert pattern_len <= 65535
# random_core_indices = np.array(random_core_indices, dtype=np.uint16)
# for pattern_output in patterns:
# pattern, output = pattern_output
# np.random.shuffle(random_core_indices)
# for i in range(n):
# random_pat = generate_random_pattern(pattern_len)
# for j in range(core_len):
# random_pat[random_core_indices[j]] = pattern[j]
# out_arr.append((random_pat, output))
# return out_arr
| 30.837338
| 131
| 0.556195
|
b18b95973d20e571990f73a94bf956ca8e68f980
| 3,614
|
py
|
Python
|
lec22/step5.py
|
christophernhill-dev/fall-2021-12.010
|
5b417379d436896c6c20886780a93a4876797a1c
|
[
"MIT"
] | 2
|
2021-09-09T19:50:06.000Z
|
2021-09-16T19:09:02.000Z
|
lec22/step5.py
|
christophernhill-dev/fall-2021-12.010
|
5b417379d436896c6c20886780a93a4876797a1c
|
[
"MIT"
] | null | null | null |
lec22/step5.py
|
christophernhill-dev/fall-2021-12.010
|
5b417379d436896c6c20886780a93a4876797a1c
|
[
"MIT"
] | 8
|
2021-12-02T02:15:45.000Z
|
2021-12-02T20:30:39.000Z
|
#!/usr/bin/env python
#
# From N processes, each with their own id (rank), use MPI library to report
# each process rank, total number of processes and the name of the host machine
# on which the process resides.
#
from mpi4py import MPI
import numpy as np
import matplotlib.pyplot as plt
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
host = MPI.Get_processor_name()
print('I am rank','%4d'%(rank),'of',size,'executing on',host)
#
# Have all the processes send a message to the
# one lower rank process, except for rank 0 which
# sends to the highest rank process.
#
send_message='Hello from rank %d'%(rank)
msg_dest=rank-1
msg_srce=rank+1
if rank == 0:
msg_dest=size-1
if rank == size-1:
msg_srce=0
comm.send(send_message,dest=msg_dest)
recv_message=comm.recv(source=msg_srce)
print( 'I just sent "%s" to %4d'%(send_message,msg_dest) )
print( 'I just received "%s" from %4d'%(recv_message,msg_srce) )
#
# Lets divide a 1d array of larr mesh points across the ranks,
# such that each rank has matching number of points within
# 1. Each divided part has nh halo points at each end that hold
# copies of points from its neighbors (rank-1 and rank+1).
#
nh=2 # halo width
larr=10000
larr=size*100
rem=larr%size
nbase=int(larr/size)
if rank < rem:
myl=nbase+1
mygstart=rank*(myl)
mygend=mygstart+myl-1
else:
myl=nbase
mygstart=rank*(myl)+rem
mygend=mygstart+myl-1
total_len=comm.allreduce(myl,op=MPI.SUM)
print( 'Rank %4d local array section length %d'%(rank,myl) )
print( 'Rank %4d total array length %d'%(rank,total_len) )
mysec=np.zeros(myl+nh*2)
i0loh_indices=[*range(0,nh)]
i0hih_indices=[*range(myl+nh,myl+2*nh)]
i0sec_indices=[*range(nh,myl+nh)]
comm.Barrier()
print( 'Rank %4d local section is %6d to %6d'%(rank,mygstart,mygend) )
#
# Each rank writes rank number to the local sections of mysec
# and then sends and receives updates from neighbors
#
if nh > myl:
print('ERROR: nh must be less than myl')
mysec[i0sec_indices]=rank
i0rm1=rank-1
i0rp1=rank+1
if rank == 0:
i0rm1=size-1
if rank == size-1:
i0rp1=0
# Send to my n-1 side and receive from my n+1 side
comm.send(mysec[i0sec_indices][0:nh],dest=i0rm1)
mysec[i0hih_indices]=comm.recv(source=i0rp1)
# Send to my n+1 side and receive from my n-1 side
comm.send(mysec[i0sec_indices][-nh:],dest=i0rp1)
mysec[i0loh_indices]=comm.recv(source=i0rm1)
#
# Now lets check what is in each section
#
print('Rank %4d mysec values ='%(rank),mysec)
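#
# halo() packages the exchange above as a reusable helper: send the first/last
# nh interior points to the rank-1 / rank+1 neighbours and receive their edge
# values into the local halo slots.
#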
def halo(fld,isec,im1,ip1,ihi,ilo,nh):
comm.send(fld[isec][0:nh],dest=im1)
fld[ihi]=comm.recv(source=ip1)
comm.send(fld[isec][-nh:],dest=ip1)
fld[ilo]=comm.recv(source=im1)
#
# OK now lets start thinking about a numerical problem
# - we will use several variables
# 1. field values and the first and second derivatives in x
# 2. grid locations
# 3. diffusion coefficient
# 4. initial conditions
# 5. timestep, domain size
# Set parameters
Lx=1.
dx=Lx/larr
rdx=1./dx
dt=5.
# 1. setting initial conditions
# Lets create an array of grid locations
xc=np.zeros(myl+nh*2)
xc[i0sec_indices]=( np.linspace(mygstart,mygend,myl)+0.5 )*dx
print('Rank %4d xc values ='%(rank),xc)
halo(xc,i0sec_indices,i0rm1,i0rp1,i0hih_indices,i0loh_indices,nh)
print('Rank %4d xc values ='%(rank),xc)
phi_init=np.zeros(myl+nh*2)
phi_init[i0sec_indices]=np.exp( -50.*(xc[i0sec_indices]/Lx-1./2.)**2 )
print('Rank %4d phi_init values ='%(rank),phi_init)
halo(phi_init,i0sec_indices,i0rm1,i0rp1,i0hih_indices,i0loh_indices,nh)
print('Rank %4d phi_init values ='%(rank),phi_init)
plt.plot(phi_init)
plt.savefig('phi_init_rank%d.png'%(rank))
MPI.Finalize()
| 27.378788
| 80
| 0.722745
|
b996966f5ee392d24ca229934e33b5e27ce1dd77
| 104
|
py
|
Python
|
src/api/pdi/domain/lookup/LookupDto.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | 1
|
2020-12-18T21:37:28.000Z
|
2020-12-18T21:37:28.000Z
|
src/api/pdi/domain/lookup/LookupDto.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | null | null | null |
src/api/pdi/domain/lookup/LookupDto.py
|
ahmetcagriakca/pythondataintegrator
|
079b968d6c893008f02c88dbe34909a228ac1c7b
|
[
"MIT"
] | 1
|
2020-12-18T21:37:31.000Z
|
2020-12-18T21:37:31.000Z
|
from dataclasses import dataclass
@dataclass
class LookupDto:
Id: int = None
Name: int = None
| 13
| 33
| 0.701923
|
f5670e1a09cbf4be4de71a208d20f78092619f65
| 5,794
|
py
|
Python
|
scripts/penta10test.py
|
emathew1/MPI_3DCompact
|
1945eabdc240e8754c9ea356ba954683dee0149f
|
[
"MIT"
] | 2
|
2021-01-14T21:13:53.000Z
|
2022-01-16T23:03:43.000Z
|
scripts/penta10test.py
|
emathew1/MPI_3DCompact
|
1945eabdc240e8754c9ea356ba954683dee0149f
|
[
"MIT"
] | null | null | null |
scripts/penta10test.py
|
emathew1/MPI_3DCompact
|
1945eabdc240e8754c9ea356ba954683dee0149f
|
[
"MIT"
] | 3
|
2018-11-17T21:24:57.000Z
|
2020-08-02T03:19:42.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 10 21:24:14 2019
@author: edwin
"""
# %%
import numpy as np
import matplotlib.pyplot as plt
#%%
N = 25
A = np.zeros((N,N))
B = np.zeros((N,N))
#Interior points
#Penta 10
beta = 1.0/20.0;
alpha = 1.0/2.0;
a = 17.0/12.0;
b = 101.0/150.0;
c = 1.0/100.0;
#Tri6
#beta = 0;
#alpha = 1.0/3.0;
#a = 14.0/9.0;
#b = 1.0/9.0;
#c = 0.0/100.0;
#Point 0, N-1
#Penta 10
#beta1 = 28.0;
#alpha1 = 16.0;
#a1 = -1181.0/280.0;
#b1 = -892.0/35.0;
#c1 = 77.0/5.0;
#d1 = 56.0/3.0;
#e1 = -35.0/6.0;
#f1 = 28.0/15.0;
#g1 = -7.0/15.0;
#h1 = 8.0/105.0;
#i1 = -1.0/168.0;
#j1 = 0.0
#Tri 10
beta1 = 0;
alpha1 = 9.0;
a1 = -2485.0/649.0;
b1 = -1809.0/280.0;
c1 = 18.0/1.0;
d1 = -14.0/1.0;
e1 = 21.0/2.0;
f1 = -63.0/10.0;
g1 = 14.0/5.0;
h1 = -6.0/7.0;
i1 = 9.0/56.0;
j1 = -1.0/72.0;
#Penta 8
#beta1 = 15.0;
#alpha1 = 8.0;
#a1 = -79.0/20.0;
#b1 = -77.0/5.0;
#c1 = 55.0/4.0;
#d1 = 20.0/3.0;
#e1 = -5.0/4.0;
#f1 = 1.0/5.0;
#g1 = -1.0/60.0;
#h1 = -0.0/7.0;
#i1 = 0.0/56.0;
#j1 = -0.0/72.0;
#Tri 6
#beta1 = 0;
#alpha1 = 5.0;
#a1 = -197.0/60.0;
#b1 = -5.0/12.0;
#c1 = 5.0/1.0;
#d1 = -5.0/3.0;
#e1 = 5.0/12.0;
#f1 = -1.0/20.0;
#g1 = 0.0#14.0/5.0;
#h1 = 0.0#-6.0/7.0;
#i1 = 0.0# 9.0/56.0;
#j1 = 0.0#-1.0/72.0;
#Point 1, N-2
#Penta 10
beta2 = 5.0/3.0;
alpha2_1 = 1.0/21.0;
alpha2_2 = 3.0;
a2 = -544.0/2581.0;
b2 = -39.0/20.0;
c2 = -17.0/20.0;
d2 = 95.0/36.0;
e2 = 5.0/12.0;
f2 = -1.0/20.0;
g2 = 1.0/180.0;
h2 = -1.0/2940.0;
#Tri6
#beta2 = 0.0;
#alpha2_1 = 1.0/8.0;
#alpha2_2 = 3.0/4.0;
#a2 = -43/96.0;
#b2 = -5.0/6.0;
#c2 = 9.0/8.0;
#d2 = 1.0/6.0;
#e2 = -1.0/96.0;
#f2 = 0.0#-1.0/20.0;
#g2 = 0.0# 1.0/180.0;
#h2 = 0.0#-1.0/2940.0;
#Penta6
#beta2 = 0.0;
#alpha2_1 = 1.0/8.0;
#alpha2_2 = 3.0/4.0;
#a2 = -43/96.0;
#b2 = -5.0/6.0;
#c2 = 9.0/8.0;
#d2 = 1.0/6.0;
#e2 = -1.0/96.0;
#f2 = 0.0#-1.0/20.0;
#g2 = 0.0# 1.0/180.0;
#h2 = 0.0#-1.0/2940.0;
#Point 2, N-3
#Penta 10
#beta3_1 = 1.0/90.0;
#beta3_2 = 1.0;
#alpha3_1 = 4.0/15.0;
#alpha3_2 = 8.0/9.0;
#a3 = -34.0/675.0;
#b3 = -127.0/225.0;
#c3 = -7.0/12.0;
#d3 = 20.0/27.0;
#e3 = 4.0/9.0;
#f3 = 1.0/75.0;
#g3 = -1.0/2700.0;
#h3 = 0
#i3 = 0
#Tri 10
#beta3_1 = 0/90.0;
#beta3_2 = 0.0;
#alpha3_1 = 1.0/7.0;
#alpha3_2 = 1.0;
#a3 = -1.0/168.0;
#b3 = -433.0/980.0;
#c3 = -19.0/20.0;
#d3 = 21.0/20.0;
#e3 = 5.0/12.0;
#f3 = -1.0/20.0;
#g3 = 1.0/60.0;
#h3 = -1.0/420.0
#i3 = 1.0/5880.0
#Tri 6 interior
#beta3_1 = 0.0;
#beta3_2 = 0.0;
#alpha3_1 = 1.0/3.0;
#alpha3_2 = 1.0/3.0;
#a3 = -1.0/9.0/4.0;
#b3 = -14/9.0/2;
#c3 = 0.0;
#d3 = 14/9/2.0;
#e3 = 1/9/4.0;
#f3 = 0;
#g3 = 0;
#h3 = 0
#i3 = 0
#Penta 8 interior
beta3_1 = 1.0/36.0;
beta3_2 = 1.0/36.0;
alpha3_1 = 4.0/9.0;
alpha3_2 = 4.0/9.0;
a3 = -25.0/54.0/4.0;
b3 = -40.0/27.0/2.0;
c3 = 0.0;
d3 = 40.0/27.0/2.0;
e3 = 25.0/54.0/4.0;
f3 = 0;
g3 = 0;
h3 = 0
i3 = 0
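# Assemble the compact finite-difference scheme A*fprime = (1/dx)*B*f:
# A holds the implicit (LHS) weights alpha/beta, B the explicit (RHS)
# stencil a, b, c; boundary rows are overwritten below with the one-sided
# coefficients defined above.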
for i in range(0,N):
A[i,i] = 1
for i in range(0,N-1):
A[i,i+1] = alpha
A[i+1,i] = alpha
B[i,i+1] = a/2
B[i+1,i] = -a/2
for i in range(0,N-2):
A[i,i+2] = beta
A[i+2,i] = beta
B[i,i+2] = b/4
B[i+2,i] = -b/4
for i in range(0,N-3):
B[i,i+3] = c/6
B[i+3,i] = -c/6
Bperiodic = np.copy(B)
Aperiodic = np.copy(A)
Bperiodic[0,-1] = -a/2
Bperiodic[0,-2] = -b/4
Bperiodic[0,-3] = -c/6
Bperiodic[1,-1] = -b/4
Bperiodic[1,-2] = -c/6
Bperiodic[2,-1] = -c/6
Bperiodic[-1,0] = a/2
Bperiodic[-1,1] = b/4
Bperiodic[-1,2] = c/6
Bperiodic[-2,0] = b/4
Bperiodic[-2,1] = c/6
Bperiodic[-3,0] = c/6
Aperiodic[0,-1] = alpha
Aperiodic[0,-2] = beta
Aperiodic[1,-1] = beta
Aperiodic[-1,0] = alpha
Aperiodic[-1,1] = beta
Aperiodic[-2,0] = beta
B[0,0] = a1
B[0,1] = b1
B[0,2] = c1
B[0,3] = d1
B[0,4] = e1
B[0,5] = f1
B[0,6] = g1
B[0,7] = h1
B[0,8] = i1
B[0,9] = j1
B[-1,-1] = -a1
B[-1,-2] = -b1
B[-1,-3] = -c1
B[-1,-4] = -d1
B[-1,-5] = -e1
B[-1,-6] = -f1
B[-1,-7] = -g1
B[-1,-8] = -h1
B[-1,-9] = -i1
B[-1,-10]= -j1
B[1,0] = a2
B[1,1] = b2
B[1,2] = c2
B[1,3] = d2
B[1,4] = e2
B[1,5] = f2
B[1,6] = g2
B[1,7] = h2
B[-2,-1] = -a2
B[-2,-2] = -b2
B[-2,-3] = -c2
B[-2,-4] = -d2
B[-2,-5] = -e2
B[-2,-6] = -f2
B[-2,-7] = -g2
B[-2,-8] = -h2
B[2,0] = a3
B[2,1] = b3
B[2,2] = c3
B[2,3] = d3
B[2,4] = e3
B[2,5] = f3
B[2,6] = g3
B[2,7] = h3
B[2,8] = i3
B[-3,-1] = -a3
B[-3,-2] = -b3
B[-3,-3] = -c3
B[-3,-4] = -d3
B[-3,-5] = -e3
B[-3,-6] = -f3
B[-3,-7] = -g3
B[-3,-8] = -h3
B[-3,-9] = -i3
A[0,1] = alpha1
A[0,2] = beta1
A[-1,-2] = alpha1
A[-1,-3] = beta1
A[1,0] = alpha2_1
A[1,2] = alpha2_2
A[1,3] = beta2
A[-2, -1] = alpha2_1
A[-2, -3] = alpha2_2
A[-2, -4] = beta2
A[2,0] = beta3_1
A[2,1] = alpha3_1
A[2,3] = alpha3_2
A[2,4] = beta3_2
A[-3, -1] = beta3_1
A[-3, -2] = alpha3_1
A[-3, -4] = alpha3_2
A[-3, -5] = beta3_2
x = np.zeros((N,1))
xperiodic = np.zeros((N,1))
for i in range(0,N):
x[i] = np.sin((float(i)/float(N-1))*2.0*np.pi)
xperiodic[i] = np.sin((float(i)/float(N))*2.0*np.pi)
# %%
dx = 2.0*np.pi/(N-1)
dxp = 2.0*np.pi/N
RHS = (1/dx)*np.matmul(B,x)
RHSperiodic = (1/dxp)*np.matmul(Bperiodic,xperiodic)
#%%
y = np.linalg.solve(A,RHS)
yperiodic = np.linalg.solve(Aperiodic, RHSperiodic)
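#%%
# Hedged illustrative check, not part of the original script: since x samples sin on
# [0, 2*pi], the compact-scheme derivative y should approximate cos at the same nodes.
# The prints below report the max pointwise error for the boundary-closed and the
# periodic variants; only names already defined above are reused.
theta = np.linspace(0.0, 2.0 * np.pi, N)
exact = np.cos(theta).reshape((N, 1))
print("max abs error, boundary closure:", np.max(np.abs(y - exact)))
theta_p = np.arange(N) * dxp
exact_p = np.cos(theta_p).reshape((N, 1))
print("max abs error, periodic:", np.max(np.abs(yperiodic - exact_p)))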
| 16.843023
| 56
| 0.427856
|
7d08331d8a5c2cd91a3a93971be64ee733bca531
| 981
|
py
|
Python
|
src/calendar_export.py
|
soxidus/Boardgame-Bot
|
6c54ce090b0ddb3f41431c4d17e6b89523f817d8
|
[
"MIT"
] | null | null | null |
src/calendar_export.py
|
soxidus/Boardgame-Bot
|
6c54ce090b0ddb3f41431c4d17e6b89523f817d8
|
[
"MIT"
] | 1
|
2020-07-05T19:36:12.000Z
|
2020-07-05T19:36:12.000Z
|
src/calendar_export.py
|
soxidus/Boardgame-Bot
|
6c54ce090b0ddb3f41431c4d17e6b89523f817d8
|
[
"MIT"
] | null | null | null |
from icalendar import Calendar, Event
from datetime import datetime
from os import remove
from parse_strings import generate_uuid_32
def create_ics_file(title, date):
calendar = Calendar()
calendar.add('prodid', '-//Meeple - Auto-Ical//')
calendar.add('version', '2.0')
gamenight_event = Event()
gamenight_event.add('dtstamp', datetime.now())
gamenight_event.add('summary', title)
gamenight_event.add('description', 'Created by Meeple the boardgame bot')
gamenight_event.add('dtstart', datetime.strptime(str(date) + " 19", "%Y-%m-%d 00:00:00 %H"))
gamenight_event.add('dtend', datetime.strptime(str(date) + " 23", "%Y-%m-%d 00:00:00 %H"))
calendar.add_component(gamenight_event)
filename = 'cal-' + generate_uuid_32() + '.ics'
with open(filename, 'wb') as my_file:
my_file.write(calendar.to_ical())
return filename
def delete_ics_file(filename):
try:
remove(filename)
except OSError:
pass
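# Hedged usage sketch, not taken from the Boardgame-Bot code: create_ics_file expects a
# date whose str() includes " 00:00:00" (e.g. a midnight datetime), because of the
# strptime format above; the title and date below are made-up examples.
#
#     from datetime import datetime
#     ics_path = create_ics_file("Game night", datetime(2020, 7, 5))
#     # ... hand the file to whatever sends it ...
#     delete_ics_file(ics_path)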
| 30.65625
| 96
| 0.678899
|
ced4760658fbef993e0b2f4523353a3e1caca2e2
| 2,598
|
py
|
Python
|
tests/tests.py
|
qureshizawar/CUDA-quartic-solver
|
97a24e5e1818b51c6bd3ac6fc9728f1dc2a3743b
|
[
"MIT"
] | 6
|
2020-04-27T15:34:08.000Z
|
2022-02-23T06:34:18.000Z
|
tests/tests.py
|
qureshizawar/CUDA-quartic-solver
|
97a24e5e1818b51c6bd3ac6fc9728f1dc2a3743b
|
[
"MIT"
] | null | null | null |
tests/tests.py
|
qureshizawar/CUDA-quartic-solver
|
97a24e5e1818b51c6bd3ac6fc9728f1dc2a3743b
|
[
"MIT"
] | 2
|
2020-05-05T11:45:48.000Z
|
2021-07-21T06:33:51.000Z
|
import numpy as np
import QuarticSolver
def test1():
eps = 1e-5
correct_minimum = -2
N = 1<<9
A = np.ones(N)*2
B = np.ones(N)*-4
C = np.ones(N)*-22
D = np.ones(N)*24
E = np.ones(N)*2
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<13
A = np.ones(N)*2
B = np.ones(N)*-4
C = np.ones(N)*-22
D = np.ones(N)*24
E = np.ones(N)*2
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<20
A = np.ones(N)*2
B = np.ones(N)*-4
C = np.ones(N)*-22
D = np.ones(N)*24
E = np.ones(N)*2
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
def test2():
eps = 1e-5
correct_minimum = -0.5688
N = 1<<9
A = np.ones(N)*14
B = np.ones(N)*-11
C = np.ones(N)*51
D = np.ones(N)*79
E = np.ones(N)*1
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<13
A = np.ones(N)*14
B = np.ones(N)*-11
C = np.ones(N)*51
D = np.ones(N)*79
E = np.ones(N)*1
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<20
A = np.ones(N)*14
B = np.ones(N)*-11
C = np.ones(N)*51
D = np.ones(N)*79
E = np.ones(N)*1
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
def test3():
eps = 1e-5
correct_minimum = -21.75
N = 1<<9
A = np.ones(N)*3
B = np.ones(N)*87
C = np.ones(N)*0
D = np.ones(N)*0
E = np.ones(N)*0
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<13
A = np.ones(N)*3
B = np.ones(N)*87
C = np.ones(N)*0
D = np.ones(N)*0
E = np.ones(N)*0
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
N = 1<<20
A = np.ones(N)*3
B = np.ones(N)*87
C = np.ones(N)*0
D = np.ones(N)*0
E = np.ones(N)*0
minimum = QuarticSolver.QuarticMinimum(A,B,C,D,E,True)
res = np.sum(minimum)/N
assert abs(res-correct_minimum)<eps
if __name__=='__main__':
QuarticSolver.dry_run(1<<20)
test1()
test2()
test3()
print('all test passed!')
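    # Hedged cross-check, not part of the original tests: for test3 the quartic is
    # f(x) = 3x^4 + 87x^3, so f'(x) = 12x^3 + 261x^2 = 3x^2(4x + 87) and the global
    # minimum sits at x = -87/4 = -21.75, which is the correct_minimum used above.
    crit = np.roots([12.0, 261.0, 0.0, 0.0]).real
    f = lambda x: 3.0 * x ** 4 + 87.0 * x ** 3
    print('reference minimum for test3:', crit[np.argmin(f(crit))])  # expected: -21.75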
| 20.951613
| 58
| 0.563895
|
c3d827db7800a3fecaef90199b3f9326d68cb7c2
| 4,179
|
py
|
Python
|
flearn/trainers/fedprox.py
|
xiby/FedProx
|
e9423634e7d01bcc72df614dd941ffdd3d1266b5
|
[
"MIT"
] | 371
|
2019-01-26T01:15:53.000Z
|
2022-03-29T17:41:41.000Z
|
flearn/trainers/fedprox.py
|
xiby/FedProx
|
e9423634e7d01bcc72df614dd941ffdd3d1266b5
|
[
"MIT"
] | 24
|
2019-04-19T12:22:05.000Z
|
2022-02-09T23:29:38.000Z
|
flearn/trainers/fedprox.py
|
xiby/FedProx
|
e9423634e7d01bcc72df614dd941ffdd3d1266b5
|
[
"MIT"
] | 115
|
2019-07-02T04:11:14.000Z
|
2022-03-19T16:57:00.000Z
|
import numpy as np
from tqdm import trange, tqdm
import tensorflow as tf
from .fedbase import BaseFedarated
from flearn.optimizer.pgd import PerturbedGradientDescent
from flearn.utils.tf_utils import process_grad, process_sparse_grad
class Server(BaseFedarated):
def __init__(self, params, learner, dataset):
print('Using Federated prox to Train')
self.inner_opt = PerturbedGradientDescent(params['learning_rate'], params['mu'])
super(Server, self).__init__(params, learner, dataset)
def train(self):
'''Train using Federated Proximal'''
print('Training with {} workers ---'.format(self.clients_per_round))
for i in range(self.num_rounds):
# test model
if i % self.eval_every == 0:
stats = self.test() # have set the latest model for all clients
stats_train = self.train_error_and_loss()
tqdm.write('At round {} accuracy: {}'.format(i, np.sum(stats[3])*1.0/np.sum(stats[2]))) # testing accuracy
tqdm.write('At round {} training accuracy: {}'.format(i, np.sum(stats_train[3])*1.0/np.sum(stats_train[2])))
tqdm.write('At round {} training loss: {}'.format(i, np.dot(stats_train[4], stats_train[2])*1.0/np.sum(stats_train[2])))
model_len = process_grad(self.latest_model).size
global_grads = np.zeros(model_len)
client_grads = np.zeros(model_len)
num_samples = []
local_grads = []
for c in self.clients:
num, client_grad = c.get_grads(model_len)
local_grads.append(client_grad)
num_samples.append(num)
global_grads = np.add(global_grads, client_grad * num)
global_grads = global_grads * 1.0 / np.sum(np.asarray(num_samples))
difference = 0
for idx in range(len(self.clients)):
difference += np.sum(np.square(global_grads - local_grads[idx]))
difference = difference * 1.0 / len(self.clients)
tqdm.write('gradient difference: {}'.format(difference))
indices, selected_clients = self.select_clients(i, num_clients=self.clients_per_round) # uniform sampling
np.random.seed(i) # make sure that the stragglers are the same for FedProx and FedAvg
active_clients = np.random.choice(selected_clients, round(self.clients_per_round * (1 - self.drop_percent)), replace=False)
csolns = [] # buffer for receiving client solutions
self.inner_opt.set_params(self.latest_model, self.client_model)
for idx, c in enumerate(selected_clients.tolist()):
# communicate the latest model
c.set_params(self.latest_model)
total_iters = int(self.num_epochs * c.num_samples / self.batch_size)+2 # randint(low,high)=[low,high)
# solve minimization locally
if c in active_clients:
soln, stats = c.solve_inner(num_epochs=self.num_epochs, batch_size=self.batch_size)
else:
#soln, stats = c.solve_iters(num_iters=np.random.randint(low=1, high=total_iters), batch_size=self.batch_size)
soln, stats = c.solve_inner(num_epochs=np.random.randint(low=1, high=self.num_epochs), batch_size=self.batch_size)
# gather solutions from client
csolns.append(soln)
# track communication cost
self.metrics.update(rnd=i, cid=c.id, stats=stats)
# update models
self.latest_model = self.aggregate(csolns)
self.client_model.set_params(self.latest_model)
# final test model
stats = self.test()
stats_train = self.train_error_and_loss()
self.metrics.accuracies.append(stats)
self.metrics.train_accuracies.append(stats_train)
tqdm.write('At round {} accuracy: {}'.format(self.num_rounds, np.sum(stats[3])*1.0/np.sum(stats[2])))
tqdm.write('At round {} training accuracy: {}'.format(self.num_rounds, np.sum(stats_train[3])*1.0/np.sum(stats_train[2])))
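# Hedged usage sketch, not from the FedProx repository: the base class is assumed to pull
# the remaining hyperparameters (num_rounds, clients_per_round, eval_every, drop_percent,
# num_epochs, batch_size) out of the same params dict; learner and dataset come from the
# caller's data pipeline. Illustrative only:
#
#     params = {'learning_rate': 0.01, 'mu': 0.01, 'num_rounds': 200, 'eval_every': 10,
#               'clients_per_round': 10, 'drop_percent': 0.0, 'num_epochs': 20,
#               'batch_size': 10}
#     server = Server(params, learner, dataset)
#     server.train()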
| 48.034483
| 136
| 0.627423
|
e7e03a2b70a5f153fb24837a79529dacab8727e7
| 24,720
|
py
|
Python
|
zerver/webhooks/bitbucket2/tests.py
|
coderkoala/zulipnew
|
8aa03dc71948559cde7b436a72a182f8680f7b47
|
[
"Apache-2.0"
] | 4
|
2019-06-04T09:06:53.000Z
|
2019-06-04T09:07:47.000Z
|
zerver/webhooks/bitbucket2/tests.py
|
BackGroundC/zulip
|
2bd6d275a70a7683986edc72fa8585726e976604
|
[
"Apache-2.0"
] | null | null | null |
zerver/webhooks/bitbucket2/tests.py
|
BackGroundC/zulip
|
2bd6d275a70a7683986edc72fa8585726e976604
|
[
"Apache-2.0"
] | 1
|
2020-02-06T13:56:40.000Z
|
2020-02-06T13:56:40.000Z
|
# -*- coding: utf-8 -*-
from mock import MagicMock, patch
from zerver.lib.test_classes import WebhookTestCase
class Bitbucket2HookTests(WebhookTestCase):
STREAM_NAME = 'bitbucket2'
URL_TEMPLATE = "/api/v1/external/bitbucket2?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = 'bitbucket2'
EXPECTED_TOPIC = u"Repository name"
EXPECTED_TOPIC_PR_EVENTS = u"Repository name / PR #1 new commit"
EXPECTED_TOPIC_ISSUE_EVENTS = u"Repository name / Issue #1 Bug"
EXPECTED_TOPIC_BRANCH_EVENTS = u"Repository name / master"
def test_bitbucket2_on_push_event(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info)
self.send_and_test_stream_message('push', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None:
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 3 commits to branch master. Commits by zbenjamin (2) and kolaszek (1).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*2)
self.send_and_test_stream_message('push_multiple_committers', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_multiple_committers_with_others_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n'
expected_message = u"""kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by james (3), Brendon (2), Tomasz (2) and others (3).\n\n{}* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))""".format(commit_info*9)
self.send_and_test_stream_message('push_multiple_committers_with_others', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = u'* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n{}".format(commit_info)
self.send_and_test_stream_message('push', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_above_limit_event(self) -> None:
commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format(
(commit_info * 5),
)
self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
commit_info = '* a ([6f161a7](https://bitbucket.org/kolaszek/repository-name/commits/6f161a7bced94430ac8947d87dbf45c6deee3fb0))\n'
expected_message = u"kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branches/compare/6f161a7bced94430ac8947d87dbf45c6deee3fb0..1221f2fda6f1e3654b09f1f3a08390e4cb25bb48) 5 commits to branch master. Commits by Tomasz (5).\n\n{}[and more commit(s)]".format(
(commit_info * 5),
)
self.send_and_test_stream_message('push_commits_above_limit', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_force_push_event(self) -> None:
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12."
self.send_and_test_stream_message('force_push', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_force_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
expected_message = u"kolaszek [force pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) to branch master. Head is now 25f93d22b719e2d678a7ad5ee0ef0d1fcdf39c12."
self.send_and_test_stream_message('force_push', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_remove_branch_event(self) -> None:
expected_message = u"kolaszek deleted branch master."
self.send_and_test_stream_message('remove_branch', self.EXPECTED_TOPIC_BRANCH_EVENTS, expected_message)
def test_bitbucket2_on_fork_event(self) -> None:
expected_message = u"User Tomasz(login: kolaszek) forked the repository into [kolaszek/repository-name2](https://bitbucket.org/kolaszek/repository-name2)."
self.send_and_test_stream_message('fork', self.EXPECTED_TOPIC, expected_message)
def test_bitbucket2_on_commit_comment_created_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74#comment-3354963) on [32c4ea1](https://bitbucket.org/kolaszek/repository-name/commits/32c4ea19aa3af10acd08e419e2c354941a365d74):\n~~~ quote\nNice fix!\n~~~"
self.send_and_test_stream_message('commit_comment_created', self.EXPECTED_TOPIC, expected_message)
def test_bitbucket2_on_commit_status_changed_event(self) -> None:
expected_message = u"[System mybuildtool](https://my-build-tool.com/builds/MY-PROJECT/BUILD-777) changed status of [9fec847](https://bitbucket.org/kolaszek/repository-name/commits/9fec847784abb10b2fa567ee63b85bd238955d0e) to SUCCESSFUL."
self.send_and_test_stream_message('commit_status_changed', self.EXPECTED_TOPIC, expected_message)
def test_bitbucket2_on_issue_created_event(self) -> None:
expected_message = u"kolaszek created [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug) (assigned to kolaszek):\n\n~~~ quote\nSuch a bug\n~~~"
self.send_and_test_stream_message('issue_created', self.EXPECTED_TOPIC_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek created [Issue #1 Bug](https://bitbucket.org/kolaszek/repository-name/issues/2/bug) (assigned to kolaszek):\n\n~~~ quote\nSuch a bug\n~~~"
self.send_and_test_stream_message('issue_created', expected_topic, expected_message)
def test_bitbucket2_on_issue_updated_event(self) -> None:
expected_message = u"kolaszek updated [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)."
self.send_and_test_stream_message('issue_updated', self.EXPECTED_TOPIC_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_commented_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/issues/2#comment-28973596) on [Issue #1](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)."
self.send_and_test_stream_message('issue_commented', self.EXPECTED_TOPIC_ISSUE_EVENTS, expected_message)
def test_bitbucket2_on_issue_commented_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/issues/2#comment-28973596) on [Issue #1 Bug](https://bitbucket.org/kolaszek/repository-name/issues/2/bug)."
self.send_and_test_stream_message('issue_commented', expected_topic, expected_message)
def test_bitbucket2_on_pull_request_created_event(self) -> None:
expected_message = u"kolaszek created [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1) (assigned to tkolek) from `new-branch` to `master`:\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:created'
}
self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek created [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/1) (assigned to tkolek) from `new-branch` to `master`:\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:created'
}
self.send_and_test_stream_message('pull_request_created_or_updated', expected_topic, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_updated_event(self) -> None:
expected_message = u"kolaszek updated [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1) (assigned to tkolek) from `new-branch` to `master`:\n\n~~~ quote\ndescription\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:updated'
}
self.send_and_test_stream_message('pull_request_created_or_updated', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_approved_event(self) -> None:
expected_message = u"kolaszek approved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:approved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_approved_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek approved [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:approved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', expected_topic, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_unapproved_event(self) -> None:
expected_message = u"kolaszek unapproved [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:unapproved'
}
self.send_and_test_stream_message('pull_request_approved_or_unapproved', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_declined_event(self) -> None:
expected_message = u"kolaszek rejected [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:rejected'
}
self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_fulfilled_event(self) -> None:
expected_message = u"kolaszek merged [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/1)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:fulfilled'
}
self.send_and_test_stream_message('pull_request_fulfilled_or_rejected', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_created_event(self) -> None:
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3):\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_created'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_created_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek [commented](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/3):\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_created'
}
self.send_and_test_stream_message('pull_request_comment_action', expected_topic, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_updated_event(self) -> None:
expected_message = u"kolaszek updated a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3):\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_updated'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_updated_with_custom_topic_in_url(self) -> None:
self.url = self.build_webhook_url(topic="notifications")
expected_topic = u"notifications"
expected_message = u"kolaszek updated a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1 new commit](https://bitbucket.org/kolaszek/repository-name/pull-requests/3):\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_updated'
}
self.send_and_test_stream_message('pull_request_comment_action', expected_topic, expected_message, **kwargs)
def test_bitbucket2_on_pull_request_comment_deleted_event(self) -> None:
expected_message = u"kolaszek deleted a [comment](https://bitbucket.org/kolaszek/repository-name/pull-requests/3/_/diff#comment-20576503) on [PR #1](https://bitbucket.org/kolaszek/repository-name/pull-requests/3):\n\n~~~ quote\nComment1\n~~~"
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:comment_deleted'
}
self.send_and_test_stream_message('pull_request_comment_action', self.EXPECTED_TOPIC_PR_EVENTS, expected_message, **kwargs)
def test_bitbucket2_on_repo_updated_event(self) -> None:
expected_message = u"eeshangarg changed the website of the **new-name** repo to **http://zulipchat.com**.\neeshangarg changed the name of the **new-name** repo from **test-repo** to **new-name**.\neeshangarg changed the language of the **new-name** repo to **python**.\neeshangarg changed the full name of the **new-name** repo from **webhooktest/test-repo** to **webhooktest/new-name**.\neeshangarg changed the description of the **new-name** repo to **Random description.**"
expected_topic = u"new-name"
kwargs = {"HTTP_X_EVENT_KEY": 'repo:updated'}
self.send_and_test_stream_message('repo_updated', expected_topic,
expected_message, **kwargs)
def test_bitbucket2_on_push_one_tag_event(self) -> None:
expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_one_tag', self.EXPECTED_TOPIC, expected_message, **kwargs)
def test_bitbucket2_on_push_remove_tag_event(self) -> None:
expected_message = u"kolaszek removed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_remove_tag', self.EXPECTED_TOPIC, expected_message, **kwargs)
def test_bitbucket2_on_push_more_than_one_tag_event(self) -> None:
expected_message = u"kolaszek pushed tag [{name}](https://bitbucket.org/kolaszek/repository-name/commits/tag/{name})."
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('push_more_than_one_tag', **kwargs)
msg = self.get_last_message()
self.do_test_topic(msg, self.EXPECTED_TOPIC)
self.do_test_message(msg, expected_message.format(name='b'))
msg = self.get_second_to_last_message()
self.do_test_topic(msg, self.EXPECTED_TOPIC)
self.do_test_message(msg, expected_message.format(name='a'))
def test_bitbucket2_on_more_than_one_push_event(self) -> None:
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
msg = self.get_second_to_last_message()
self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
self.do_test_topic(msg, self.EXPECTED_TOPIC_BRANCH_EVENTS)
msg = self.get_last_message()
self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a).')
self.do_test_topic(msg, self.EXPECTED_TOPIC)
def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches(self) -> None:
self.url = self.build_webhook_url(branches='master,development')
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
self.send_and_test_stream_message('more_than_one_push_event', **kwargs)
msg = self.get_second_to_last_message()
self.do_test_message(msg, 'kolaszek [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 1 commit to branch master.\n\n* first commit ([84b96ad](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))')
self.do_test_topic(msg, self.EXPECTED_TOPIC_BRANCH_EVENTS)
msg = self.get_last_message()
self.do_test_message(msg, 'kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a).')
self.do_test_topic(msg, self.EXPECTED_TOPIC)
def test_bitbucket2_on_more_than_one_push_event_filtered_by_branches_ignore(self) -> None:
self.url = self.build_webhook_url(branches='changes,development')
kwargs = {
"HTTP_X_EVENT_KEY": 'pullrequest:push'
}
expected_message = u"kolaszek pushed tag [a](https://bitbucket.org/kolaszek/repository-name/commits/tag/a)."
self.send_and_test_stream_message('more_than_one_push_event',
self.EXPECTED_TOPIC,
expected_message, **kwargs)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_event_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='changes,devlopment')
payload = self.get_body('push')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_commits_above_limit_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='changes,devlopment')
payload = self.get_body('push_commits_above_limit')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_force_push_event_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='changes,devlopment')
payload = self.get_body('force_push')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_multiple_committers_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='changes,devlopment')
payload = self.get_body('push_multiple_committers')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_multiple_committers_with_others_filtered_by_branches_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
self.url = self.build_webhook_url(branches='changes,devlopment')
payload = self.get_body('push_multiple_committers_with_others')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
@patch('zerver.webhooks.bitbucket2.view.check_send_webhook_message')
def test_bitbucket2_on_push_without_changes_ignore(
self, check_send_webhook_message_mock: MagicMock) -> None:
payload = self.get_body('push_without_changes')
result = self.client_post(self.url, payload, content_type="application/json")
self.assertFalse(check_send_webhook_message_mock.called)
self.assert_json_success(result)
| 74.234234
| 484
| 0.748139
|
2c256f28237f5f6a1f9f0f05d71f079c61c83d44
| 377
|
py
|
Python
|
examples/pygame_display/display_layouts_shapes_test.py
|
FoamyGuy/Foamyguy_CircuitPython_DisplayIO_Inflater
|
c86d50c8727b4912d94c56226b58a35ccddb815d
|
[
"MIT",
"MIT-0",
"Unlicense"
] | 3
|
2020-06-29T19:00:23.000Z
|
2021-05-06T21:56:07.000Z
|
examples/pygame_display/display_layouts_shapes_test.py
|
FoamyGuy/circuitpython_display_layouts
|
d4380236d0613af8b77aaeeeb8d71dbd0a14dbd7
|
[
"MIT"
] | null | null | null |
examples/pygame_display/display_layouts_shapes_test.py
|
FoamyGuy/circuitpython_display_layouts
|
d4380236d0613af8b77aaeeeb8d71dbd0a14dbd7
|
[
"MIT"
] | null | null | null |
from display_layouts.absolute_layout import AbsoluteLayout
from blinka_displayio_pygamedisplay import PyGameDisplay
import os
os.chdir("..")
display = PyGameDisplay(width=800, height=600)
f = open("layouts/shapes_test.json", "r")
layout_str = f.read()
f.close()
main_layout = AbsoluteLayout(display, layout_str)
display.show(main_layout.view)
while display.running:
pass
| 25.133333
| 58
| 0.793103
|
291456277e62caedd9a2f001ed22f5c7892803a3
| 11,832
|
py
|
Python
|
config/settings/base.py
|
developertoentrepreneur/d2e-share-splitter
|
3dc406c726a801b507aa0b049fce8a2ab5d1bf2d
|
[
"MIT"
] | null | null | null |
config/settings/base.py
|
developertoentrepreneur/d2e-share-splitter
|
3dc406c726a801b507aa0b049fce8a2ab5d1bf2d
|
[
"MIT"
] | 5
|
2022-01-09T07:40:38.000Z
|
2022-02-12T19:38:54.000Z
|
config/settings/base.py
|
developertoentrepreneur/d2e_share_splitter
|
3dc406c726a801b507aa0b049fce8a2ab5d1bf2d
|
[
"MIT"
] | null | null | null |
"""
Base settings to build other settings files upon.
"""
from pathlib import Path
import environ
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
# d2e_share_splitter/
APPS_DIR = ROOT_DIR / "d2e_share_splitter"
env = environ.Env()
READ_DOT_ENV_FILE = env.bool("DJANGO_READ_DOT_ENV_FILE", default=False)
if READ_DOT_ENV_FILE:
# OS environment variables take precedence over variables from .env
env.read_env(str(ROOT_DIR / ".env"))
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# Local time zone. Choices are
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# though not all of them may be available with every OS.
# In Windows, this must be set to your system time zone.
TIME_ZONE = "UTC"
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = "en-us"
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# https://docs.djangoproject.com/en/dev/ref/settings/#locale-paths
LOCALE_PATHS = [str(ROOT_DIR / "locale")]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {"default": env.db("DATABASE_URL")}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
# https://docs.djangoproject.com/en/stable/ref/settings/#std:setting-DEFAULT_AUTO_FIELD
DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# APPS
# ------------------------------------------------------------------------------
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
# "django.contrib.humanize", # Handy template tags
"django.contrib.admin",
"django.forms",
]
THIRD_PARTY_APPS = [
"crispy_forms",
"crispy_bootstrap5",
"allauth",
"allauth.account",
"allauth.socialaccount",
"rest_framework",
"rest_framework.authtoken",
"corsheaders",
]
LOCAL_APPS = [
"d2e_share_splitter.users",
"d2e_share_splitter.shareconf",
"d2e_share_splitter.sharecontributions",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIGRATIONS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#migration-modules
MIGRATION_MODULES = {"sites": "d2e_share_splitter.contrib.sites.migrations"}
# AUTHENTICATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#authentication-backends
AUTHENTICATION_BACKENDS = [
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-user-model
AUTH_USER_MODEL = "users.User"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = "users:redirect"
# https://docs.djangoproject.com/en/dev/ref/settings/#login-url
LOGIN_URL = "account_login"
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#password-hashers
PASSWORD_HASHERS = [
# https://docs.djangoproject.com/en/dev/topics/auth/passwords/#using-argon2-with-django
"django.contrib.auth.hashers.Argon2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2PasswordHasher",
"django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher",
"django.contrib.auth.hashers.BCryptSHA256PasswordHasher",
]
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"corsheaders.middleware.CorsMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.common.BrokenLinkEmailsMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(APPS_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
"BACKEND": "django.template.backends.django.DjangoTemplates",
# https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
"DIRS": [str(APPS_DIR / "templates")],
"OPTIONS": {
# https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
"loaders": [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
# https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
],
},
}
]
# https://docs.djangoproject.com/en/dev/ref/settings/#form-renderer
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
# http://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap5"
CRISPY_ALLOWED_TEMPLATE_PACKS = "bootstrap5"
# FIXTURES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#fixture-dirs
FIXTURE_DIRS = (str(APPS_DIR / "fixtures"),)
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-httponly
SESSION_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-httponly
CSRF_COOKIE_HTTPONLY = True
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# https://docs.djangoproject.com/en/dev/ref/settings/#x-frame-options
X_FRAME_OPTIONS = "DENY"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = env(
"DJANGO_EMAIL_BACKEND",
default="django.core.mail.backends.smtp.EmailBackend",
)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-timeout
EMAIL_TIMEOUT = 5
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL.
ADMIN_URL = "admin/"
# https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = [("""Alvaro Lloret""", "alvaro@developertoentrepreneur.com")]
# https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
}
# django-allauth
# ------------------------------------------------------------------------------
ACCOUNT_ALLOW_REGISTRATION = env.bool("DJANGO_ACCOUNT_ALLOW_REGISTRATION", True)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_AUTHENTICATION_METHOD = "username"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_REQUIRED = True
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_ADAPTER = "d2e_share_splitter.users.adapters.AccountAdapter"
# https://django-allauth.readthedocs.io/en/latest/configuration.html
SOCIALACCOUNT_ADAPTER = "d2e_share_splitter.users.adapters.SocialAccountAdapter"
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.SessionAuthentication",
"rest_framework.authentication.TokenAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
}
# django-cors-headers - https://github.com/adamchainz/django-cors-headers#setup
CORS_URLS_REGEX = r"^/api/.*$"
# Your stuff...
# ------------------------------------------------------------------------------
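# Hedged illustration, not part of the original settings: with django-environ the values
# consumed above typically come from environment variables or a .env file, for example
#
#     DJANGO_READ_DOT_ENV_FILE=True
#     DJANGO_DEBUG=True
#     DATABASE_URL=postgres://user:password@127.0.0.1:5432/d2e_share_splitter
#     DJANGO_ACCOUNT_ALLOW_REGISTRATION=True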
| 40.8
| 93
| 0.6381
|
e787d7b6d1a5ad8394e9f39f539b91c427ba9272
| 7,507
|
py
|
Python
|
Classifier/preproc/soldability_mean_diff_median_by_session.py
|
leonardoads/RecSys-cariris
|
98098bb072d3ab8f1faed248f350472c62cb199b
|
[
"Apache-2.0"
] | null | null | null |
Classifier/preproc/soldability_mean_diff_median_by_session.py
|
leonardoads/RecSys-cariris
|
98098bb072d3ab8f1faed248f350472c62cb199b
|
[
"Apache-2.0"
] | null | null | null |
Classifier/preproc/soldability_mean_diff_median_by_session.py
|
leonardoads/RecSys-cariris
|
98098bb072d3ab8f1faed248f350472c62cb199b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#For each line that represents a click of a SESSION on an ITEM, insert the data of the clicks given in the same CATEGORY as the item in question
#INPUT
#clicks_proc_basico.dat
#SESSION, DAY, MONTH, YEAR, TIME, ITEM, CATEGORY, SOLDABILITY
#1, 7, 4, 2014, 10.85, 214536502, 0, 2,56
#1, 7, 4, 2014, 10.9, 214536500, 0, 8,5
#1, 7, 4, 2014, 10.9, 214536506, 0 22,37
#1, 7, 4, 2014, 10.95, 214577561, 0 1,44
#OUTPUT - clicks-soldability_mean_by_session.dat
#8.64
#8.64
#8.64
#8.64
#OUTPUT - clicks-soldability_mean_diff_by_session.dat
#6.08
#-0.14
#13.73
#-7.20
#OUTPUT - clicks-soldability_median_by_session.dat
#5.53
#5.53
#5.53
#5.53
#in case the input file is split into parts
#path - directory location
#pattern_filename - filename pattern of the file parts
#match_numeric - which part of the pattern should be replaced by the part identifier
#list_index - list of part identifiers
def read_file_parts(path, pattern_filename, match_numeric, list_index):
list_temp = []
for i in list_index:
print "Reading file:", pattern_filename.replace(match_numeric, str(i))
arq = open(path + pattern_filename.replace(match_numeric, str(i)), "r")
list_temp = list_temp + arq.readlines()
arq.close()
return list_temp
def read_single_file(filename):
lines = ""
print "Reading file:", filename
arq = open(filename, "r")
lines = arq.readlines()
return lines
#writes out the lines with the new column as it goes
def calculate_and_store_soldability_mean(session, lista_linhas, arq_w_sold_mean, index_soldability_column):
soldabilities = []
for linhaSplit in lista_linhas:
soldability = linhaSplit[index_soldability_column]
soldabilities.append(soldability)
soldability_mean = list_mean(soldabilities)
string_soldability_mean = str(soldability_mean).split(".")[0] + "." + str(soldability_mean).split(".")[1][0:2]
for s in range(len(soldabilities)):
arq_w_sold_mean.write(string_soldability_mean + "\n")
def calculate_and_store_soldability_median(session, lista_linhas, arq_w_sold_median, index_soldability_column):
soldabilities = []
for linhaSplit in lista_linhas:
soldability = linhaSplit[index_soldability_column]
soldabilities.append(soldability)
soldability_median = list_median(soldabilities)
if("." in str(soldability_median)):
string_soldability_median = str(soldability_median).split(".")[0] + "." + str(soldability_median).split(".")[1][0:2]
else:
string_soldability_median = str(soldability_median)
for s in range(len(soldabilities)):
arq_w_sold_median.write(string_soldability_median + "\n")
#writes out the lines with the new column as it goes
def calculate_and_store_soldability_mean_diff(session, lista_linhas, arq_w_sold_mean_diff, index_soldability_column):
soldabilities = []
for linhaSplit in lista_linhas:
soldability = linhaSplit[index_soldability_column]
soldabilities.append(soldability)
soldability_mean = list_mean(soldabilities)
string_soldability_mean_diff = []
for linhaSplit in lista_linhas:
soldab = linhaSplit[index_soldability_column]
diff = float(soldab) - float(soldability_mean)
string_diff = str(diff).split(".")[0] + "." + str(diff).split(".")[1][0:2]
arq_w_sold_mean_diff.write(string_diff + "\n")
def list_mean(number_list):
soma = 0.0
for num in number_list:
soma = soma + float(num)
return (soma / len(number_list) )
def list_median(number_list):
list_temp = sort(number_list)
if(len(list_temp) % 2 == 0):
return( ( float(list_temp[len(list_temp) / 2]) + (float(list_temp[ (len(list_temp) / 2) - 1 ]))) / 2 )
else:
return (list_temp[len(list_temp) / 2])
def sort(array):
less = []
equal = []
greater = []
if len(array) > 1:
pivot = array[0]
for x in array:
if x < pivot:
less.append(x)
if x == pivot:
equal.append(x)
if x > pivot:
greater.append(x)
# Don't forget to return something!
return sort(less)+equal+sort(greater) # Just use the + operator to join lists
# Note that you want equal ^^^^^ not pivot
else: # You need to hande the part at the end of the recursion - when you only have one element in your array, just return the array.
return array
#the built-in join method doesn't join ints with strings :(
def join(sep, lista):
string = ""
for i in lista:
string = string + str(i) + sep
return string[:-1]
def c_bind(colunas1, colunas2):
new_lines = []
if(not len(colunas1) == len(colunas2)):
return "COLUMNS DON'T HAVE THE SAME LENGTH"
index = 0
for a in range(len(colunas1)):
new_lines.append(colunas1[index].replace("\n","") + "," + colunas2[index].replace("\n",""))
index = index + 1
return new_lines
import os
import sys
#to clicks or test?
apply_to = sys.argv[1]
#init
path = "/".join(os.path.dirname(os.path.realpath(__file__)).split("/")[0:-2]) + "/Data/"
print "Loading CLICKS data"
linhas = read_file_parts(path, apply_to + "-proc-basico/" + apply_to + "-proc-basico-parteX.dat", "X", [1,2,3,4,5,6])
print len(linhas), "lines loaded"
print "Loading SOLDABILITY column data"
linhas_soldability = read_single_file(path + "columns/" + apply_to + "-column-soldability.dat")
print len(linhas_soldability), "lines loaded"
arq_w_sold_mean = open(path + "columns/" + apply_to + "-soldability_mean_by_session.dat", "w")
arq_w_sold_median = open(path + "columns/" + apply_to + "-soldability_median_by_session.dat", "w")
arq_w_sold_mean_diff = open(path + "columns/" + apply_to + "-soldability_mean_diff_by_session.dat", "w")
linhas = c_bind(linhas, linhas_soldability)
###########################################################
## This block allows a quick iteration over sessions     ##
## when they need to be compared against each other.     ##
## Remember they are grouped in the file. TalesBoy :)    ##
###########################################################
session_id_anterior = linhas[0].split(",")[0]
lista_linhas_por_id = []
#only so that the lines of the last session also get processed
linhas.append("0,0,0,0,0.0,0,0")
conta_linhas = 0
percent_done = 0
total = 0
for linha in linhas:
total = total + 1
conta_linhas = conta_linhas + 1
linhaSplit = linha.replace("\n","").split(",")
session_id = linhaSplit[0]
if(session_id != session_id_anterior):
calculate_and_store_soldability_mean(session_id_anterior, lista_linhas_por_id, arq_w_sold_mean, 7)
calculate_and_store_soldability_median(session_id_anterior, lista_linhas_por_id, arq_w_sold_median, 7)
calculate_and_store_soldability_mean_diff(session_id_anterior, lista_linhas_por_id, arq_w_sold_mean_diff, 7)
lista_linhas_por_id = []
session_id_anterior = session_id
lista_linhas_por_id.append(linhaSplit)
else:
lista_linhas_por_id.append(linhaSplit)
#only to print the script's progress
if (len(linhas) == conta_linhas):
percent_done = 100
conta_linhas = 0
#print datetime.datetime.now().time().hour, datetime.datetime.now().minute
print "Done:", (str(percent_done) + "%")
elif ((len(linhas) / 100) == conta_linhas):
percent_done = percent_done + 1
conta_linhas = 0
#print datetime.datetime.now().time().hour, datetime.datetime.now().minute
print "Done:", (str(percent_done) + "%")
##########################################
## END OF THE BLOCK DESCRIBED ABOVE     ##
##########################################
print total - 1, "lines saved"
arq_w_sold_mean.close()
arq_w_sold_median.close()
arq_w_sold_mean_diff.close()
| 30.148594
| 138
| 0.693752
|
909ab756a049cb4304e094a00950ea6251074485
| 2,597
|
py
|
Python
|
freenit/decorators.py
|
Dervish13/backend-1
|
c94231df3e7f3f6440e96d786d581e504de8cb33
|
[
"BSD-2-Clause"
] | 3
|
2020-01-10T09:43:24.000Z
|
2022-01-17T20:54:37.000Z
|
freenit/decorators.py
|
Dervish13/backend-1
|
c94231df3e7f3f6440e96d786d581e504de8cb33
|
[
"BSD-2-Clause"
] | 4
|
2020-01-31T12:12:56.000Z
|
2021-01-13T12:37:23.000Z
|
freenit/decorators.py
|
Dervish13/backend-1
|
c94231df3e7f3f6440e96d786d581e504de8cb33
|
[
"BSD-2-Clause"
] | 3
|
2019-11-18T15:28:24.000Z
|
2022-01-21T11:32:45.000Z
|
def FreenitAPI(app):
class route:
def __init__(self, route, tags=["object"], many=False, responses={}):
self.app = app
self.route = route
self.tags = tags
self.many = many
self.responses = responses
def __call__(self, cls):
origGet = getattr(cls, "get", None)
origPost = getattr(cls, "post", None)
origPatch = getattr(cls, "patch", None)
origDelete = getattr(cls, "delete", None)
getSuffix = " list" if self.many else ""
app = self.app
responses = self.responses
class Wrapped(cls):
if callable(origGet):
anotated_model = origGet.__annotations__.get("return")
model = responses.get("get") or anotated_model
deco = app.get(
self.route,
summary=f"Get {self.tags[0]}{getSuffix}",
response_model=model,
tags=self.tags,
)
get = deco(origGet)
if callable(origPost):
anotated_model = origPost.__annotations__.get("return")
model = responses.get("post") or anotated_model
deco = self.app.post(
self.route,
summary=f"Create {self.tags[0]}",
response_model=model,
tags=self.tags,
)
post = deco(origPost)
if callable(origPatch):
anotated_model = origPatch.__annotations__.get("return")
model = responses.get("patch") or anotated_model
deco = self.app.patch(
self.route,
summary=f"Edit {self.tags[0]}",
response_model=model,
tags=self.tags,
)
patch = deco(origPatch)
if callable(origDelete):
anotated_model = origDelete.__annotations__.get("return")
model = responses.get("delete") or anotated_model
deco = self.app.delete(
self.route,
summary=f"Destroy {self.tags[0]}",
response_model=model,
tags=self.tags,
)
delete = deco(origDelete)
return Wrapped
return route
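# Hedged usage sketch, assumed rather than taken from the freenit docs: FreenitAPI wraps a
# FastAPI-style app and turns a class with get/post/patch/delete methods into routes; the
# class and response model names below are hypothetical.
#
#     api = FreenitAPI(app)
#
#     @api("/users", tags=["user"], many=True)
#     class UserListAPI:
#         async def get(self) -> UserListResponse:
#             ...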
| 40.578125
| 77
| 0.447054
|
ef381104cb69f3f3309d19fca1c19002b2366924
| 176
|
py
|
Python
|
PythonExercicios/Mundo 1/3_operadores_aritimeticos/ex015.py
|
GuilhermoCampos/Curso-Python3-curso-em-video
|
723767bc6069e9c1fa9e28fe412e694f9eb8d05e
|
[
"MIT"
] | null | null | null |
PythonExercicios/Mundo 1/3_operadores_aritimeticos/ex015.py
|
GuilhermoCampos/Curso-Python3-curso-em-video
|
723767bc6069e9c1fa9e28fe412e694f9eb8d05e
|
[
"MIT"
] | null | null | null |
PythonExercicios/Mundo 1/3_operadores_aritimeticos/ex015.py
|
GuilhermoCampos/Curso-Python3-curso-em-video
|
723767bc6069e9c1fa9e28fe412e694f9eb8d05e
|
[
"MIT"
] | null | null | null |
d = int(input('How many days was it rented for?: '))
km = float(input('How many kilometers were driven?: '))
t = (km * 0.15) + (d * 60)
print('The total to be paid is R${:.2f}'.format(t))
| 35.2
| 57
| 0.619318
|
2a6c3c8ac55a736f80f545f3b71054284c5c3673
| 2,385
|
py
|
Python
|
rasa/run.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 37
|
2019-06-07T07:39:00.000Z
|
2022-01-27T08:32:57.000Z
|
rasa/run.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 93
|
2020-10-22T10:41:26.000Z
|
2022-03-01T13:34:43.000Z
|
rasa/run.py
|
orcaformation/chatbot_widget
|
cdbc0db5103a5a701878804ba183d5448823c798
|
[
"Apache-2.0"
] | 65
|
2019-05-21T12:16:53.000Z
|
2022-02-23T10:54:15.000Z
|
import logging
import typing
from typing import Dict, Text
import rasa.shared.utils.common
from rasa.shared.utils.cli import print_warning
from rasa.shared.constants import DOCS_BASE_URL
from rasa.core.lock_store import LockStore
logger = logging.getLogger(__name__)
if typing.TYPE_CHECKING:
from rasa.core.agent import Agent
def run(
model: Text,
endpoints: Text,
connector: Text = None,
credentials: Text = None,
**kwargs: Dict,
):
"""Runs a Rasa model.
Args:
model: Path to model archive.
endpoints: Path to endpoints file.
        connector: Connector which should be used (overwrites the `credentials`
field).
credentials: Path to channel credentials file.
**kwargs: Additional arguments which are passed to
`rasa.core.run.serve_application`.
"""
import rasa.core.run
import rasa.nlu.run
from rasa.core.utils import AvailableEndpoints
_endpoints = AvailableEndpoints.read_endpoints(endpoints)
if not connector and not credentials:
connector = "rest"
print_warning(
f"No chat connector configured, falling back to the "
f"REST input channel. To connect your bot to another channel, "
f"read the docs here: {DOCS_BASE_URL}/messaging-and-voice-channels"
)
kwargs = rasa.shared.utils.common.minimal_kwargs(
kwargs, rasa.core.run.serve_application
)
rasa.core.run.serve_application(
model,
channel=connector,
credentials=credentials,
endpoints=_endpoints,
**kwargs,
)
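# Illustrative usage sketch (hypothetical, not part of the original module);
# the model archive and endpoints file below are placeholders. Extra keyword
# arguments (e.g. `port`) are filtered by `minimal_kwargs` down to what
# `rasa.core.run.serve_application` actually accepts.
#
#   run("models/20210101-000000.tar.gz", "endpoints.yml", connector="rest", port=5005)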
def create_agent(model: Text, endpoints: Text = None) -> "Agent":
from rasa.core.tracker_store import TrackerStore
from rasa.core.utils import AvailableEndpoints
from rasa.core.agent import Agent
from rasa.core.brokers.broker import EventBroker
import rasa.utils.common
_endpoints = AvailableEndpoints.read_endpoints(endpoints)
_broker = rasa.utils.common.run_in_loop(EventBroker.create(_endpoints.event_broker))
_tracker_store = TrackerStore.create(_endpoints.tracker_store, event_broker=_broker)
_lock_store = LockStore.create(_endpoints.lock_store)
return Agent.load(
model,
generator=_endpoints.nlg,
tracker_store=_tracker_store,
lock_store=_lock_store,
action_endpoint=_endpoints.action,
)
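# Illustrative sketch (hypothetical, not part of the original module): loading
# an Agent for programmatic message handling; the model path is a placeholder.
#
#   agent = create_agent("models/20210101-000000.tar.gz", endpoints="endpoints.yml")
#   responses = rasa.utils.common.run_in_loop(agent.handle_text("hello"))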
| 29.444444
| 88
| 0.69979
|
a8c2ac58d6fb17f5df9788a4b1f438a26e58f7c7
| 117,274
|
py
|
Python
|
transformers/src/transformers/pipelines.py
|
jordiclive/ControlPrefixes
|
b647f68bf0c7e771f847c4a51e5f22af2ac95699
|
[
"Apache-1.1"
] | 26
|
2021-11-23T09:01:32.000Z
|
2022-03-25T11:34:15.000Z
|
transformers/src/transformers/pipelines.py
|
jordiclive/ControlPrefixes
|
b647f68bf0c7e771f847c4a51e5f22af2ac95699
|
[
"Apache-1.1"
] | 3
|
2021-12-10T17:43:23.000Z
|
2022-03-18T11:37:19.000Z
|
transformers/src/transformers/pipelines.py
|
jordiclive/ControlPrefixes
|
b647f68bf0c7e771f847c4a51e5f22af2ac95699
|
[
"Apache-1.1"
] | 5
|
2021-12-19T03:22:08.000Z
|
2022-02-14T04:41:04.000Z
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import sys
import uuid
from abc import ABC, abstractmethod
from contextlib import contextmanager
from itertools import chain
from os.path import abspath, exists
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from uuid import UUID
import numpy as np
from .configuration_auto import AutoConfig
from .configuration_utils import PretrainedConfig
from .data import SquadExample, squad_convert_examples_to_features
from .file_utils import add_end_docstrings, is_tf_available, is_torch_available
from .modelcard import ModelCard
from .tokenization_auto import AutoTokenizer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_utils_base import BatchEncoding, PaddingStrategy
from .utils import logging
if is_tf_available():
import tensorflow as tf
from .modeling_tf_auto import (
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_WITH_LM_HEAD_MAPPING,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
)
if is_torch_available():
import torch
from .modeling_auto import (
MODEL_FOR_MASKED_LM_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
)
if TYPE_CHECKING:
from .modeling_tf_utils import TFPreTrainedModel
from .modeling_utils import PreTrainedModel
logger = logging.get_logger(__name__)
def get_framework(model=None):
"""
Select framework (TensorFlow or PyTorch) to use.
Args:
model (:obj:`str`, :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`, `optional`):
If both frameworks are installed, picks the one corresponding to the model passed (either a model class or
the model name). If no specific model is provided, defaults to using PyTorch.
"""
if is_tf_available() and is_torch_available() and model is not None and not isinstance(model, str):
        # Both frameworks are available, but the user supplied a model class instance.
# Try to guess which framework to use from the model classname
framework = "tf" if model.__class__.__name__.startswith("TF") else "pt"
elif not is_tf_available() and not is_torch_available():
raise RuntimeError(
"At least one of TensorFlow 2.0 or PyTorch should be installed. "
"To install TensorFlow 2.0, read the instructions at https://www.tensorflow.org/install/ "
"To install PyTorch, read the instructions at https://pytorch.org/."
)
else:
# framework = 'tf' if is_tf_available() else 'pt'
framework = "pt" if is_torch_available() else "tf"
return framework
class PipelineException(Exception):
"""
Raised by a :class:`~transformers.Pipeline` when handling __call__.
Args:
task (:obj:`str`): The task of the pipeline.
model (:obj:`str`): The model used by the pipeline.
reason (:obj:`str`): The error message to display.
"""
def __init__(self, task: str, model: str, reason: str):
super().__init__(reason)
self.task = task
self.model = model
class ArgumentHandler(ABC):
"""
Base interface for handling arguments for each :class:`~transformers.pipelines.Pipeline`.
"""
@abstractmethod
def __call__(self, *args, **kwargs):
raise NotImplementedError()
class DefaultArgumentHandler(ArgumentHandler):
"""
Default argument parser handling parameters for each :class:`~transformers.pipelines.Pipeline`.
"""
@staticmethod
def handle_kwargs(kwargs: Dict) -> List:
if len(kwargs) == 1:
output = list(kwargs.values())
else:
output = list(chain(kwargs.values()))
return DefaultArgumentHandler.handle_args(output)
@staticmethod
def handle_args(args: Sequence[Any]) -> List[str]:
# Only one argument, let's do case by case
if len(args) == 1:
if isinstance(args[0], str):
return [args[0]]
elif not isinstance(args[0], list):
return list(args)
else:
return args[0]
# Multiple arguments (x1, x2, ...)
elif len(args) > 1:
if all([isinstance(arg, str) for arg in args]):
return list(args)
            # If not an instance of list, then it should be an instance of Iterable
elif isinstance(args, Iterable):
return list(chain.from_iterable(chain(args)))
else:
raise ValueError(
"Invalid input type {}. Pipeline supports Union[str, Iterable[str]]".format(type(args))
)
else:
return []
def __call__(self, *args, **kwargs):
if len(kwargs) > 0 and len(args) > 0:
raise ValueError("Pipeline cannot handle mixed args and kwargs")
if len(kwargs) > 0:
return DefaultArgumentHandler.handle_kwargs(kwargs)
else:
return DefaultArgumentHandler.handle_args(args)
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
    Supported data formats currently include:
- JSON
- CSV
- stdin/stdout (pipe)
    :obj:`PipelineDataFormat` also includes some utilities to work with multi-column data, like mapping from dataset
    columns to pipeline keyword arguments through the :obj:`dataset_kwarg_1=dataset_column_1` format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite: bool = False,
):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
@abstractmethod
def __iter__(self):
raise NotImplementedError()
@abstractmethod
def save(self, data: Union[dict, List[dict]]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
Args:
data (:obj:`dict` or list of :obj:`dict`): The data to store.
Returns:
:obj:`str`: Path where the data has been saved.
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
@staticmethod
def from_str(
format: str,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
) -> "PipelineDataFormat":
"""
Creates an instance of the right subclass of :class:`~transformers.pipelines.PipelineDataFormat` depending
on :obj:`format`.
Args:
format: (:obj:`str`):
The format of the desired pipeline. Acceptable values are :obj:`"json"`, :obj:`"csv"` or :obj:`"pipe"`.
output_path (:obj:`str`, `optional`):
Where to save the outgoing data.
input_path (:obj:`str`, `optional`):
Where to look for the input data.
column (:obj:`str`, `optional`):
The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
Returns:
:class:`~transformers.pipelines.PipelineDataFormat`: The proper data format.
"""
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
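# Illustrative sketch (hypothetical, not part of the original module): building
# a reader/writer from a format name with `PipelineDataFormat.from_str`. The
# file names and the column name below are placeholders.
def _example_pipeline_data_format():
    fmt = PipelineDataFormat.from_str(
        "csv",
        output_path="predictions.csv",
        input_path="reviews.csv",
        column="review",  # multi-column mappings like "question=q,context=c" are also supported
        overwrite=True,
    )
    # Iterating yields one value per row (or a {kwarg: value} dict for multi-column mappings).
    return [row for row in fmt]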
class CsvPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using CSV data format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
def __iter__(self):
with open(self.input_path, "r") as f:
reader = csv.DictReader(f)
for row in reader:
if self.is_multi_columns:
yield {k: row[c] for k, c in self.column}
else:
yield row[self.column[0]]
def save(self, data: List[dict]):
"""
Save the provided data object with the representation for the current
:class:`~transformers.pipelines.PipelineDataFormat`.
Args:
data (:obj:`List[dict]`): The data to store.
"""
with open(self.output_path, "w") as f:
if len(data) > 0:
writer = csv.DictWriter(f, list(data[0].keys()))
writer.writeheader()
writer.writerows(data)
class JsonPipelineDataFormat(PipelineDataFormat):
"""
Support for pipelines using JSON file format.
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __init__(
self,
output_path: Optional[str],
input_path: Optional[str],
column: Optional[str],
overwrite=False,
):
super().__init__(output_path, input_path, column, overwrite=overwrite)
with open(input_path, "r") as f:
self._entries = json.load(f)
def __iter__(self):
for entry in self._entries:
if self.is_multi_columns:
yield {k: entry[c] for k, c in self.column}
else:
yield entry[self.column[0]]
def save(self, data: dict):
"""
Save the provided data object in a json file.
Args:
data (:obj:`dict`): The data to store.
"""
with open(self.output_path, "w") as f:
json.dump(data, f)
class PipedPipelineDataFormat(PipelineDataFormat):
"""
Read data from piped input to the python process.
    For multi-column data, columns should be separated by \t
If columns are provided, then the output will be a dictionary with {column_x: value_x}
Args:
output_path (:obj:`str`, `optional`): Where to save the outgoing data.
input_path (:obj:`str`, `optional`): Where to look for the input data.
column (:obj:`str`, `optional`): The column to read.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to overwrite the :obj:`output_path`.
"""
def __iter__(self):
for line in sys.stdin:
# Split for multi-columns
if "\t" in line:
line = line.split("\t")
if self.column:
# Dictionary to map arguments
yield {kwargs: l for (kwargs, _), l in zip(self.column, line)}
else:
yield tuple(line)
# No dictionary to map arguments
else:
yield line
def save(self, data: dict):
"""
Print the data.
Args:
data (:obj:`dict`): The data to store.
"""
print(data)
def save_binary(self, data: Union[dict, List[dict]]) -> str:
if self.output_path is None:
raise KeyError(
"When using piped input on pipeline outputting large object requires an output file path. "
"Please provide such output path through --output argument."
)
return super().save_binary(data)
class _ScikitCompat(ABC):
"""
Interface layer for the Scikit and Keras compatibility.
"""
@abstractmethod
def transform(self, X):
raise NotImplementedError()
@abstractmethod
def predict(self, X):
raise NotImplementedError()
PIPELINE_INIT_ARGS = r"""
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU, a positive value will run the model
on the associated CUDA device id.
binary_output (:obj:`bool`, `optional`, defaults to :obj:`False`):
Flag indicating if the output the pipeline should happen in a binary format (i.e., pickle) or as raw text.
"""
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Pipeline(_ScikitCompat):
"""
The Pipeline class is the class from which all pipelines inherit. Refer to this class for methods shared across
different pipelines.
Base class implementing pipelined operations.
Pipeline workflow is defined as a sequence of the following operations:
Input -> Tokenization -> Model Inference -> Post-Processing (task dependent) -> Output
Pipeline supports running on CPU or GPU through the device argument (see below).
    Some pipelines, such as :class:`~transformers.FeatureExtractionPipeline` (:obj:`'feature-extraction'`),
    output large tensor objects as nested lists. In order to avoid dumping such large structures as textual data we
provide the :obj:`binary_output` constructor argument. If set to :obj:`True`, the output will be stored in the
pickle format.
"""
default_input_names = None
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
task: str = "",
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
):
if framework is None:
framework = get_framework(model)
self.task = task
self.model = model
self.tokenizer = tokenizer
self.modelcard = modelcard
self.framework = framework
self.device = device if framework == "tf" else torch.device("cpu" if device < 0 else "cuda:{}".format(device))
self.binary_output = binary_output
self._args_parser = args_parser or DefaultArgumentHandler()
# Special handling
if self.framework == "pt" and self.device.type == "cuda":
self.model = self.model.to(self.device)
# Update config with task specific parameters
task_specific_params = self.model.config.task_specific_params
if task_specific_params is not None and task in task_specific_params:
self.model.config.update(task_specific_params.get(task))
def save_pretrained(self, save_directory: str):
"""
Save the pipeline's model and tokenizer.
Args:
save_directory (:obj:`str`):
                A path to the directory to save the model and tokenizer to. It will be created if it doesn't exist.
"""
if os.path.isfile(save_directory):
logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
return
os.makedirs(save_directory, exist_ok=True)
self.model.save_pretrained(save_directory)
self.tokenizer.save_pretrained(save_directory)
if self.modelcard is not None:
self.modelcard.save_pretrained(save_directory)
def transform(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
def predict(self, X):
"""
Scikit / Keras interface to transformers' pipelines. This method will forward to __call__().
"""
return self(X=X)
@contextmanager
def device_placement(self):
"""
        Context manager allowing tensor allocation on the user-specified device in a framework-agnostic way.
Returns:
Context manager
Examples::
# Explicitly ask for tensor allocation on CUDA device :0
pipe = pipeline(..., device=0)
with pipe.device_placement():
# Every framework specific tensor allocation will be done on the request device
output = pipe(...)
"""
if self.framework == "tf":
with tf.device("/CPU:0" if self.device == -1 else "/device:GPU:{}".format(self.device)):
yield
else:
if self.device.type == "cuda":
torch.cuda.set_device(self.device)
yield
def ensure_tensor_on_device(self, **inputs):
"""
Ensure PyTorch tensors are on the specified device.
Args:
inputs (keyword arguments that should be :obj:`torch.Tensor`): The tensors to place on :obj:`self.device`.
Return:
:obj:`Dict[str, torch.Tensor]`: The same as :obj:`inputs` but on the proper device.
"""
return {name: tensor.to(self.device) for name, tensor in inputs.items()}
def check_model_type(self, supported_models: Union[List[str], dict]):
"""
        Check if the model class is supported by the pipeline.
Args:
supported_models (:obj:`List[str]` or :obj:`dict`):
The list of models supported by the pipeline, or a dictionary with model class values.
"""
if not isinstance(supported_models, list): # Create from a model mapping
supported_models = [item[1].__name__ for item in supported_models.items()]
if self.model.__class__.__name__ not in supported_models:
raise PipelineException(
self.task,
self.model.base_model_prefix,
f"The model '{self.model.__class__.__name__}' is not supported for {self.task}. Supported models are {supported_models}",
)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
)
return inputs
def __call__(self, *args, **kwargs):
inputs = self._parse_and_tokenize(*args, **kwargs)
return self._forward(inputs)
def _forward(self, inputs, return_tensors=False):
"""
Internal framework specific forward dispatching.
Args:
            inputs: dict holding all the keyword arguments required by the model forward method.
            return_tensors: Whether to return native framework (pt/tf) tensors rather than a numpy array.
Returns:
Numpy array
"""
# Encode for forward
with self.device_placement():
if self.framework == "tf":
# TODO trace model
predictions = self.model(inputs.data, training=False)[0]
else:
with torch.no_grad():
inputs = self.ensure_tensor_on_device(**inputs)
predictions = self.model(**inputs)[0].cpu()
if return_tensors:
return predictions
else:
return predictions.numpy()
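# Illustrative sketch (hypothetical, not part of the original module): concrete
# pipelines only customize post-processing around the shared
# parse/tokenize -> _forward flow implemented above. A minimal toy subclass
# could look like this:
class _ExampleArgmaxPipeline(Pipeline):
    def __call__(self, *args, **kwargs):
        logits = super().__call__(*args, **kwargs)  # numpy array produced by _forward
        return logits.argmax(axis=-1).tolist()      # task-specific post-processing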
# Can't use @add_end_docstrings(PIPELINE_INIT_ARGS) here because this one does not accept `binary_output`
class FeatureExtractionPipeline(Pipeline):
"""
Feature extraction pipeline using no model head. This pipeline extracts the hidden states from the base
transformer, which can be used as features in downstream tasks.
This feature extraction pipeline can currently be loaded from :func:`~transformers.pipeline` using the task
identifier: :obj:`"feature-extraction"`.
All models may be used for this pipeline. See a list of all models, including community-contributed models on
`huggingface.co/models <https://huggingface.co/models>`__.
Arguments:
model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
:class:`~transformers.PreTrainedTokenizer`.
modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
Model card attributed to the model for this pipeline.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
task (:obj:`str`, defaults to :obj:`""`):
A task-identifier for the pipeline.
args_parser (:class:`~transformers.pipelines.ArgumentHandler`, `optional`):
Reference to the object in charge of parsing supplied pipeline parameters.
device (:obj:`int`, `optional`, defaults to -1):
            Device ordinal for CPU/GPU support. Setting this to -1 will leverage CPU, a positive value will run the model
on the associated CUDA device id.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
def __call__(self, *args, **kwargs):
"""
Extract the features of the input(s).
Args:
args (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) to get the features of.
Return:
A nested list of :obj:`float`: The features computed by the model.
"""
return super().__call__(*args, **kwargs).tolist()
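# Illustrative usage sketch (hypothetical, not part of the original module),
# kept as an uncalled helper because constructing a pipeline downloads a
# default model checkpoint:
def _example_feature_extraction():
    from transformers import pipeline  # public factory, imported only inside the sketch
    extractor = pipeline("feature-extraction")
    # Returns a nested list of floats shaped roughly [batch][token][hidden_size].
    return extractor("Transformers pipelines are easy to use.")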
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
"""
Language generation pipeline using any :obj:`ModelWithLMHead`. This pipeline predicts the words that will follow a
specified text prompt.
This language generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"text-generation"`.
The models that this pipeline can use are models that have been trained with an autoregressive language modeling
objective, which includes the uni-directional models in the library (e.g. gpt2).
See the list of available community models on
`huggingface.co/models <https://huggingface.co/models?search=&filter=lm-head>`__.
"""
# Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
XL_PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
ALLOWED_MODELS = [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"ReformerModelWithLMHead",
"GPT2LMHeadModel",
"OpenAIGPTLMHeadModel",
"CTRLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
"TFGPT2LMHeadModel",
"TFOpenAIGPTLMHeadModel",
"TFCTRLLMHeadModel",
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(self.ALLOWED_MODELS)
# overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
Parse arguments and tokenize
"""
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
tokenizer_kwargs = {"add_space_before_punct_symbol": True}
else:
tokenizer_kwargs = {}
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
**tokenizer_kwargs,
)
return inputs
def __call__(
self,
*args,
return_tensors=False,
return_text=True,
clean_up_tokenization_spaces=False,
prefix=None,
**generate_kwargs
):
"""
Complete the prompt(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several prompts (or one list of prompts) to complete.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
prefix (:obj:`str`, `optional`):
Prefix added to prompt.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
- **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the generated text.
"""
text_inputs = self._args_parser(*args)
results = []
for prompt_text in text_inputs:
# Manage correct placement of the tensors
with self.device_placement():
prefix = prefix if prefix is not None else self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
prefix = self.XL_PREFIX
if prefix:
prefix_inputs = self._parse_and_tokenize(prefix, padding=False, add_special_tokens=False)
# This impacts max_length and min_length argument that need adjusting.
prefix_length = prefix_inputs["input_ids"].shape[-1]
if generate_kwargs.get("max_length", None) is not None:
generate_kwargs["max_length"] += prefix_length
if generate_kwargs.get("min_length", None) is not None:
generate_kwargs["min_length"] += prefix_length
prefix = prefix or ""
inputs = self._parse_and_tokenize(prefix + prompt_text, padding=False, add_special_tokens=False)
# set input_ids to None to allow empty prompt
if inputs["input_ids"].shape[-1] == 0:
inputs["input_ids"] = None
inputs["attention_mask"] = None
if self.framework == "pt" and inputs["input_ids"] is not None:
inputs = self.ensure_tensor_on_device(**inputs)
input_ids = inputs["input_ids"]
# Ensure that batch size = 1 (batch generation not allowed for now)
assert (
input_ids is None or input_ids.shape[0] == 1
), "Batch generation is currently not supported. See https://github.com/huggingface/transformers/issues/3021 for more information."
output_sequences = self.model.generate(input_ids=input_ids, **generate_kwargs) # BS x SL
result = []
for generated_sequence in output_sequences:
if self.framework == "pt" and generated_sequence is not None:
generated_sequence = generated_sequence.cpu()
generated_sequence = generated_sequence.numpy().tolist()
record = {}
if return_tensors:
record["generated_token_ids"] = generated_sequence
if return_text:
# Decode text
text = self.tokenizer.decode(
generated_sequence,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
prompt_length = 0
else:
prompt_length = len(
self.tokenizer.decode(
input_ids[0],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
record["generated_text"] = prompt_text + text[prompt_length:]
result.append(record)
results += [result]
if len(results) == 1:
return results[0]
return results
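# Illustrative usage sketch (hypothetical, not part of the original module);
# the prompt is a placeholder, and extra kwargs such as `max_length` are
# forwarded to `model.generate` through **generate_kwargs:
def _example_text_generation():
    from transformers import pipeline
    generator = pipeline("text-generation")
    return generator("Once upon a time,", max_length=30)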
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
return_all_scores (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to return all prediction scores or just the score of the predicted class.
""",
)
class TextClassificationPipeline(Pipeline):
"""
Text classification pipeline using any :obj:`ModelForSequenceClassification`. See the
`sequence classification examples <../task_summary.html#sequence-classification>`__ for more information.
This text classification pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"sentiment-analysis"` (for classifying sequences according to positive or negative
sentiments).
The models that this pipeline can use are models that have been fine-tuned on a sequence classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=text-classification>`__.
"""
def __init__(self, return_all_scores: bool = False, **kwargs):
super().__init__(**kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
)
self.return_all_scores = return_all_scores
def __call__(self, *args, **kwargs):
"""
Classify the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of texts) to classify.
Return:
A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the
following keys:
- **label** (:obj:`str`) -- The label predicted.
- **score** (:obj:`float`) -- The corresponding probability.
If ``self.return_all_scores=True``, one such dictionary is returned per label.
"""
outputs = super().__call__(*args, **kwargs)
scores = np.exp(outputs) / np.exp(outputs).sum(-1, keepdims=True)
if self.return_all_scores:
return [
[{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(item)]
for item in scores
]
else:
return [
{"label": self.model.config.id2label[item.argmax()], "score": item.max().item()} for item in scores
]
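# Illustrative usage sketch (hypothetical, not part of the original module);
# the example text is a placeholder and the default checkpoint is resolved by
# the `pipeline` factory:
def _example_text_classification():
    from transformers import pipeline
    classifier = pipeline("sentiment-analysis")  # task alias for this pipeline
    # Returns e.g. [{"label": ..., "score": ...}]; construct the pipeline with
    # return_all_scores=True to get one dictionary per label instead.
    return classifier("This library keeps getting better.")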
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
"""
Handles arguments for zero-shot for text classification by turning each possible label into an NLI
premise/hypothesis pair.
"""
def _parse_labels(self, labels):
if isinstance(labels, str):
labels = [label.strip() for label in labels.split(",")]
return labels
def __call__(self, sequences, labels, hypothesis_template):
if len(labels) == 0 or len(sequences) == 0:
raise ValueError("You must include at least one label and at least one sequence.")
if hypothesis_template.format(labels[0]) == hypothesis_template:
raise ValueError(
(
'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(hypothesis_template)
)
if isinstance(sequences, str):
sequences = [sequences]
labels = self._parse_labels(labels)
sequence_pairs = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
return sequence_pairs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(Pipeline):
"""
NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural
language inference) tasks.
Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis
pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the
candidate label being valid. Any NLI model can be used as long as the first output logit corresponds to
`contradiction` and the last to `entailment`.
This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"zero-shot-classification"`.
The models that this pipeline can use are models that have been fine-tuned on an NLI task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?search=nli>`__.
"""
def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
super().__init__(*args, args_parser=args_parser, **kwargs)
def _parse_and_tokenize(self, *args, padding=True, add_special_tokens=True, **kwargs):
"""
        Parse arguments and tokenize with truncation="only_first" so that the hypothesis (label) is not truncated
"""
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer(
inputs,
add_special_tokens=add_special_tokens,
return_tensors=self.framework,
padding=padding,
truncation="only_first",
)
return inputs
def __call__(self, sequences, candidate_labels, hypothesis_template="This example is {}.", multi_class=False):
"""
Classify the sequence(s) given as inputs.
Args:
sequences (:obj:`str` or :obj:`List[str]`):
The sequence(s) to classify, will be truncated if the model input is too large.
candidate_labels (:obj:`str` or :obj:`List[str]`):
The set of possible class labels to classify each sequence into. Can be a single label, a string of
comma-separated labels, or a list of labels.
hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`):
The template used to turn each label into an NLI-style hypothesis. This template must include a {}
or similar syntax for the candidate label to be inserted into the template. For example, the default
template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed
into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The
default template works well in many cases, but it may be worthwhile to experiment with different
templates depending on the task setting.
multi_class (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized
such that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are
considered independent and probabilities are normalized for each candidate by doing a softmax of
the entailment score vs. the contradiction score.
Return:
A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **sequence** (:obj:`str`) -- The sequence for which this is the output.
- **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood.
- **scores** (:obj:`List[float]`) -- The probabilities for each of the labels.
"""
outputs = super().__call__(sequences, candidate_labels, hypothesis_template)
num_sequences = 1 if isinstance(sequences, str) else len(sequences)
candidate_labels = self._args_parser._parse_labels(candidate_labels)
reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1))
if len(candidate_labels) == 1:
multi_class = True
if not multi_class:
# softmax the "entailment" logits over all candidate labels
entail_logits = reshaped_outputs[..., -1]
scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)
else:
# softmax over the entailment vs. contradiction dim for each label independently
entail_contr_logits = reshaped_outputs[..., [0, -1]]
scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
scores = scores[..., 1]
result = []
for iseq in range(num_sequences):
top_inds = list(reversed(scores[iseq].argsort()))
result.append(
{
"sequence": sequences if isinstance(sequences, str) else sequences[iseq],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[iseq][top_inds].tolist(),
}
)
if len(result) == 1:
return result[0]
return result
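# Illustrative usage sketch (hypothetical, not part of the original module);
# the sequence, candidate labels, and template below are placeholders:
def _example_zero_shot_classification():
    from transformers import pipeline
    classifier = pipeline("zero-shot-classification")
    return classifier(
        "Who are you voting for in 2020?",
        candidate_labels=["politics", "economics", "entertainment"],
        hypothesis_template="This text is about {}.",
    )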
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
topk (:obj:`int`, defaults to 5): The number of predictions to return.
""",
)
class FillMaskPipeline(Pipeline):
"""
Masked language modeling prediction pipeline using any :obj:`ModelWithLMHead`. See the
`masked language modeling examples <../task_summary.html#masked-language-modeling>`__ for more information.
This mask filling pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"fill-mask"`.
The models that this pipeline can use are models that have been trained with a masked language modeling objective,
which includes the bi-directional models in the library.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=lm-head>`__.
.. note::
This pipeline only works for inputs with exactly one token masked.
"""
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
topk=5,
task: str = "",
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=True,
task=task,
)
self.check_model_type(TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_MASKED_LM_MAPPING)
self.topk = topk
def ensure_exactly_one_mask_token(self, masked_index: np.ndarray):
numel = np.prod(masked_index.shape)
if numel > 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"More than one mask_token ({self.tokenizer.mask_token}) is not supported",
)
elif numel < 1:
raise PipelineException(
"fill-mask",
self.model.base_model_prefix,
f"No mask_token ({self.tokenizer.mask_token}) found on the input",
)
def __call__(self, *args, targets=None, **kwargs):
"""
Fill the masked token in the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
                One or several texts (or one list of texts) with masked tokens.
targets (:obj:`str` or :obj:`List[str]`, `optional`):
When passed, the model will return the scores for the passed token or tokens rather than the top k
predictions in the entire vocabulary. If the provided targets are not in the model vocab, they will
be tokenized and the first resulting token will be used (with a warning).
Return:
A list or a list of list of :obj:`dict`: Each result comes as list of dictionaries with the
following keys:
- **sequence** (:obj:`str`) -- The corresponding input with the mask token prediction.
- **score** (:obj:`float`) -- The corresponding probability.
- **token** (:obj:`int`) -- The predicted token id (to replace the masked one).
            - **token_str** (:obj:`str`) -- The predicted token (to replace the masked one).
"""
inputs = self._parse_and_tokenize(*args, **kwargs)
outputs = self._forward(inputs, return_tensors=True)
results = []
batch_size = outputs.shape[0] if self.framework == "tf" else outputs.size(0)
if targets is not None:
if len(targets) == 0 or len(targets[0]) == 0:
raise ValueError("At least one target must be provided when passed.")
if isinstance(targets, str):
targets = [targets]
targets_proc = []
for target in targets:
target_enc = self.tokenizer.tokenize(target)
if len(target_enc) > 1 or target_enc[0] == self.tokenizer.unk_token:
logger.warning(
"The specified target token `{}` does not exist in the model vocabulary. Replacing with `{}`.".format(
target, target_enc[0]
)
)
targets_proc.append(target_enc[0])
target_inds = np.array(self.tokenizer.convert_tokens_to_ids(targets_proc))
for i in range(batch_size):
input_ids = inputs["input_ids"][i]
result = []
if self.framework == "tf":
masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index)
logits = outputs[i, masked_index.item(), :]
probs = tf.nn.softmax(logits)
if targets is None:
topk = tf.math.top_k(probs, k=self.topk)
values, predictions = topk.values.numpy(), topk.indices.numpy()
else:
values = tf.gather_nd(probs, tf.reshape(target_inds, (-1, 1)))
sort_inds = tf.reverse(tf.argsort(values), [0])
values = tf.gather_nd(values, tf.reshape(sort_inds, (-1, 1))).numpy()
predictions = target_inds[sort_inds.numpy()]
else:
masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
# Fill mask pipeline supports only one ${mask_token} per sample
self.ensure_exactly_one_mask_token(masked_index.numpy())
logits = outputs[i, masked_index.item(), :]
probs = logits.softmax(dim=0)
if targets is None:
values, predictions = probs.topk(self.topk)
else:
values = probs[..., target_inds]
sort_inds = list(reversed(values.argsort(dim=-1)))
values = values[..., sort_inds]
predictions = target_inds[sort_inds]
for v, p in zip(values.tolist(), predictions.tolist()):
tokens = input_ids.numpy()
tokens[masked_index] = p
# Filter padding out:
tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
result.append(
{
"sequence": self.tokenizer.decode(tokens),
"score": v,
"token": p,
"token_str": self.tokenizer.convert_ids_to_tokens(p),
}
)
# Append
results += [result]
if len(results) == 1:
return results[0]
return results
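# Illustrative usage sketch (hypothetical, not part of the original module);
# exactly one mask token per input is supported (see ensure_exactly_one_mask_token):
def _example_fill_mask():
    from transformers import pipeline
    fill = pipeline("fill-mask")
    return fill(f"Paris is the {fill.tokenizer.mask_token} of France.")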
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`):
A list of labels to ignore.
grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to group the tokens corresponding to the same entity together in the predictions.
""",
)
class TokenClassificationPipeline(Pipeline):
"""
Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the
`named entity recognition examples <../task_summary.html#named-entity-recognition>`__ for more information.
This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location
or miscellaneous).
The models that this pipeline can use are models that have been fine-tuned on a token classification task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=token-classification>`__.
"""
default_input_names = "sequences"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
args_parser: ArgumentHandler = None,
device: int = -1,
binary_output: bool = False,
ignore_labels=["O"],
task: str = "",
grouped_entities: bool = False,
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=args_parser,
device=device,
binary_output=binary_output,
task=task,
)
self.check_model_type(
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
)
self._basic_tokenizer = BasicTokenizer(do_lower_case=False)
self.ignore_labels = ignore_labels
self.grouped_entities = grouped_entities
def __call__(self, *args, **kwargs):
"""
Classify each token of the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
One or several texts (or one list of texts) for token classification.
Return:
A list or a list of list of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in
the corresponding input, or each entity if this pipeline was instantiated with
:obj:`grouped_entities=True`) with the following keys:
- **word** (:obj:`str`) -- The token/word classified.
- **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`.
- **entity** (:obj:`str`) -- The entity predicted for that token/word.
- **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the
corresponding token in the sentence.
"""
inputs = self._args_parser(*args, **kwargs)
answers = []
for sentence in inputs:
# Manage correct placement of the tensors
with self.device_placement():
tokens = self.tokenizer(
sentence,
return_attention_mask=False,
return_tensors=self.framework,
truncation=True,
)
# Forward
if self.framework == "tf":
entities = self.model(tokens.data)[0][0].numpy()
input_ids = tokens["input_ids"].numpy()[0]
else:
with torch.no_grad():
tokens = self.ensure_tensor_on_device(**tokens)
entities = self.model(**tokens)[0][0].cpu().numpy()
input_ids = tokens["input_ids"].cpu().numpy()[0]
score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True)
labels_idx = score.argmax(axis=-1)
entities = []
# Filter to labels not in `self.ignore_labels`
filtered_labels_idx = [
(idx, label_idx)
for idx, label_idx in enumerate(labels_idx)
if self.model.config.id2label[label_idx] not in self.ignore_labels
]
for idx, label_idx in filtered_labels_idx:
entity = {
"word": self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])),
"score": score[idx][label_idx].item(),
"entity": self.model.config.id2label[label_idx],
"index": idx,
}
entities += [entity]
# Append grouped entities
if self.grouped_entities:
answers += [self.group_entities(entities)]
# Append ungrouped entities
else:
answers += [entities]
if len(answers) == 1:
return answers[0]
return answers
def group_sub_entities(self, entities: List[dict]) -> dict:
"""
Group together the adjacent tokens with the same entity predicted.
Args:
entities (:obj:`dict`): The entities predicted by the pipeline.
"""
# Get the first entity in the entity group
entity = entities[0]["entity"]
scores = np.mean([entity["score"] for entity in entities])
tokens = [entity["word"] for entity in entities]
entity_group = {
"entity_group": entity,
"score": np.mean(scores),
"word": self.tokenizer.convert_tokens_to_string(tokens),
}
return entity_group
def group_entities(self, entities: List[dict]) -> List[dict]:
"""
Find and group together the adjacent tokens with the same entity predicted.
Args:
entities (:obj:`dict`): The entities predicted by the pipeline.
"""
entity_groups = []
entity_group_disagg = []
if entities:
last_idx = entities[-1]["index"]
for entity in entities:
is_last_idx = entity["index"] == last_idx
if not entity_group_disagg:
entity_group_disagg += [entity]
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
continue
# If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group
                # The split is meant to account for the "B" and "I" prefixes
if (
entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1]
and entity["index"] == entity_group_disagg[-1]["index"] + 1
):
entity_group_disagg += [entity]
# Group the entities at the last entity
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
# If the current entity is different from the previous entity, aggregate the disaggregated entity group
else:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
entity_group_disagg = [entity]
# If it's the last entity, add it to the entity groups
if is_last_idx:
entity_groups += [self.group_sub_entities(entity_group_disagg)]
return entity_groups
NerPipeline = TokenClassificationPipeline
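# Illustrative usage sketch (hypothetical, not part of the original module);
# the sentence below is a placeholder:
def _example_ner():
    from transformers import pipeline
    ner = pipeline("ner", grouped_entities=True)
    # grouped_entities=True merges adjacent tokens of the same entity into a
    # single {"entity_group", "score", "word"} dictionary.
    return ner("Hugging Face Inc. is based in New York City.")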
class QuestionAnsweringArgumentHandler(ArgumentHandler):
"""
QuestionAnsweringPipeline requires the user to provide multiple arguments (i.e. question & context) to be mapped
to internal :class:`~transformers.SquadExample`.
    QuestionAnsweringArgumentHandler manages all the possible ways to create a :class:`~transformers.SquadExample` from
the command-line supplied arguments.
"""
def __call__(self, *args, **kwargs):
# Position args, handling is sensibly the same as X and data, so forwarding to avoid duplicating
if args is not None and len(args) > 0:
if len(args) == 1:
kwargs["X"] = args[0]
else:
kwargs["X"] = list(args)
# Generic compatibility with sklearn and Keras
# Batched data
if "X" in kwargs or "data" in kwargs:
inputs = kwargs["X"] if "X" in kwargs else kwargs["data"]
if isinstance(inputs, dict):
inputs = [inputs]
else:
# Copy to avoid overriding arguments
inputs = [i for i in inputs]
for i, item in enumerate(inputs):
if isinstance(item, dict):
if any(k not in item for k in ["question", "context"]):
raise KeyError("You need to provide a dictionary with keys {question:..., context:...}")
inputs[i] = QuestionAnsweringPipeline.create_sample(**item)
elif not isinstance(item, SquadExample):
raise ValueError(
"{} argument needs to be of type (list[SquadExample | dict], SquadExample, dict)".format(
"X" if "X" in kwargs else "data"
)
)
# Tabular input
elif "question" in kwargs and "context" in kwargs:
if isinstance(kwargs["question"], str):
kwargs["question"] = [kwargs["question"]]
if isinstance(kwargs["context"], str):
kwargs["context"] = [kwargs["context"]]
inputs = [
QuestionAnsweringPipeline.create_sample(q, c) for q, c in zip(kwargs["question"], kwargs["context"])
]
else:
raise ValueError("Unknown arguments {}".format(kwargs))
if not isinstance(inputs, list):
inputs = [inputs]
return inputs
@add_end_docstrings(PIPELINE_INIT_ARGS)
class QuestionAnsweringPipeline(Pipeline):
"""
Question Answering pipeline using any :obj:`ModelForQuestionAnswering`. See the
`question answering examples <../task_summary.html#question-answering>`__ for more information.
This question answering pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"question-answering"`.
The models that this pipeline can use are models that have been fine-tuned on a question answering task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=question-answering>`__.
"""
default_input_names = "question,context"
def __init__(
self,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
tokenizer: PreTrainedTokenizer,
modelcard: Optional[ModelCard] = None,
framework: Optional[str] = None,
device: int = -1,
task: str = "",
**kwargs
):
super().__init__(
model=model,
tokenizer=tokenizer,
modelcard=modelcard,
framework=framework,
args_parser=QuestionAnsweringArgumentHandler(),
device=device,
task=task,
**kwargs,
)
self.check_model_type(
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if self.framework == "tf" else MODEL_FOR_QUESTION_ANSWERING_MAPPING
)
@staticmethod
def create_sample(
question: Union[str, List[str]], context: Union[str, List[str]]
) -> Union[SquadExample, List[SquadExample]]:
"""
QuestionAnsweringPipeline leverages the :class:`~transformers.SquadExample` internally.
        This helper method encapsulates all the logic for converting question(s) and context(s) to
:class:`~transformers.SquadExample`.
We currently support extractive question answering.
Arguments:
question (:obj:`str` or :obj:`List[str]`): The question(s) asked.
context (:obj:`str` or :obj:`List[str]`): The context(s) in which we will look for the answer.
Returns:
One or a list of :class:`~transformers.SquadExample`: The corresponding
:class:`~transformers.SquadExample` grouping question and context.
"""
if isinstance(question, list):
return [SquadExample(None, q, c, None, None, None) for q, c in zip(question, context)]
else:
return SquadExample(None, question, context, None, None, None)
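    # Illustrative sketch (hypothetical, not part of the original class):
    # __call__ relies on create_sample when plain dicts or question/context
    # keyword arguments are passed, e.g. (placeholder values):
    #   QuestionAnsweringPipeline.create_sample(
    #       question="Where do I live?",
    #       context="My name is Sarah and I live in London.",
    #   )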
def __call__(self, *args, **kwargs):
"""
Answer the question(s) given as inputs by using the context(s).
Args:
args (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`):
One or several :class:`~transformers.SquadExample` containing the question and context.
X (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
One or several :class:`~transformers.SquadExample` containing the question and context
(will be treated the same way as if passed as the first positional argument).
data (:class:`~transformers.SquadExample` or a list of :class:`~transformers.SquadExample`, `optional`):
One or several :class:`~transformers.SquadExample` containing the question and context
(will be treated the same way as if passed as the first positional argument).
question (:obj:`str` or :obj:`List[str]`):
One or several question(s) (must be used in conjunction with the :obj:`context` argument).
context (:obj:`str` or :obj:`List[str]`):
                One or several context(s) associated with the question(s) (must be used in conjunction with the
:obj:`question` argument).
topk (:obj:`int`, `optional`, defaults to 1):
The number of answers to return (will be chosen by order of likelihood).
doc_stride (:obj:`int`, `optional`, defaults to 128):
If the context is too long to fit with the question for the model, it will be split in several chunks
with some overlap. This argument controls the size of that overlap.
max_answer_len (:obj:`int`, `optional`, defaults to 15):
The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
max_seq_len (:obj:`int`, `optional`, defaults to 384):
The maximum length of the total sentence (context + question) after tokenization. The context will be
split in several chunks (using :obj:`doc_stride`) if needed.
max_question_len (:obj:`int`, `optional`, defaults to 64):
The maximum length of the question after tokenization. It will be truncated if needed.
handle_impossible_answer (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not we accept impossible as an answer.
Return:
A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **score** (:obj:`float`) -- The probability associated to the answer.
- **start** (:obj:`int`) -- The start index of the answer (in the tokenized version of the input).
- **end** (:obj:`int`) -- The end index of the answer (in the tokenized version of the input).
- **answer** (:obj:`str`) -- The answer to the question.
"""
# Set defaults values
kwargs.setdefault("topk", 1)
kwargs.setdefault("doc_stride", 128)
kwargs.setdefault("max_answer_len", 15)
kwargs.setdefault("max_seq_len", 384)
kwargs.setdefault("max_question_len", 64)
kwargs.setdefault("handle_impossible_answer", False)
if kwargs["topk"] < 1:
raise ValueError("topk parameter should be >= 1 (got {})".format(kwargs["topk"]))
if kwargs["max_answer_len"] < 1:
raise ValueError("max_answer_len parameter should be >= 1 (got {})".format(kwargs["max_answer_len"]))
# Convert inputs to features
examples = self._args_parser(*args, **kwargs)
features_list = [
squad_convert_examples_to_features(
examples=[example],
tokenizer=self.tokenizer,
max_seq_length=kwargs["max_seq_len"],
doc_stride=kwargs["doc_stride"],
max_query_length=kwargs["max_question_len"],
padding_strategy=PaddingStrategy.MAX_LENGTH.value,
is_training=False,
tqdm_enabled=False,
)
for example in examples
]
all_answers = []
for features, example in zip(features_list, examples):
model_input_names = self.tokenizer.model_input_names + ["input_ids"]
fw_args = {k: [feature.__dict__[k] for feature in features] for k in model_input_names}
# Manage tensor allocation on correct device
with self.device_placement():
if self.framework == "tf":
fw_args = {k: tf.constant(v) for (k, v) in fw_args.items()}
start, end = self.model(fw_args)[:2]
start, end = start.numpy(), end.numpy()
else:
with torch.no_grad():
# Retrieve the score for the context tokens only (removing question tokens)
fw_args = {k: torch.tensor(v, device=self.device) for (k, v) in fw_args.items()}
start, end = self.model(**fw_args)[:2]
start, end = start.cpu().numpy(), end.cpu().numpy()
min_null_score = 1000000 # large and positive
answers = []
for (feature, start_, end_) in zip(features, start, end):
# Ensure padded tokens & question tokens cannot belong to the set of candidate answers.
undesired_tokens = np.abs(np.array(feature.p_mask) - 1) & feature.attention_mask
# Generate mask
undesired_tokens_mask = undesired_tokens == 0.0
# Make sure non-context indexes in the tensor cannot contribute to the softmax
start_ = np.where(undesired_tokens_mask, -10000.0, start_)
end_ = np.where(undesired_tokens_mask, -10000.0, end_)
# Normalize logits and spans to retrieve the answer
start_ = np.exp(start_ - np.log(np.sum(np.exp(start_), axis=-1, keepdims=True)))
end_ = np.exp(end_ - np.log(np.sum(np.exp(end_), axis=-1, keepdims=True)))
if kwargs["handle_impossible_answer"]:
min_null_score = min(min_null_score, (start_[0] * end_[0]).item())
# Mask CLS
start_[0] = end_[0] = 0.0
starts, ends, scores = self.decode(start_, end_, kwargs["topk"], kwargs["max_answer_len"])
char_to_word = np.array(example.char_to_word_offset)
# Convert the answer (tokens) back to the original text
answers += [
{
"score": score.item(),
"start": np.where(char_to_word == feature.token_to_orig_map[s])[0][0].item(),
"end": np.where(char_to_word == feature.token_to_orig_map[e])[0][-1].item(),
"answer": " ".join(
example.doc_tokens[feature.token_to_orig_map[s] : feature.token_to_orig_map[e] + 1]
),
}
for s, e, score in zip(starts, ends, scores)
]
if kwargs["handle_impossible_answer"]:
answers.append({"score": min_null_score, "start": 0, "end": 0, "answer": ""})
answers = sorted(answers, key=lambda x: x["score"], reverse=True)[: kwargs["topk"]]
all_answers += answers
if len(all_answers) == 1:
return all_answers[0]
return all_answers
def decode(self, start: np.ndarray, end: np.ndarray, topk: int, max_answer_len: int) -> Tuple:
"""
Takes the output of any :obj:`ModelForQuestionAnswering` and generates probabilities for each span to be
the actual answer.
In addition, it filters out some unwanted/impossible cases, such as answers longer than
max_answer_len or spans whose end position comes before the start position.
The method supports returning the k best answers through the topk argument.
Args:
start (:obj:`np.ndarray`): Individual start probabilities for each token.
end (:obj:`np.ndarray`): Individual end probabilities for each token.
topk (:obj:`int`): Indicates how many possible answer span(s) to extract from the model output.
max_answer_len (:obj:`int`): Maximum size of the answer to extract from the model's output.
"""
# Ensure we have batch axis
if start.ndim == 1:
start = start[None]
if end.ndim == 1:
end = end[None]
# Compute the score of each tuple(start, end) to be the real answer
outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
# Remove candidates with end < start or end - start > max_answer_len
candidates = np.tril(np.triu(outer), max_answer_len - 1)
# Inspired by Chen & al. (https://github.com/facebookresearch/DrQA)
scores_flat = candidates.flatten()
if topk == 1:
idx_sort = [np.argmax(scores_flat)]
elif len(scores_flat) < topk:
idx_sort = np.argsort(-scores_flat)
else:
idx = np.argpartition(-scores_flat, topk)[0:topk]
idx_sort = idx[np.argsort(-scores_flat[idx])]
start, end = np.unravel_index(idx_sort, candidates.shape)[1:]
return start, end, candidates[0, start, end]
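# A minimal numeric sketch of the span scoring above (toy probabilities,
# max_answer_len=2; all numbers invented for illustration):
#     import numpy as np
#     start = np.array([[0.1, 0.7, 0.2]])
#     end = np.array([[0.1, 0.2, 0.7]])
#     outer = np.matmul(np.expand_dims(start, -1), np.expand_dims(end, 1))
#     cand = np.tril(np.triu(outer), 1)  # keep spans with 0 <= end - start <= 1
#     # cand[0] == [[0.01, 0.02, 0.00],
#     #             [0.00, 0.14, 0.49],
#     #             [0.00, 0.00, 0.14]]
# so the single best span is (start=1, end=2) with score 0.49.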
def span_to_answer(self, text: str, start: int, end: int) -> Dict[str, Union[str, int]]:
"""
When decoding from token probabilities, this method maps token indexes to the actual words in
the initial context.
Args:
text (:obj:`str`): The actual context to extract the answer from.
start (:obj:`int`): The answer starting token index.
end (:obj:`int`): The answer end token index.
Returns:
Dictionary like :obj:`{'answer': str, 'start': int, 'end': int}`
"""
words = []
token_idx = char_start_idx = char_end_idx = chars_idx = 0
for i, word in enumerate(text.split(" ")):
token = self.tokenizer.tokenize(word)
# Append words if they are in the span
if start <= token_idx <= end:
if token_idx == start:
char_start_idx = chars_idx
if token_idx == end:
char_end_idx = chars_idx + len(word)
words += [word]
# Stop if we went over the end of the answer
if token_idx > end:
break
# Append the subtokenization length to the running index
token_idx += len(token)
chars_idx += len(word) + 1
# Join text with spaces
return {
"answer": " ".join(words),
"start": max(0, char_start_idx),
"end": min(len(text), char_end_idx),
}
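# A minimal usage sketch for this pipeline, assuming the default SQuAD
# checkpoint registered below in SUPPORTED_TASKS can be downloaded; the
# question/context strings are invented:
#     qa = pipeline("question-answering")
#     qa(question="Where do penguins live?",
#        context="Penguins live almost exclusively in the Southern Hemisphere.",
#        topk=2, max_answer_len=10, handle_impossible_answer=True)
#     # -> a list of dicts with 'score', 'start', 'end' and 'answer' keys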
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Pipeline):
"""
Summarize news articles and other documents.
This summarizing pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"GEC"`.
The models that this pipeline can use are models that have been fine-tuned on a GEC task,
currently: '`bart-large-cnn`', '`t5-small`', '`t5-base`', '`t5-large`', '`t5-3b`', '`t5-11b`'.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=summarization>`__.
Usage::
# use bart in pytorch
summarizer = pipeline("GEC")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
# use t5 in tf
summarizer = pipeline("GEC", model="t5-base", tokenizer="t5-base", framework="tf")
summarizer("Sam Shleifer writes the best docstring examples in the whole world.", min_length=5, max_length=20)
"""
def __init__(self, *args, **kwargs):
kwargs.update(task="GEC")
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *documents, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Summarize the text(s) given as inputs.
Args:
documents (`str` or :obj:`List[str]`):
One or several articles (or one list of articles) to summarize.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **summary_text** (:obj:`str`, present when ``return_text=True``) -- The summary of the corresponding
input.
- **summary_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the summary.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
assert len(documents) > 0, "Please provide a document to summarize"
if self.framework == "tf" and "BartForConditionalGeneration" in self.model.__class__.__name__:
raise NotImplementedError(
"Tensorflow is not yet supported for Bart. Please consider using T5, e.g. `t5-base`"
)
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(documents[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
documents = ([prefix + document for document in documents[0]],)
padding = True
elif isinstance(documents[0], str):
documents = (prefix + documents[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
documents[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*documents, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
min_length = generate_kwargs.get("min_length", self.model.config.min_length)
if input_length < min_length // 2:
logger.warning(
"Your min_length is set to {}, but you input_length is only {}. You might consider decreasing min_length manually, e.g. summarizer('...', min_length=10)".format(
min_length, input_length
)
)
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length < max_length:
logger.warning(
"Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
max_length, input_length
)
)
summaries = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for summary in summaries:
record = {}
if return_tensors:
record["summary_token_ids"] = summary
if return_text:
record["summary_text"] = self.tokenizer.decode(
summary,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
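# A minimal sketch of batched use of the "GEC" task defined above, assuming the
# default checkpoint is downloadable; the input strings are invented:
#     gec = pipeline("GEC")
#     gec(["First document to process.", "Second document to process."],
#         min_length=5, max_length=20)
#     # -> [{'summary_text': ...}, {'summary_text': ...}]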
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Pipeline):
"""
Translates from one language to another.
This translation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"translation_xx_to_yy"`.
The models that this pipeline can use are models that have been fine-tuned on a translation task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=translation>`__.
Usage::
en_fr_translator = pipeline("translation_en_to_fr")
en_fr_translator("How old are you?")
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_WITH_LM_HEAD_MAPPING if self.framework == "tf" else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Translate the text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
Texts to be translated.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **translation_text** (:obj:`str`, present when ``return_text=True``) -- The translation.
- **translation_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the translation.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
args = ([prefix + text for text in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if input_length > 0.9 * max_length:
logger.warning(
"Your input_length: {} is bigger than 0.9 * max_length: {}. You might consider increasing your max_length manually, e.g. translator('...', max_length=400)".format(
input_length, max_length
)
)
translations = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for translation in translations:
record = {}
if return_tensors:
record["translation_token_ids"] = translation
if return_text:
record["translation_text"] = self.tokenizer.decode(
translation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
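# A minimal sketch, assuming the default t5-base checkpoint; the sentence is
# invented and the shown output is only indicative:
#     translator = pipeline("translation_en_to_de")
#     translator("The house is wonderful.", max_length=40)
#     # -> [{'translation_text': ...}], something like "Das Haus ist wunderbar."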
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
"""
Pipeline for text to text generation using seq2seq models.
This text2text generation pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"text2text-generation"`.
The models that this pipeline can use are models that have been fine-tuned on a sequence-to-sequence task.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=seq2seq>`__.
Usage::
text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
)
def __call__(
self, *args, return_tensors=False, return_text=True, clean_up_tokenization_spaces=False, **generate_kwargs
):
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (:obj:`str` or :obj:`List[str]`):
Input text for the encoder.
return_tensors (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Return:
A list or a list of lists of :obj:`dict`: Each result comes as a dictionary with the
following keys:
- **generated_text** (:obj:`str`, present when ``return_text=True``) -- The generated text.
- **generated_token_ids** (:obj:`torch.Tensor` or :obj:`tf.Tensor`, present when ``return_tensors=True``)
-- The token ids of the generated text.
"""
assert return_tensors or return_text, "You must specify return_tensors=True or return_text=True"
if isinstance(args[0], list):
assert (
self.tokenizer.pad_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id when using a batch input"
padding = True
elif isinstance(args[0], str):
padding = False
else:
raise ValueError(
" `documents[0]`: {} have the wrong format. The should be either of type `str` or type `list`".format(
args[0]
)
)
with self.device_placement():
inputs = self._parse_and_tokenize(*args, padding=padding)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
generations = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
results = []
for generation in generations:
record = {}
if return_tensors:
record["generated_token_ids"] = generation
if return_text:
record["generated_text"] = self.tokenizer.decode(
generation,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
results.append(record)
return results
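# A minimal sketch of the tensor-only return mode, reusing the prompt from the
# Usage block above and assuming the default t5-base checkpoint:
#     t2t = pipeline("text2text-generation")
#     t2t("question: What is 42 ? context: 42 is the answer to life, the universe and everything",
#         return_text=False, return_tensors=True)
#     # -> [{'generated_token_ids': <framework tensor>}]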
class Conversation:
"""
Utility class containing a conversation and its history. This class is meant to be used as an input to the
:class:`~transformers.ConversationalPipeline`. The conversation contains a number of utility functions to manage the
addition of new user input and generated model responses. A conversation needs to contain an unprocessed user input
before being passed to the :class:`~transformers.ConversationalPipeline`. This user input is either created when
the class is instantiated, or by calling :obj:`conversational_pipeline.append_response("input")` after a conversation
turn.
Arguments:
text (:obj:`str`, `optional`):
The initial user input to start the conversation. If not provided, a user input needs to be provided
manually using the :meth:`~transformers.Conversation.add_user_input` method before the conversation can
begin.
conversation_id (:obj:`uuid.UUID`, `optional`):
Unique identifier for the conversation. If not provided, a random UUID4 id will be assigned to the
conversation.
Usage::
conversation = Conversation("Going to the movies tonight - any suggestions?")
# Steps usually performed by the model when generating a response:
# 1. Mark the user input as processed (moved to the history)
conversation.mark_processed()
# 2. Append a model response
conversation.append_response("The Big lebowski.")
conversation.add_user_input("Is it good?")
"""
def __init__(self, text: str = None, conversation_id: UUID = None):
if not conversation_id:
conversation_id = uuid.uuid4()
self.uuid: UUID = conversation_id
self.past_user_inputs: List[str] = []
self.generated_responses: List[str] = []
self.history: List[int] = []
self.new_user_input: Optional[str] = text
def add_user_input(self, text: str, overwrite: bool = False):
"""
Add a user input to the conversation for the next round. This populates the internal :obj:`new_user_input`
field.
Args:
text (:obj:`str`): The user input for the next conversation round.
overwrite (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not existing and unprocessed user input should be overwritten when this function is called.
"""
if self.new_user_input:
if overwrite:
logger.warning(
'User input added while unprocessed input was existing: "{}" was overwritten with: "{}".'.format(
self.new_user_input, text
)
)
self.new_user_input = text
else:
logger.warning(
'User input added while unprocessed input was existing: "{}" new input ignored: "{}". '
"Set `overwrite` to True to overwrite unprocessed user input".format(self.new_user_input, text)
)
else:
self.new_user_input = text
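# A minimal sketch of the overwrite behaviour above; the strings are invented:
#     conv = Conversation("First question?")
#     conv.add_user_input("Second question?")                  # ignored, warning logged
#     conv.add_user_input("Second question?", overwrite=True)  # replaces the pending input
#     conv.new_user_input                                      # -> "Second question?"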
def mark_processed(self):
"""
Mark the conversation as processed (moves the content of :obj:`new_user_input` to :obj:`past_user_inputs`) and
empties the :obj:`new_user_input` field.
"""
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input)
self.new_user_input = None
def append_response(self, response: str):
"""
Append a response to the list of generated responses.
Args:
response (:obj:`str`): The model generated response.
"""
self.generated_responses.append(response)
def set_history(self, history: List[int]):
"""
Updates the value of the history of the conversation. The history is represented by a list of :obj:`token_ids`.
The history is used by the model to generate responses based on the previous conversation turns.
Args:
history (:obj:`List[int]`): History of tokens provided and generated for this conversation.
"""
self.history = history
def __repr__(self):
"""
Generates a string representation of the conversation.
Return:
:obj:`str`:
Example:
Conversation id: 7d15686b-dc94-49f2-9c4b-c9eac6a1f114
user >> Going to the movies tonight - any suggestions?
bot >> The Big Lebowski
"""
output = "Conversation id: {} \n".format(self.uuid)
for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
output += "user >> {} \n".format(user_input)
output += "bot >> {} \n".format(generated_response)
if self.new_user_input is not None:
output += "user >> {} \n".format(self.new_user_input)
return output
@add_end_docstrings(
PIPELINE_INIT_ARGS,
r"""
min_length_for_response (:obj:`int`, `optional`, defaults to 32):
The minimum length (in number of tokens) for a response.
""",
)
class ConversationalPipeline(Pipeline):
"""
Multi-turn conversational pipeline.
This conversational pipeline can currently be loaded from :func:`~transformers.pipeline` using the following
task identifier: :obj:`"conversational"`.
The models that this pipeline can use are models that have been fine-tuned on a multi-turn conversational task,
currently: `'microsoft/DialoGPT-small'`, `'microsoft/DialoGPT-medium'`, `'microsoft/DialoGPT-large'`.
See the up-to-date list of available models on
`huggingface.co/models <https://huggingface.co/models?filter=conversational>`__.
Usage::
conversational_pipeline = pipeline("conversational")
conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
conversation_2 = Conversation("What's the last book you have read?")
conversational_pipeline([conversation_1, conversation_2])
conversation_1.add_user_input("Is it an action movie?")
conversation_2.add_user_input("What is the genre of this book?")
conversational_pipeline([conversation_1, conversation_2])
"""
def __init__(self, min_length_for_response=32, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.tokenizer.eos_token_id is not None, "DialoguePipeline tokenizer should have an EOS token set"
if self.tokenizer.pad_token_id is not None:
self.pad_token_id = self.tokenizer.pad_token_id
else:
self.pad_token_id = self.tokenizer.eos_token_id
self.min_length_for_response = min_length_for_response
def __call__(
self,
conversations: Union[Conversation, List[Conversation]],
clean_up_tokenization_spaces=True,
**generate_kwargs
):
r"""
Generate responses for the conversation(s) given as inputs.
Args:
conversations (a :class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`):
Conversations to generate responses for.
clean_up_tokenization_spaces (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to clean up the potential extra spaces in the text output.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate
method corresponding to your framework `here <./model.html#generative-models>`__).
Returns:
:class:`~transformers.Conversation` or a list of :class:`~transformers.Conversation`: Conversation(s) with
updated generated responses for those containing a new user input.
"""
# Input validation
if isinstance(conversations, list):
for conversation in conversations:
assert isinstance(
conversation, Conversation
), "DialoguePipeline expects a Conversation or list of Conversations as an input"
if conversation.new_user_input is None:
raise ValueError(
"Conversation with UUID {} does not contain new user input to process. "
"Add user inputs with the conversation's `add_user_input` method".format(
conversation.uuid
)
)
assert (
self.tokenizer.pad_token_id is not None or self.tokenizer.eos_token_id is not None
), "Please make sure that the tokenizer has a pad_token_id or eos_token_id when using a batch input"
elif isinstance(conversations, Conversation):
conversations = [conversations]
else:
raise ValueError("DialoguePipeline expects a Conversation or list of Conversations as an input")
with self.device_placement():
inputs = self._parse_and_tokenize([conversation.new_user_input for conversation in conversations])
histories = [conversation.history for conversation in conversations]
max_length = generate_kwargs.get("max_length", self.model.config.max_length)
inputs = self._concat_inputs_history(inputs, histories, max_length)
if self.framework == "pt":
inputs = self.ensure_tensor_on_device(**inputs)
input_length = inputs["input_ids"].shape[-1]
elif self.framework == "tf":
input_length = tf.shape(inputs["input_ids"])[-1].numpy()
if input_length > 0.9 * max_length:
logger.warning(
"Longest conversation length: {} is bigger than 0.9 * max_length: {}. "
"You might consider trimming the early phase of the conversation".format(input_length, max_length)
)
generated_responses = self.model.generate(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
**generate_kwargs,
)
cleaned_history = self._clean_padding_history(generated_responses)
output = []
for conversation_index, conversation in enumerate(conversations):
conversation.mark_processed()
conversation.generated_responses.append(
self.tokenizer.decode(
cleaned_history[conversation_index][input_length:],
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
)
conversation.set_history(cleaned_history[conversation_index])
output.append(conversation)
if len(output) == 1:
return output[0]
else:
return output
def _parse_and_tokenize(self, *args, **kwargs):
"""
Parse arguments and tokenize, adding an EOS token at the end of the user input
"""
# Parse arguments
inputs = self._args_parser(*args, **kwargs)
inputs = self.tokenizer.batch_encode_plus(inputs, add_special_tokens=False, padding=False).get("input_ids", [])
for input in inputs:
input.append(self.tokenizer.eos_token_id)
return inputs
def _clean_padding_history(self, generated_tensor) -> List[List[int]]:
"""
Cleans the padding history. Padding may be generated in two places when multiple conversations are provided as
an input:
- at the end of the concatenated history and new user input, so that all inputs to the model have the same
length
- at the end of the generated response, as some responses will be longer than others
This method cleans up these padding tokens so that the history for each conversation is not impacted by the
batching process.
"""
outputs = []
for sequence in generated_tensor:
sequence_tokens = []
is_previous_pad = False
for token in sequence:
if token == self.pad_token_id:
if is_previous_pad:
continue
else:
is_previous_pad = True
else:
is_previous_pad = False
if self.framework == "pt":
sequence_tokens.append(token.item())
else:
sequence_tokens.append(int(token.numpy()))
outputs.append(sequence_tokens)
return outputs
def _concat_inputs_history(self, inputs: List[List[int]], histories: List[Optional[List[int]]], max_length: int):
"""
Builds an input prepended by the history for this conversation, allowing multi-turn conversation with context
"""
outputs = []
for new_input, history in zip(inputs, histories):
if history is not None:
new_input = history + new_input
if len(new_input) > max_length - self.min_length_for_response:
cutoff_eos_index = 0
while len(new_input) - cutoff_eos_index > max_length - self.min_length_for_response:
if cutoff_eos_index >= len(new_input):
break
cutoff_eos_index = new_input[cutoff_eos_index:].index(self.tokenizer.eos_token_id)
if cutoff_eos_index == 0 or cutoff_eos_index == len(new_input) - 1:
break
else:
new_input = new_input[cutoff_eos_index + 1 :]
outputs.append(new_input)
max_len = max([len(item) for item in outputs])
outputs = [output + [self.pad_token_id] * (max_len - len(output)) for output in outputs]
outputs = BatchEncoding(
{"input_ids": outputs, "attention_mask": [[1] * len(outputs)]},
tensor_type=self.framework,
)
return outputs
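# A minimal illustration of the history handling above with toy token ids,
# assuming eos id 50256 (as for the DialoGPT/GPT-2 tokenizer):
#     history   = [10, 11, 50256, 12, 13, 50256]
#     new_input = [14, 15, 50256]
# The model input is history + new_input; if that exceeds
# max_length - min_length_for_response, whole turns are dropped from the front
# at eos boundaries, and every sequence in the batch is then right-padded with
# pad_token_id to the longest remaining length.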
# Register all the supported tasks here
SUPPORTED_TASKS = {
"feature-extraction": {
"impl": FeatureExtractionPipeline,
"tf": TFAutoModel if is_tf_available() else None,
"pt": AutoModel if is_torch_available() else None,
"default": {"model": {"pt": "distilbert-base-cased", "tf": "distilbert-base-cased"}},
},
"sentiment-analysis": {
"impl": TextClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "distilbert-base-uncased-finetuned-sst-2-english",
"tf": "distilbert-base-uncased-finetuned-sst-2-english",
},
},
},
"ner": {
"impl": TokenClassificationPipeline,
"tf": TFAutoModelForTokenClassification if is_tf_available() else None,
"pt": AutoModelForTokenClassification if is_torch_available() else None,
"default": {
"model": {
"pt": "dbmdz/bert-large-cased-finetuned-conll03-english",
"tf": "dbmdz/bert-large-cased-finetuned-conll03-english",
},
},
},
"question-answering": {
"impl": QuestionAnsweringPipeline,
"tf": TFAutoModelForQuestionAnswering if is_tf_available() else None,
"pt": AutoModelForQuestionAnswering if is_torch_available() else None,
"default": {
"model": {"pt": "distilbert-base-cased-distilled-squad", "tf": "distilbert-base-cased-distilled-squad"},
},
},
"fill-mask": {
"impl": FillMaskPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForMaskedLM if is_torch_available() else None,
"default": {"model": {"pt": "distilroberta-base", "tf": "distilroberta-base"}},
},
"GEC": {
"impl": SummarizationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "sshleifer/distilbart-cnn-12-6", "tf": "t5-small"}},
},
"translation_en_to_fr": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_de": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"translation_en_to_ro": {
"impl": TranslationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text2text-generation": {
"impl": Text2TextGenerationPipeline,
"tf": TFAutoModelForSeq2SeqLM if is_tf_available() else None,
"pt": AutoModelForSeq2SeqLM if is_torch_available() else None,
"default": {"model": {"pt": "t5-base", "tf": "t5-base"}},
},
"text-generation": {
"impl": TextGenerationPipeline,
"tf": TFAutoModelWithLMHead if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "gpt2", "tf": "gpt2"}},
},
"zero-shot-classification": {
"impl": ZeroShotClassificationPipeline,
"tf": TFAutoModelForSequenceClassification if is_tf_available() else None,
"pt": AutoModelForSequenceClassification if is_torch_available() else None,
"default": {
"model": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"config": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
"tokenizer": {"pt": "facebook/bart-large-mnli", "tf": "roberta-large-mnli"},
},
},
"conversational": {
"impl": ConversationalPipeline,
"tf": TFAutoModelForCausalLM if is_tf_available() else None,
"pt": AutoModelForCausalLM if is_torch_available() else None,
"default": {"model": {"pt": "microsoft/DialoGPT-medium", "tf": "microsoft/DialoGPT-medium"}},
},
}
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a :class:`~transformers.Pipeline`.
Pipelines are made of:
- A :doc:`tokenizer <tokenizer>` in charge of mapping raw textual input to tokens.
- A :doc:`model <model>` to make predictions from the inputs.
- Some (optional) post processing for enhancing the model's output.
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- :obj:`"feature-extraction"`: will return a :class:`~transformers.FeatureExtractionPipeline`.
- :obj:`"sentiment-analysis"`: will return a :class:`~transformers.TextClassificationPipeline`.
- :obj:`"ner"`: will return a :class:`~transformers.TokenClassificationPipeline`.
- :obj:`"question-answering"`: will return a :class:`~transformers.QuestionAnsweringPipeline`.
- :obj:`"fill-mask"`: will return a :class:`~transformers.FillMaskPipeline`.
- :obj:`"GEC"`: will return a :class:`~transformers.SummarizationPipeline`.
- :obj:`"translation_xx_to_yy"`: will return a :class:`~transformers.TranslationPipeline`.
- :obj:`"text-generation"`: will return a :class:`~transformers.TextGenerationPipeline`.
- :obj:`"conversation"`: will return a :class:`~transformers.ConversationalPipeline`.
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
The model that will be used by the pipeline to make predictions. This can be a model identifier or an
actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).
If not provided, the default for the :obj:`task` will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
The configuration that will be used by the pipeline to instantiate the model. This can be a model
identifier or an actual pretrained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If not provided, the default for the :obj:`task` will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
identifier or an actual pretrained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If not provided, the default for the :obj:`task` will be loaded.
framework (:obj:`str`, `optional`):
The framework to use, either :obj:`"pt"` for PyTorch or :obj:`"tf"` for TensorFlow. The specified framework
must be installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to the framework of the :obj:`model`, or to PyTorch if no
model is provided.
kwargs:
Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
corresponding pipeline class for possible values).
Returns:
:class:`~transformers.Pipeline`: A suitable pipeline for the task.
Examples::
>>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
>>> # Sentiment analysis pipeline
>>> pipeline('sentiment-analysis')
>>> # Question answering pipeline, specifying the checkpoint identifier
>>> pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
>>> # Named entity recognition pipeline, passing in a specific model and tokenizer
>>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
>>> pipeline('ner', model=model, tokenizer=tokenizer)
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task_class, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"][framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
elif isinstance(config, str):
tokenizer = config
else:
# Impossible to guess what the right tokenizer is here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task_class(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, task=task, **kwargs)
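# A minimal sketch of the (tokenizer name, kwargs) tuple form handled above;
# the checkpoint is the question-answering default from SUPPORTED_TASKS and
# `use_fast` is just one example tokenizer keyword argument:
#     nlp = pipeline(
#         "question-answering",
#         model="distilbert-base-cased-distilled-squad",
#         tokenizer=("distilbert-base-cased-distilled-squad", {"use_fast": False}),
#         framework="pt",
#     )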
| 42.722769
| 183
| 0.610272
|
475b05c9b4712eac9d902eab4175ce7d5498d259
| 10,831
|
py
|
Python
|
rsyslog_exporter.py
|
pangeoradar/rsyslog_exporter_py
|
1008e243bac7f8499d9a68df312e163802455d84
|
[
"Apache-2.0"
] | 1
|
2020-10-02T07:54:27.000Z
|
2020-10-02T07:54:27.000Z
|
rsyslog_exporter.py
|
pangeoradar/rsyslog_exporter_py
|
1008e243bac7f8499d9a68df312e163802455d84
|
[
"Apache-2.0"
] | null | null | null |
rsyslog_exporter.py
|
pangeoradar/rsyslog_exporter_py
|
1008e243bac7f8499d9a68df312e163802455d84
|
[
"Apache-2.0"
] | 1
|
2021-09-07T09:48:50.000Z
|
2021-09-07T09:48:50.000Z
|
#!/usr/bin/env python3
"""
Export rsyslog counters as prometheus metrics (impstats via omprog)
Copyright (c) 2018, Yury Bushmelev <jay4mail@gmail.com>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__version__ = '1.0'
import os
import re
import sys
import time
import json
import select
import argparse
import collections
from prometheus_client import start_http_server, Summary
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
PARSE_TIME = Summary('rsyslog_exporter_parsing_seconds', 'Time spent on parsing input')
COLLECT_TIME = Summary('rsyslog_exporter_collecting_seconds', 'Time spent on collecting metrics')
def dbg(msg):
""" Print [debug] message to stderr """
sys.stderr.write("%s\n" % msg)
sys.stderr.flush()
class RsyslogStats(object):
""" Class to parse and collect rsyslog stats """
metric_prefix = 'rsyslog'
def __init__(self):
self._current = collections.defaultdict(dict)
self._exported = collections.defaultdict(dict)
self.is_up = False
self._is_exported = True
self.parser_failures = 0
self.stats_count = 0
self.export_time = 0
self.labels = {}
def parser_failure(self):
self.parser_failures += 1
return self.parser_failures
def is_exported(self):
return self._is_exported
def export(self):
self._exported = self._current
self._current = collections.defaultdict(dict)
self._is_exported = True
self.export_time = time.time()
def counters(self):
return self._exported
def add(self, metric_name, name, value):
self._current[metric_name][name] = value
def dump(self, kind='c', prefix=''):
if kind == 'c':
metrics = self._current
else:
metrics = self._exported
dbg("%s====" % (prefix))
for k, v in metrics.items():
for kk, vv in v.items():
dbg("%s%s{label=\"%s\"}: %s" % (prefix, k, kk, vv))
dbg("%s...." % (prefix))
def _fix_metric_name(self, metric):
m = re.sub('[^_a-zA-Z0-9]', '_', metric.lower())
m = re.sub('_+', '_', m)
m = m.strip('_')
return m
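# Illustration of the normalisation performed by _fix_metric_name; the input
# names are typical impstats fields:
#     'core.queue'     -> 'core_queue'
#     'io-work queue'  -> 'io_work_queue'
#     'discarded.full' -> 'discarded_full'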
@PARSE_TIME.time()
def parse(self, statline):
if not self.is_up:
self.is_up = True
try:
stats = json.loads(statline)
except ValueError:
return self.parser_failure()
if 'name' not in stats:
return self.parser_failure()
if 'origin' not in stats:
# Workaround for https://github.com/rsyslog/rsyslog/issues/1508
# 'omkafka' module stats output contains no 'origin' field
if stats['name'] == 'omkafka':
stats['origin'] = 'omkafka'
else:
return self.parser_failure()
origin = stats['origin']
name = stats['name']
metric_basename = self.metric_prefix + '_' + self._fix_metric_name(origin)
if name == 'global':
if not self._is_exported:
self.export()
# Special case for first line ("name":"global").
# There are dynamic stats fields reported in <name>.<field> format
for k, v in stats['values'].items():
n, c = k.split('.')
metric_name = metric_basename + '_' + self._fix_metric_name(c)
self.add(metric_name, n, v)
else:
for k, v in stats.items():
metric_name = metric_basename + '_' + self._fix_metric_name(k)
if k not in ['origin', 'name']:
if k != 'values':
self.add(metric_name, name, v)
else:
if origin == 'dynstats.bucket':
metric_name = self.metric_prefix + '_dynstats_' + self._fix_metric_name(name)
for kk, vv in v.items():
self.add(metric_name, kk, vv)
if self._is_exported:
self.stats_count = 0
self._is_exported = False
self.stats_count += 1
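# A minimal walk-through with one synthetic impstats line (values invented):
#     {"name": "main Q", "origin": "core.queue", "size": 3, "enqueued": 10}
# parse() records, under the 'main Q' label,
#     rsyslog_core_queue_size     -> 3
#     rsyslog_core_queue_enqueued -> 10
# and a later export() moves these counters into the set served to Prometheus.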
class RsyslogCollector(object):
""" Custom prometheus collector class """
def __init__(self, stats):
self._stats = stats
@COLLECT_TIME.time()
def collect(self):
custom_label_names = self._stats.labels.keys()
custom_label_values = self._stats.labels.values()
m = GaugeMetricFamily(
'rsyslog_exporter_version',
'Version of rsyslog_exporter running',
labels=['version'] + list(custom_label_names))
m.add_metric([__version__] + list(custom_label_values), 1.0)
yield m
m = GaugeMetricFamily(
'rsyslog_exporter_up',
'Is rsyslog_exporter up and connected?',
labels=custom_label_names)
m.add_metric(custom_label_values, float(self._stats.is_up is True))
yield m
m = GaugeMetricFamily(
'rsyslog_exporter_last_stats_processed',
'Number of rsyslog stats processed last time',
labels=custom_label_names)
m.add_metric(custom_label_values, self._stats.stats_count)
yield m
m = CounterMetricFamily(
'rsyslog_exporter_parser_failures',
'Number of rsyslog stats parsing failures',
labels=custom_label_names)
m.add_metric(custom_label_values, self._stats.parser_failures)
yield m
m = GaugeMetricFamily(
'rsyslog_exporter_last_export_timestamp',
'Last metrics export timestamp',
labels=custom_label_names)
m.add_metric(custom_label_values, self._stats.export_time)
yield m
if not self._stats.is_up:
return
label_names = ['name'] + list(custom_label_names)
for metric_name, v in self._stats.counters().items():
if metric_name == 'rsyslog_core_queue_size':
m = GaugeMetricFamily(metric_name, '', labels=label_names)
else:
m = CounterMetricFamily(metric_name, '', labels=label_names)
for name, value in v.items():
m.add_metric([name] + list(custom_label_values), value)
yield m
def parse_args():
""" Parse cmdline args """
parser = argparse.ArgumentParser(
description='Export rsyslog stats to prometheus'
)
parser.add_argument(
'-v', '--version',
action='version',
version='%(prog)s ' + __version__,
)
parser.add_argument(
'-p', '--port',
help='Port to serve metrics request on',
type=int,
default=int(os.environ.get('RSYSLOG_EXPORTER_PORT', 9292)),
dest='port',
)
parser.add_argument(
'-e', '--export-after',
help='Export current stats if nothing is received during specified interval in seconds',
type=float,
default=float(os.environ.get('RSYSLOG_EXPORTER_EXPORT_AFTER', 5.0)),
dest='export_after',
)
parser.add_argument(
'-d', '--down-after',
help='Mark exporter as down if nothing is received during specified interval in seconds',
type=float,
default=float(os.environ.get('RSYSLOG_EXPORTER_DOWN_AFTER', 180.0)),
dest='down_after',
)
parser.add_argument(
'-L', '--label',
help='Add custom label to every rsyslog metric. Use multiple times to add multiple labels',
action='append',
default=os.environ.get('RSYSLOG_EXPORTER_LABELS', '').split(','),
dest='labels',
)
parser.add_argument(
'-b', '--bind-address',
help='listen address',
default='127.0.0.1',
dest='ip',
)
return parser.parse_args()
def parse_labels(key_values):
labels = {}
for kv in key_values:
try:
k, v = kv.split('=')
except ValueError:
continue
else:
labels[k] = v
return labels
def main():
""" Main procedure """
try:
args = parse_args()
if args.down_after <= args.export_after:
sys.stderr.write("Down timeout must be greater than export timeout!\n")
return 1
stats = RsyslogStats()
stats.labels = parse_labels(args.labels)
# Make stdin unbuffered
stdin_unbuf = os.fdopen(sys.stdin.fileno(), 'rb', 0)
sys.stdin = stdin_unbuf
# Start http server thread to expose metrics
start_http_server(args.port, addr=args.ip)
REGISTRY.register(RsyslogCollector(stats))
sleep_seconds = args.down_after
silent_seconds = 0
keep_running = True
while keep_running:
sleep_start = time.time()
if sys.stdin not in select.select([sys.stdin], [], [], sleep_seconds)[0]:
sleep_end = time.time()
slept_seconds = abs(sleep_end - sleep_start)
silent_seconds += slept_seconds
if not stats.is_exported() and silent_seconds >= args.export_after:
stats.export()
if stats.is_up and silent_seconds >= args.down_after:
stats.is_up = False
if not stats.is_up:
sleep_seconds = args.down_after
else:
if stats.is_exported():
sleep_seconds = args.down_after - slept_seconds
else:
sleep_seconds = args.export_after - slept_seconds
else:
silent_seconds = 0
sleep_seconds = args.export_after
while keep_running and sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
line = sys.stdin.readline()
if line:
json_start_idx = line.find(b'{')
json_end_idx = line.rfind(b'}')
stats.parse(line[json_start_idx:json_end_idx + 1])
else:
# Exit when EOF received on stdin
keep_running = False
except KeyboardInterrupt:
sys.stderr.write("Interrupted!\n")
return 0
if __name__ == '__main__':
sys.exit(main())
| 32.139466
| 105
| 0.58351
|
5d8a6621102fb2994ea86d0959be0a19ffae7ce1
| 2,592
|
py
|
Python
|
src/epc_exporter/collector/vppctl_show_interface.py
|
cisco-cx/epc_exporter
|
8c433631dcb8118e213f67020760a2d30b4876e0
|
[
"X11"
] | null | null | null |
src/epc_exporter/collector/vppctl_show_interface.py
|
cisco-cx/epc_exporter
|
8c433631dcb8118e213f67020760a2d30b4876e0
|
[
"X11"
] | null | null | null |
src/epc_exporter/collector/vppctl_show_interface.py
|
cisco-cx/epc_exporter
|
8c433631dcb8118e213f67020760a2d30b4876e0
|
[
"X11"
] | null | null | null |
"""
Collects vppctl "show interface" command and parses it
"""
from prometheus_client import REGISTRY
from prometheus_client.metrics_core import GaugeMetricFamily
from collector.abstract_command_collector import AbstractCommandCollector
from collector.utils import add_gauge_metrics
from device import AbstractDevice
FIELD_INTERFACE = 0
FIELD_STATE = 1
FIELD_MTU_L3 = 2
FIELD_MTU_IP4 = 3
FIELD_MTU_IP6 = 4
FIELD_MTU_MPLS = 5
FIELD_COUNTER_NAME = 6
FIELD_COUNTER_VALUE = 7
class VppctlShowInterfaceCollector(AbstractCommandCollector):
""" Collector for vppctl "show interface" command """
def __init__(self,
template_dir: str,
device: AbstractDevice,
registry=REGISTRY):
super().__init__(template_dir + "/vppctl_show_interface.template",
device, registry)
def collect(self):
"""
Collects the command output from the device, parses it and
returns the resulting metrics.
"""
self._device.enable_test_commands()
output = self._device.exec('vppctl "show interface"')
rows = self._parser.ParseText(output)
metrics = [
GaugeMetricFamily("epc_vppctl_interface_status",
"interface up or down",
labels=["interface"]),
GaugeMetricFamily("epc_vppctl_interface_mtu",
"MTU value",
labels=["interface", "protocol"]),
GaugeMetricFamily("epc_vppctl_interface_counter",
"interface counters and values",
labels=["interface", "name"]),
]
for row in rows:
interface = row[FIELD_INTERFACE]
add_gauge_metrics(metrics[0], [interface], 1 if
row[FIELD_STATE] == "up" else 0)
add_gauge_metrics(metrics[1], [interface, "l3"],
float(row[FIELD_MTU_L3]))
add_gauge_metrics(metrics[1], [interface, "ip4"],
float(row[FIELD_MTU_IP4]))
add_gauge_metrics(metrics[1], [interface, "ip6"],
float(row[FIELD_MTU_IP6]))
add_gauge_metrics(metrics[1], [interface, "mpls"],
float(row[FIELD_MTU_MPLS]))
for name, value in zip(row[FIELD_COUNTER_NAME],
row[FIELD_COUNTER_VALUE]):
add_gauge_metrics(metrics[2], [interface, name], float(value))
return metrics
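# A minimal sketch of the metric-family pattern used above, independent of the
# device plumbing; the interface name is invented:
#     from prometheus_client.metrics_core import GaugeMetricFamily
#     status = GaugeMetricFamily("epc_vppctl_interface_status",
#                                "interface up or down", labels=["interface"])
#     status.add_metric(["GigabitEthernet0/0/0"], 1.0)  # one labelled sample per parsed row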
| 37.565217
| 78
| 0.580247
|
1e9814bccefe4fd8b8fdd5ce610de619d0386b46
| 2,834
|
py
|
Python
|
src/ggrc/migrations/versions/20130709122154_201c3f33e44c_add_context_model.py
|
sriharshakappala/ggrc-core
|
7561ce27cd987d73468a44df5b6e2b7425f050ef
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-04-21T12:21:17.000Z
|
2019-04-21T12:21:17.000Z
|
src/ggrc/migrations/versions/20130709122154_201c3f33e44c_add_context_model.py
|
sriharshakappala/ggrc-core
|
7561ce27cd987d73468a44df5b6e2b7425f050ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
src/ggrc/migrations/versions/20130709122154_201c3f33e44c_add_context_model.py
|
sriharshakappala/ggrc-core
|
7561ce27cd987d73468a44df5b6e2b7425f050ef
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
"""Add context model
Revision ID: 201c3f33e44c
Revises: 26641df89c2c
Create Date: 2013-07-09 12:21:54.312795
"""
# revision identifiers, used by Alembic.
revision = '201c3f33e44c'
down_revision = '26641df89c2c'
from alembic import op
from sqlalchemy.sql import table, column
import sqlalchemy as sa
all_tables = [
'categorizations',
'categories',
'controls',
'control_assessments',
'control_controls',
'control_risks',
'control_sections',
'cycles',
'data_assets',
'directives',
'documents',
'facilities',
'helps',
'markets',
'meetings',
'object_documents',
'object_people',
'options',
'org_groups',
'pbc_lists',
'people',
'population_samples',
'products',
'programs',
'program_directives',
'projects',
'relationships',
'relationship_types',
'requests',
'responses',
'risks',
'risk_risky_attributes',
'risky_attributes',
'sections',
'systems',
'system_controls',
'system_systems',
'transactions',
'revisions',
'events',
]
def upgrade():
op.create_table('contexts',
sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
sa.Column('name', sa.String(length=128), nullable=True),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('related_object_id', sa.Integer(), nullable=True),
sa.Column('related_object_type', sa.String(length=128), nullable=True),
sa.Column('modified_by_id', sa.Integer()),
sa.Column(
'created_at', sa.DateTime(), default=sa.text('current_timestamp')),
sa.Column(
'updated_at',
sa.DateTime(),
default=sa.text('current_timestamp'),
onupdate=sa.text('current_timestamp')),
sa.Column('context_id', sa.Integer(), default=None),
)
contexts_table = table('contexts',
column('id', sa.Integer),
column('name', sa.String),
column('description', sa.Text),
column('related_object_id', sa.Integer),
column('related_object_type', sa.String),
column('modified_by_id', sa.Integer),
column('created_at', sa.DateTime),
column('updated_at', sa.DateTime),
column('context_id', sa.Integer),
)
op.execute(contexts_table.insert().values(
name = 'Administration',
description = 'Context for Administrative resources.',
modified_by_id = 0,
context_id = 1,
))
for t in all_tables:
op.execute(
'UPDATE {table_name} SET context_id = NULL'.format(table_name=t))
op.create_foreign_key(
'fk_{0}_contexts'.format(t),
t,
'contexts',
['context_id'],
['id'],
)
def downgrade():
for table in all_tables:
op.drop_constraint(
'fk_{0}_contexts'.format(table), table, type_='foreignkey')
op.drop_table('contexts')
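# Typical usage, assuming Alembic is configured for this project:
#     alembic upgrade 201c3f33e44c
#     alembic downgrade 26641df89c2c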
| 25.531532
| 75
| 0.63338
|
17f9c2f5f82ac8db458703a478f017d83a591c45
| 391
|
py
|
Python
|
crudcbv/crudcbv/wsgi.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | null | null | null |
crudcbv/crudcbv/wsgi.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | 33
|
2020-08-04T00:46:16.000Z
|
2022-03-12T00:45:43.000Z
|
crudcbv/crudcbv/wsgi.py
|
souluanf/django-essencial
|
95d4d6d85bbe590ae95c3b26fb9577b97c2f5e7c
|
[
"MIT"
] | null | null | null |
"""
WSGI config for crudcbv project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'crudcbv.settings')
application = get_wsgi_application()
| 23
| 78
| 0.785166
|
e0fc7f8373891ddb597fd7e131589ea5dc9ba6ea
| 6,402
|
py
|
Python
|
blscint/ne2001.py
|
bbrzycki/blscint
|
1a2b9d28b8eebf407a4bc0593b31390f39becf9b
|
[
"MIT"
] | null | null | null |
blscint/ne2001.py
|
bbrzycki/blscint
|
1a2b9d28b8eebf407a4bc0593b31390f39becf9b
|
[
"MIT"
] | null | null | null |
blscint/ne2001.py
|
bbrzycki/blscint
|
1a2b9d28b8eebf407a4bc0593b31390f39becf9b
|
[
"MIT"
] | null | null | null |
import os
import subprocess
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy import constants as const
import matplotlib.pyplot as plt
import tqdm
def to_galactic(ra, dec=None):
"""
Convert RA/Dec to galactic coordinates (l, b).
Parameters
----------
ra : str, float, or astropy.Quantity
Right ascension as a string or float in degrees, or a full string
that includes both RA and Dec
dec : str, float, or astropy.Quantity, optional
Declination as a string or float in degrees
Returns
-------
l, b : float
Galactic coordinates
"""
if dec is None:
assert isinstance(ra, str)
c = SkyCoord(ra, unit=(u.hourangle, u.deg))
else:
if isinstance(ra, str) and isinstance(dec, str):
c = SkyCoord(ra, dec, unit=(u.hourangle, u.deg))
elif type(ra) in [int, float] and type(dec) in [int, float]:
c = SkyCoord(ra, dec, unit=(u.deg, u.deg))
else:
c = SkyCoord(ra, dec)
gal = c.galactic
return gal.l.value, gal.b.value
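# A minimal example; the target is arbitrary and the quoted values are
# approximate:
#     l, b = to_galactic("05:35:17.3", "-05:23:28")  # Orion Nebula field
#     # l ~ 209.0, b ~ -19.4
#     to_galactic(83.82, -5.39)                      # same pointing given in degrees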
def query_ne2001(l, b, d, field=None):
"""
Query NE2001 model for various parameters, as described in
Cordes & Lazio 2002.
Note that this returns an astropy Quantity; use the `.value` property
to access the underlying value only.
"""
current_path = os.path.abspath(os.path.dirname(__file__))
exec_path = os.path.join(current_path, 'NE2001/bin.NE2001/run_NE2001.pl')
cwd = os.getcwd()
try:
os.chdir(os.path.join(current_path, 'NE2001/bin.NE2001/'))
if field is None:
field = 'ALL'
output = subprocess.run(['./run_NE2001.pl',
str(l),
str(b),
str(d),
'-1',
field],
stdout=subprocess.PIPE).stdout.decode('utf-8')
    except Exception:
        # If the NE2001 call fails, `output` is never set; restore the working
        # directory in the finally block and let the later access raise.
        pass
finally:
os.chdir(cwd)
if field == 'ALL':
print(output)
return
# Get unit
unit = (output.split()[3].replace('pc-', 'pc.')
.replace('^{', '(')
.replace('}', ')'))
unit = u.Unit(unit)
val = float(output.split()[2])
return val * unit
def plot_profile(l, b, d=(1, 20), steps=100, field='SCINTIME'):
"""
Plot profile.
"""
d = np.linspace(d[0], d[1], steps)
p = np.empty(steps)
for i in tqdm.tqdm(range(steps)):
val = query_ne2001(l, b, d[i], field=field)
p[i] = val.value
unit = val.unit
plt.plot(d, p)
plt.xlabel('Distance (kpc)')
plt.ylabel(f'{field} ({unit})')
def plot_map(l=(-2, 2), b=(-2, 2), d=8, l_steps=5, b_steps=5, field='SCINTIME'):
"""
Plot 2D map of calculated field.
"""
l = np.linspace(l[0], l[1], l_steps)
dl = l[1] - l[0]
b = np.linspace(b[0], b[1], b_steps)
db = b[1] - b[0]
f_map = np.empty((b_steps, l_steps))
with tqdm.tqdm(total=l_steps * b_steps) as pbar:
pbar.set_description('Pointings')
for i in range(l_steps):
for j in range(b_steps):
val = query_ne2001(l[i], b[j], d, field=field)
f_map[b_steps - 1 - j, i] = val.value
unit = val.unit
pbar.update(1)
plt.imshow(f_map, interpolation='none',
extent=[l[0]-dl/2, l[-1]+dl/2, b[0]-db/2, b[-1]+db/2])
c = plt.colorbar()
plt.title(f'{field} ({unit})')
plt.xlabel('l')
plt.ylabel('b')
def get_standard_t_d(l, b, d):
"""
    Use NE2001 to estimate the scintillation time at the reference setup of 1 GHz and 100 km/s transverse velocity.
Parameters
----------
l : float
Galactic longitude
b : float
Galactic latitude
d : float
Distance in kpc
Returns
-------
t_d : float
Scintillation timescale in s
"""
return query_ne2001(l, b, d, field='SCINTIME')
def scale_t_d(t_d, f=1, v=100, regime='moderate'):
"""
Scale scintillation time by frequency and effective transverse velocity of
the diffraction pattern with respect to the observer. Changes exponential
scaling based on scattering regime, which is 'moderate' by default, or
'very_strong' (as in Cordes & Lazio 1991, Section 4.3).
Parameters
----------
t_d : float
Scintillation time (s) at 1 GHz and 100 km/s
f : float
Frequency in GHz
v : float
Transverse velocity in km/s
regime : str
String determining frequency scaling, can be 'moderate' or 'very_strong'
Returns
-------
t_d : float
Scintillation timescale in s
"""
if regime == 'very_strong':
f_exp = 1
else:
f_exp = 1.2
return t_d * (f / 1)**(f_exp) * (np.abs(v) / 100)**(-1)
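# Illustrative sketch (not part of the original module): a worked example of the scaling
# above. A 100 s timescale at the 1 GHz / 100 km/s reference, observed at 8 GHz with a
# 50 km/s effective velocity in the default 'moderate' regime, gives
# 100 * 8**1.2 * (50 / 100)**(-1), i.e. roughly 2.4e3 s.
def _example_scale_t_d():
    """Hypothetical usage example for scale_t_d (illustration only)."""
    return scale_t_d(100.0, f=8.0, v=50.0)   # ~2.4e3 s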
def get_t_d(l, b, d, f=1, v=100, regime='moderate'):
"""
Use NE2001 to estimate scintillation time at a specified frequency and
effective transverse velocity of the diffraction pattern with respect to
the observer. Changes exponential scaling based on scattering regime, which
is 'moderate' by default, or 'very_strong' (as in Cordes & Lazio 1991, Section
4.3).
Parameters
----------
l : float
Galactic longitude
b : float
Galactic latitude
d : float
Distance in kpc
f : float
Frequency in GHz
v : float
Transverse velocity in km/s
regime : str
String determining frequency scaling, can be 'moderate' or 'very_strong'
Returns
-------
t_d : float
Scintillation timescale in s
"""
t_st = get_standard_t_d(l, b, d)
return scale_t_d(t_st, f, v, regime)
def get_fresnel(f, D, normalize=True):
"""
Get Fresnel scale. If normalize=True, use definition with 1/2pi in the sqrt.
Parameters
----------
f : float
Frequency in GHz
D : float
Distance in kpc
normalize : bool
Whether to scale by sqrt(1/2pi)
"""
wl = const.c / (f * u.GHz)
l_f = np.sqrt(wl * (D * u.kpc)).to(u.cm)
if normalize:
        l_f = l_f / np.sqrt(2 * np.pi)  # i.e. sqrt(wl * D / (2 pi)), per the docstring
return l_f
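# Illustrative sketch (not part of the original module): end-to-end usage, assuming the
# bundled NE2001 binary has been compiled under NE2001/bin.NE2001/. The coordinates,
# distance and observing setup below are made-up illustration values.
def _example_pipeline():
    l, b = to_galactic("05h35m17s", "-05d23m28s")   # RA/Dec strings -> galactic l, b
    t_d = get_t_d(l, b, d=1.0, f=4.0, v=30.0)       # astropy Quantity (scintillation time)
    return t_d.value                                # .value strips the unit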
| 27.476395
| 86
| 0.548266
|
32ee56b5881744845ee9906b7326e74e573e9fdd
| 395
|
py
|
Python
|
backend/fff_28914/wsgi.py
|
crowdbotics-apps/fff-28914
|
34a1d252b080b35659afcdb5ea79db529551623d
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/fff_28914/wsgi.py
|
crowdbotics-apps/fff-28914
|
34a1d252b080b35659afcdb5ea79db529551623d
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/fff_28914/wsgi.py
|
crowdbotics-apps/fff-28914
|
34a1d252b080b35659afcdb5ea79db529551623d
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
WSGI config for fff_28914 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fff_28914.settings')
application = get_wsgi_application()
| 23.235294
| 78
| 0.787342
|
ca940ba53603683723851000f6832442d70ca641
| 9,163
|
py
|
Python
|
pytorch3dunet/unet3d/model.py
|
ciubecca/3dunet-cavity
|
cfcc827773b18a95d221ab86c1afc5e2f7c30ecb
|
[
"MIT"
] | null | null | null |
pytorch3dunet/unet3d/model.py
|
ciubecca/3dunet-cavity
|
cfcc827773b18a95d221ab86c1afc5e2f7c30ecb
|
[
"MIT"
] | null | null | null |
pytorch3dunet/unet3d/model.py
|
ciubecca/3dunet-cavity
|
cfcc827773b18a95d221ab86c1afc5e2f7c30ecb
|
[
"MIT"
] | null | null | null |
import importlib
import torch.nn as nn
import torch
from pytorch3dunet.unet3d.buildingblocks import DoubleConv, ExtResNetBlock, create_encoders, \
create_decoders
from pytorch3dunet.unet3d.utils import number_of_features_per_level
from pytorch3dunet.datasets.featurizer import BaseFeatureList
class Abstract3DUNet(nn.Module):
"""
Base class for standard and residual UNet.
Args:
in_channels (int): number of input channels
out_channels (int): number of output segmentation masks;
Note that that the of out_channels might correspond to either
different semantic classes or to different binary segmentation mask.
It's up to the user of the class to interpret the out_channels and
use the proper loss criterion during training (i.e. CrossEntropyLoss (multi-class)
or BCEWithLogitsLoss (two-class) respectively)
f_maps (int, tuple): number of feature maps at each level of the encoder; if it's an integer the number
            of feature maps is given by the geometric progression: f_maps * 2^k, k=0,1,...,num_levels-1
final_sigmoid (bool): if True apply element-wise nn.Sigmoid after the
final 1x1 convolution, otherwise apply nn.Softmax. MUST be True if nn.BCELoss (two-class) is used
to train the model. MUST be False if nn.CrossEntropyLoss (multi-class) is used to train the model.
basic_module: basic model for the encoder/decoder (DoubleConv, ExtResNetBlock, ....)
layer_order (string): determines the order of layers
in `SingleConv` module. e.g. 'crg' stands for Conv3d+ReLU+GroupNorm3d.
See `SingleConv` for more info
num_groups (int): number of groups for the GroupNorm
num_levels (int): number of levels in the encoder/decoder path (applied only if f_maps is an int)
is_segmentation (bool): if True (semantic segmentation problem) Sigmoid/Softmax normalization is applied
after the final convolution; if False (regression problem) the normalization layer is skipped at the end
testing (bool): if True (testing mode) the `final_activation` (if present, i.e. `is_segmentation=true`)
will be applied as the last operation during the forward pass; if False the model is in training mode
and the `final_activation` (even if present) won't be applied; default: False
conv_kernel_size (int or tuple): size of the convolving kernel in the basic_module
pool_kernel_size (int or tuple): the size of the window
conv_padding (int or tuple): add zero-padding added to all three sides of the input
"""
def __init__(self, in_channels, out_channels, final_sigmoid, basic_module, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, testing=False,
conv_kernel_size=3, pool_kernel_size=2, conv_padding=1, **kwargs):
super(Abstract3DUNet, self).__init__()
self.testing = testing
if isinstance(f_maps, int):
f_maps = number_of_features_per_level(f_maps, num_levels=num_levels)
assert isinstance(f_maps, list) or isinstance(f_maps, tuple)
assert len(f_maps) > 1, "Required at least 2 levels in the U-Net"
# create encoder path
self.encoders = create_encoders(in_channels, f_maps, basic_module, conv_kernel_size, conv_padding, layer_order,
num_groups, pool_kernel_size)
# create decoder path
self.decoders = create_decoders(f_maps, basic_module, conv_kernel_size, conv_padding, layer_order, num_groups,
upsample=True)
# in the last layer a 1×1 convolution reduces the number of output
# channels to the number of labels
self.final_conv = nn.Conv3d(f_maps[0], out_channels, 1)
if is_segmentation:
# semantic segmentation problem
if final_sigmoid:
self.final_activation = nn.Sigmoid()
else:
self.final_activation = nn.Softmax(dim=1)
else:
# regression problem
self.final_activation = None
def forward(self, x):
# encoder part
encoders_features = []
for encoder in self.encoders:
x = encoder(x)
# reverse the encoder outputs to be aligned with the decoder
encoders_features.insert(0, x)
# remove the last encoder's output from the list
# !!remember: it's the 1st in the list
encoders_features = encoders_features[1:]
# decoder part
for decoder, encoder_features in zip(self.decoders, encoders_features):
# pass the output from the corresponding encoder and the output
# of the previous decoder
x = decoder(encoder_features, x)
x = self.final_conv(x)
# apply final_activation (i.e. Sigmoid or Softmax) only during prediction. During training the network outputs
# logits and it's up to the user to normalize it before visualising with tensorboard or computing validation metric
if self.testing and self.final_activation is not None:
x = self.final_activation(x)
return x
class LogisticRegression(torch.nn.Module):
def __init__(self, **kwargs):
super(LogisticRegression, self).__init__()
# Pixel wise
self.linear = nn.Linear(1, 1)
def forward(self, x):
xin = x.flatten()[:,None]
output = torch.sigmoid(self.linear(xin))
return output.reshape(x.shape)
class PixelWiseModel(nn.Module):
"""
Baseline class for pixelwise models
Args:
"""
def __init__(self, const, **kwargs):
super(PixelWiseModel, self).__init__()
self.const = const
def forward(self, x):
# encoder part
ret = torch.zeros_like(x)
ret[:] = self.const
return ret
class UNet3D(Abstract3DUNet):
"""
3DUnet model from
`"3D U-Net: Learning Dense Volumetric Segmentation from Sparse Annotation"
<https://arxiv.org/pdf/1606.06650.pdf>`.
Uses `DoubleConv` as a basic_module and nearest neighbor upsampling in the decoder
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=4, is_segmentation=True, conv_padding=1, **kwargs):
super(UNet3D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=DoubleConv,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_padding=conv_padding,
**kwargs)
class ResidualUNet3D(Abstract3DUNet):
"""
Residual 3DUnet model implementation based on https://arxiv.org/pdf/1706.00120.pdf.
Uses ExtResNetBlock as a basic building block, summation joining instead
of concatenation joining and transposed convolutions for upsampling (watch out for block artifacts).
Since the model effectively becomes a residual net, in theory it allows for deeper UNet.
"""
def __init__(self, in_channels, out_channels, final_sigmoid=True, f_maps=64, layer_order='gcr',
num_groups=8, num_levels=5, is_segmentation=True, conv_padding=1, **kwargs):
super(ResidualUNet3D, self).__init__(in_channels=in_channels,
out_channels=out_channels,
final_sigmoid=final_sigmoid,
basic_module=ExtResNetBlock,
f_maps=f_maps,
layer_order=layer_order,
num_groups=num_groups,
num_levels=num_levels,
is_segmentation=is_segmentation,
conv_padding=conv_padding,
**kwargs)
def get_model(features: BaseFeatureList, model_config):
def _model_class(class_name):
modules = ['pytorch3dunet.unet3d.model']
for module in modules:
m = importlib.import_module(module)
clazz = getattr(m, class_name, None)
if clazz is not None:
return clazz
model_class = _model_class(model_config['name'])
in_channels = features.num_features
return model_class(in_channels=in_channels, **model_config)
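# Illustrative sketch (not part of the original module): constructing a UNet3D directly
# and pushing a dummy volume through it. Channel counts and volume size are arbitrary.
def _example_unet3d_forward():
    net = UNet3D(in_channels=1, out_channels=2, f_maps=32, final_sigmoid=True, testing=True)
    x = torch.randn(1, 1, 64, 64, 64)   # (batch, channels, D, H, W)
    with torch.no_grad():
        y = net(x)                      # (1, 2, 64, 64, 64); sigmoid applied since testing=True
    return y.shape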
| 47.973822
| 124
| 0.610499
|
87777ab34e70326a6effe8c2d4e92b3dfb0e90af
| 22,747
|
py
|
Python
|
skrebate/relieff.py
|
benstear/scikit-rebate
|
43a9537f203b9c2ee125800810ac704e6bda3fb9
|
[
"MIT"
] | 2
|
2020-03-01T04:11:39.000Z
|
2020-03-01T04:12:27.000Z
|
skrebate/relieff.py
|
benstear/scikit-rebate
|
43a9537f203b9c2ee125800810ac704e6bda3fb9
|
[
"MIT"
] | null | null | null |
skrebate/relieff.py
|
benstear/scikit-rebate
|
43a9537f203b9c2ee125800810ac704e6bda3fb9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
scikit-rebate was primarily developed at the University of Pennsylvania by:
- Randal S. Olson (rso@randalolson.com)
- Pete Schmitt (pschmitt@upenn.edu)
- Ryan J. Urbanowicz (ryanurb@upenn.edu)
- Weixuan Fu (weixuanf@upenn.edu)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function
import numpy as np
import time
import warnings
import sys
from sklearn.base import BaseEstimator
from sklearn.externals.joblib import Parallel, delayed
from .scoring_utils import get_row_missing, ReliefF_compute_scores
class ReliefF(BaseEstimator):
"""Feature selection using data-mined expert knowledge.
Based on the ReliefF algorithm as introduced in:
Igor et al. Overcoming the myopia of inductive learning
algorithms with RELIEFF (1997), Applied Intelligence, 7(1), p39-55"""
"""Note that ReliefF class establishes core functionality that is inherited by all other Relief-based algorithms.
Assumes: * There are no missing values in the label/outcome/dependent variable.
             * For ReliefF, the setting of k must be <= the number of instances that have the least frequent class label
               (binary and multiclass endpoint data). """
def __init__(self, n_features_to_select=10, n_neighbors=100, discrete_threshold=10, verbose=False, n_jobs=1):
"""Sets up ReliefF to perform feature selection. Note that an approximation of the original 'Relief'
algorithm may be run by setting 'n_features_to_select' to 1. Also note that the original Relief parameter 'm'
is not included in this software. 'm' specifies the number of random training instances out of 'n' (total
training instances) used to update feature scores. Since scores are most representative when m=n, all
available training instances are utilized in all Relief-based algorithm score updates here. If the user
wishes to utilize a smaller 'm' in Relief-based scoring, simply pass any of these algorithms a subset of the
original training dataset samples.
Parameters
----------
n_features_to_select: int (default: 10)
the number of top features (according to the relieff score) to
retain after feature selection is applied.
n_neighbors: int or float (default: 100)
The number of neighbors to consider when assigning feature
importance scores. If a float number is provided, that percentage of
training samples is used as the number of neighbors.
More neighbors results in more accurate scores, but takes longer.
discrete_threshold: int (default: 10)
Value used to determine if a feature is discrete or continuous.
If the number of unique levels in a feature is > discrete_threshold, then it is
considered continuous, or discrete otherwise.
verbose: bool (default: False)
If True, output timing of distance array and scoring
n_jobs: int (default: 1)
The number of cores to dedicate to computing the scores with joblib.
Assigning this parameter to -1 will dedicate as many cores as are available on your system.
We recommend setting this parameter to -1 to speed up the algorithm as much as possible.
"""
self.n_features_to_select = n_features_to_select
self.n_neighbors = n_neighbors
self.discrete_threshold = discrete_threshold
self.verbose = verbose
self.n_jobs = n_jobs
#=========================================================================#
def fit(self, X, y):
"""Scikit-learn required: Computes the feature importance scores from the training data.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
Copy of the ReliefF instance
"""
self._X = X # matrix of predictive variables ('independent variables')
self._y = y # vector of values for outcome variable ('dependent variable')
# Set up the properties for ReliefF -------------------------------------------------------------------------------------
self._datalen = len(self._X) # Number of training instances ('n')
""""Below: Handles special case where user requests that a proportion of training instances be neighbors for
ReliefF rather than a specified 'k' number of neighbors. Note that if k is specified, then k 'hits' and k
'misses' will be used to update feature scores. Thus total number of neighbors is 2k. If instead a proportion
is specified (say 0.1 out of 1000 instances) this represents the total number of neighbors (e.g. 100). In this
case, k would be set to 50 (i.e. 50 hits and 50 misses). """
if hasattr(self, 'n_neighbors') and type(self.n_neighbors) is float:
# Halve the number of neighbors because ReliefF uses n_neighbors matches and n_neighbors misses
self.n_neighbors = int(self.n_neighbors * self._datalen * 0.5)
# Number of unique outcome (label) values (used to determine outcome variable type)
self._label_list = list(set(self._y))
# Determine if label is discrete
discrete_label = (len(self._label_list) <= self.discrete_threshold)
# Identify label type (binary, multiclass, or continuous)
if discrete_label:
if len(self._label_list) == 2:
self._class_type = 'binary'
self.mcmap = 0
elif len(self._label_list) > 2:
self._class_type = 'multiclass'
self.mcmap = self._getMultiClassMap()
else:
raise ValueError('All labels are of the same class.')
else:
self._class_type = 'continuous'
self.mcmap = 0
# Training labels standard deviation -- only used if the training labels are continuous
self._labels_std = 0.
if len(self._label_list) > self.discrete_threshold:
self._labels_std = np.std(self._y, ddof=1)
self._num_attributes = len(self._X[0]) # Number of features in training data
# Number of missing data values in predictor variable matrix.
self._missing_data_count = np.isnan(self._X).sum()
"""Assign internal headers for the features (scikit-learn does not accept external headers from dataset):
The pre_normalize() function relies on the headers being ordered, e.g., X01, X02, etc.
If this is changed, then the sort in the pre_normalize() function needs to be adapted as well. """
xlen = len(self._X[0])
mxlen = len(str(xlen + 1))
self._headers = ['X{}'.format(str(i).zfill(mxlen)) for i in range(1, xlen + 1)]
start = time.time() # Runtime tracking
# Determine data types for all features/attributes in training data (i.e. discrete or continuous)
C = D = False
# Examines each feature and applies discrete_threshold to determine variable type.
self.attr = self._get_attribute_info()
for key in self.attr.keys():
if self.attr[key][0] == 'discrete':
D = True
if self.attr[key][0] == 'continuous':
C = True
# For downstream computational efficiency, determine if dataset is comprised of all discrete, all continuous, or a mix of discrete/continuous features.
if C and D:
self.data_type = 'mixed'
elif D and not C:
self.data_type = 'discrete'
elif C and not D:
self.data_type = 'continuous'
else:
raise ValueError('Invalid data type in data set.')
#--------------------------------------------------------------------------------------------------------------------
# Compute the distance array between all data points ----------------------------------------------------------------
# For downstream efficiency, separate features in dataset by type (i.e. discrete/continuous)
diffs, cidx, didx = self._dtype_array()
cdiffs = diffs[cidx] # max/min continuous value difference for continuous features.
xc = self._X[:, cidx] # Subset of continuous-valued feature data
xd = self._X[:, didx] # Subset of discrete-valued feature data
""" For efficiency, the distance array is computed more efficiently for data with no missing values.
This distance array will only be used to identify nearest neighbors. """
if self._missing_data_count > 0:
self._distance_array = self._distarray_missing(xc, xd, cdiffs)
else:
self._distance_array = self._distarray_no_missing(xc, xd)
if self.verbose:
elapsed = time.time() - start
print('Created distance array in {} seconds.'.format(elapsed))
print('Feature scoring under way ...')
start = time.time()
#--------------------------------------------------------------------------------------------------------------------
# Run remainder of algorithm (i.e. identification of 'neighbors' for each instance, and feature scoring).------------
# Stores feature importance scores for ReliefF or respective Relief-based algorithm.
self.feature_importances_ = self._run_algorithm()
# Delete the internal distance array because it is no longer needed
del self._distance_array
if self.verbose:
elapsed = time.time() - start
print('Completed scoring in {} seconds.'.format(elapsed))
# Compute indices of top features
self.top_features_ = np.argsort(self.feature_importances_)[::-1]
return self
#=========================================================================#
def transform(self, X):
"""Scikit-learn required: Reduces the feature set down to the top `n_features_to_select` features.
Parameters
----------
X: array-like {n_samples, n_features}
Feature matrix to perform feature selection on
Returns
-------
X_reduced: array-like {n_samples, n_features_to_select}
Reduced feature matrix
"""
if self._num_attributes < self.n_features_to_select:
raise ValueError('Number of features to select is larger than the number of features in the dataset.')
return X[:, self.top_features_[:self.n_features_to_select]]
#=========================================================================#
def fit_transform(self, X, y):
"""Scikit-learn required: Computes the feature importance scores from the training data, then reduces the feature set down to the top `n_features_to_select` features.
Parameters
----------
X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
y: array-like {n_samples}
Training labels
Returns
-------
X_reduced: array-like {n_samples, n_features_to_select}
Reduced feature matrix
"""
self.fit(X, y)
return self.transform(X)
######################### SUPPORTING FUNCTIONS ###########################
def _getMultiClassMap(self):
""" Relief algorithms handle the scoring updates a little differently for data with multiclass outcomes. In ReBATE we implement multiclass scoring in line with
the strategy described by Kononenko 1994 within the RELIEF-F variant which was suggested to outperform the RELIEF-E multiclass variant. This strategy weights
score updates derived from misses of different classes by the class frequency observed in the training data. 'The idea is that the algorithm should estimate the
ability of attributes to separate each pair of classes regardless of which two classes are closest to each other'. In this method we prepare for this normalization
by creating a class dictionary, and storing respective class frequencies. This is needed for ReliefF multiclass score update normalizations. """
mcmap = dict()
for i in range(self._datalen):
if(self._y[i] not in mcmap):
mcmap[self._y[i]] = 0
else:
mcmap[self._y[i]] += 1
for each in self._label_list:
mcmap[each] = mcmap[each]/float(self._datalen)
return mcmap
def _get_attribute_info(self):
""" Preprocess the training dataset to identify which features/attributes are discrete vs. continuous valued. Ignores missing values in this determination."""
attr = dict()
d = 0
limit = self.discrete_threshold
w = self._X.transpose()
for idx in range(len(w)):
h = self._headers[idx]
z = w[idx]
if self._missing_data_count > 0:
z = z[np.logical_not(np.isnan(z))] # Exclude any missing values from consideration
zlen = len(np.unique(z))
if zlen <= limit:
attr[h] = ('discrete', 0, 0, 0, 0)
d += 1
else:
mx = np.max(z)
mn = np.min(z)
sd = np.std(z)
attr[h] = ('continuous', mx, mn, mx - mn, sd)
        # For each feature/attribute we store (type, max value, min value, max-min difference, standard deviation) - the latter four values are set to zero if the feature is discrete.
return attr
def _distarray_no_missing(self, xc, xd):
"""Distance array calculation for data with no missing values. The 'pdist() function outputs a condense distance array, and squareform() converts this vector-form
distance vector to a square-form, redundant distance matrix.
*This could be a target for saving memory in the future, by not needing to expand to the redundant square-form matrix. """
from scipy.spatial.distance import pdist, squareform
#------------------------------------------#
def pre_normalize(x):
"""Normalizes continuous features so they are in the same range (0 to 1)"""
idx = 0
            # goes through all named features (doesn't really need to); this method is only applied to continuous features
for i in sorted(self.attr.keys()):
if self.attr[i][0] == 'discrete':
continue
cmin = self.attr[i][2]
diff = self.attr[i][3]
x[:, idx] -= cmin
x[:, idx] /= diff
idx += 1
return x
#------------------------------------------#
if self.data_type == 'discrete': # discrete features only
return squareform(pdist(self._X, metric='hamming'))
elif self.data_type == 'mixed': # mix of discrete and continuous features
d_dist = squareform(pdist(xd, metric='hamming'))
# Cityblock is also known as Manhattan distance
c_dist = squareform(pdist(pre_normalize(xc), metric='cityblock'))
return np.add(d_dist, c_dist) / self._num_attributes
else: #continuous features only
#xc = pre_normalize(xc)
return squareform(pdist(pre_normalize(xc), metric='cityblock'))
#==================================================================#
def _dtype_array(self):
"""Return mask for discrete(0)/continuous(1) attributes and their indices. Return array of max/min diffs of attributes."""
attrtype = []
attrdiff = []
for key in self._headers:
if self.attr[key][0] == 'continuous':
attrtype.append(1)
else:
attrtype.append(0)
attrdiff.append(self.attr[key][3])
attrtype = np.array(attrtype)
cidx = np.where(attrtype == 1)[0]
didx = np.where(attrtype == 0)[0]
attrdiff = np.array(attrdiff)
return attrdiff, cidx, didx
#==================================================================#
def _distarray_missing(self, xc, xd, cdiffs):
"""Distance array calculation for data with missing values"""
cindices = []
dindices = []
# Get Boolean mask locating missing values for continuous and discrete features separately. These correspond to xc and xd respectively.
for i in range(self._datalen):
cindices.append(np.where(np.isnan(xc[i]))[0])
dindices.append(np.where(np.isnan(xd[i]))[0])
if self.n_jobs != 1:
dist_array = Parallel(n_jobs=self.n_jobs)(delayed(get_row_missing)(
xc, xd, cdiffs, index, cindices, dindices) for index in range(self._datalen))
else:
# For each instance calculate distance from all other instances (in non-redundant manner) (i.e. computes triangle, and puts zeros in for rest to form square).
dist_array = [get_row_missing(xc, xd, cdiffs, index, cindices, dindices)
for index in range(self._datalen)]
return np.array(dist_array)
#==================================================================#
############################# ReliefF ############################################
def _find_neighbors(self, inst):
""" Identify k nearest hits and k nearest misses for given instance. This is accomplished differently based on the type of endpoint (i.e. binary, multiclass, and continuous). """
# Make a vector of distances between target instance (inst) and all others
dist_vect = []
for j in range(self._datalen):
if inst != j:
locator = [inst, j]
if inst < j:
locator.reverse()
dist_vect.append(self._distance_array[locator[0]][locator[1]])
else:
# Ensures that target instance is never selected as neighbor.
dist_vect.append(sys.maxsize)
dist_vect = np.array(dist_vect)
# Identify neighbors-------------------------------------------------------
""" NN for Binary Endpoints: """
if self._class_type == 'binary':
nn_list = []
match_count = 0
miss_count = 0
for nn_index in np.argsort(dist_vect):
if self._y[inst] == self._y[nn_index]: # Hit neighbor identified
if match_count >= self.n_neighbors:
continue
nn_list.append(nn_index)
match_count += 1
else: # Miss neighbor identified
if miss_count >= self.n_neighbors:
continue
nn_list.append(nn_index)
miss_count += 1
if match_count >= self.n_neighbors and miss_count >= self.n_neighbors:
break
elif self._class_type == 'multiclass':
nn_list = []
match_count = 0
miss_count = dict.fromkeys(self._label_list, 0)
for nn_index in np.argsort(dist_vect):
if self._y[inst] == self._y[nn_index]: # Hit neighbor identified
if match_count >= self.n_neighbors:
continue
nn_list.append(nn_index)
match_count += 1
else:
for label in self._label_list:
if self._y[nn_index] == label:
if miss_count[label] >= self.n_neighbors:
continue
nn_list.append(nn_index)
miss_count[label] += 1
if match_count >= self.n_neighbors and all(v >= self.n_neighbors for v in miss_count.values()):
break
else:
nn_list = []
match_count = 0
miss_count = 0
for nn_index in np.argsort(dist_vect):
if abs(self._y[inst]-self._y[nn_index]) < self._labels_std: # Hit neighbor identified
if match_count >= self.n_neighbors:
continue
nn_list.append(nn_index)
match_count += 1
else: # Miss neighbor identified
if miss_count >= self.n_neighbors:
continue
nn_list.append(nn_index)
miss_count += 1
if match_count >= self.n_neighbors and miss_count >= self.n_neighbors:
break
return np.array(nn_list)
def _run_algorithm(self):
""" Runs nearest neighbor (NN) identification and feature scoring to yield ReliefF scores. """
# Find nearest neighbors
NNlist = map(self._find_neighbors, range(self._datalen))
# Feature scoring - using identified nearest neighbors
nan_entries = np.isnan(self._X) # boolean mask for missing data values
# Call the scoring method for the ReliefF algorithm
scores = np.sum(Parallel(n_jobs=self.n_jobs)(delayed(
ReliefF_compute_scores)(instance_num, self.attr, nan_entries, self._num_attributes, self.mcmap,
NN, self._headers, self._class_type, self._X, self._y, self._labels_std, self.data_type)
for instance_num, NN in zip(range(self._datalen), NNlist)), axis=0)
return np.array(scores)
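# Illustrative sketch (not part of the original module): typical fit/transform flow on a
# small synthetic dataset; sample and feature counts are arbitrary illustration values.
def _example_relieff():
    X = np.random.rand(100, 20)               # 100 instances, 20 continuous features
    y = (X[:, 0] > 0.5).astype(int)           # binary endpoint driven by feature 0
    fs = ReliefF(n_features_to_select=5, n_neighbors=10, n_jobs=1)
    X_top5 = fs.fit_transform(X, y)           # scores all features, keeps the top 5
    return fs.top_features_[:5], X_top5.shape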
| 48.192797
| 189
| 0.596254
|
c689b156e7da8035b67df3f949de85a2bff3c663
| 4,770
|
py
|
Python
|
denorm/join.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | 11
|
2021-03-29T14:27:48.000Z
|
2022-01-01T00:31:40.000Z
|
denorm/join.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
denorm/join.py
|
rivethealth/denorm
|
c9b9070730e3cc7fbe78927d34db7ffa384aed42
|
[
"MIT"
] | null | null | null |
"""
Procedures:
* ID__refresh - Perform refresh
- When consistency is deferred
* ID__setup - Create the temporary tables
- When consistency is deferred
* ID__chg1__SOURCE - Process changes
* ID__chg2__SOURCE - Process changes
Tables:
* BASE (existing) - Table to watch
- Existing
- Triggers
* ID__del__SOURCE - Record deletes
* ID__ins__SOURCE - Record inserts
* ID__upd__SOURCE - Record updates
* TARGET (existing) - Table to populate
* ID__iterate__SOURCE - Queue changes for iteration
- When iteration is used
* ID__lock - Value lock
Temp tables:
* ID__key - Keys to update
- When consistency is deferred
* ID__refresh - Fire constraint trigger at end of transaction
- When consistency is deferred
- Triggers:
* join (deferred) - Perform refresh
"""
import dataclasses
import typing
from pg_sql import SqlId, sql_list
from .format import format
from .formats.join import (
JOIN_DATA_JSON_FORMAT,
JoinConfig,
JoinConsistency,
JoinJoinMode,
)
from .join_async import create_queue
from .join_change import create_change
from .join_common import JoinTarget, Key, Structure
from .join_defer import DeferredKeys, create_refresh_function, create_setup_function
from .join_key import KeyResolver, TargetRefresh
from .join_lock import create_lock_table
from .join_plain_target import JoinPlainTarget
from .join_refresh_function import (
create_refresh_function as create_table_refresh_function,
)
from .join_table_target import JoinTableTarget
from .resource import ResourceFactory
from .string import indent
@dataclasses.dataclass
class JoinIo:
config: ResourceFactory[typing.TextIO]
output: ResourceFactory[typing.TextIO]
def create_join(io: JoinIo):
schema = JOIN_DATA_JSON_FORMAT.load(io.config)
with io.output() as f:
for statement in _statements(schema):
print(f"{statement};\n", file=f)
def _target(config: JoinConfig) -> JoinTarget:
if config.target_table:
return JoinTableTarget(config.target_table, config.target_query)
else:
return JoinPlainTarget(config.target_query)
def _statements(config: JoinConfig):
structure = Structure(config.schema, config.id)
target = _target(config)
key = target.key()
if key is None:
definition = f"SELECT {sql_list(f'NULL::{column.type} AS {column.sql}' for column in config.key)}"
names = [column.name for column in config.key]
key = Key(definition=definition, names=names)
if config.lock:
yield from create_lock_table(
structure=structure, key=key, target=config.target_table
)
refresh_action = TargetRefresh(
key=key.names,
setup=config.setup,
structure=structure,
lock=config.lock,
target=target,
)
if config.consistency == JoinConsistency.DEFERRED:
yield from create_refresh_function(
id=config.id,
structure=structure,
refresh=refresh_action,
)
yield from create_setup_function(
structure=structure,
id=config.id,
target=config.target_table,
key=key,
)
for table_id, table in config.tables.items():
if table.join_mode != JoinJoinMode.ASYNC:
continue
resolver = KeyResolver(
action=refresh_action,
context=config.context,
key=key.names,
structure=structure,
table_id=table.join,
tables=config.tables,
)
yield from create_queue(
context=config.context,
id=config.id,
resolver=resolver,
structure=structure,
table_id=table_id,
tables=config.tables,
)
for table_id, table in config.tables.items():
if config.consistency == JoinConsistency.DEFERRED:
action = DeferredKeys(key=key.names, structure=structure)
elif config.consistency == JoinConsistency.IMMEDIATE:
action = refresh_action
resolver = KeyResolver(
action=action,
context=config.context,
key=key.names,
structure=structure,
table_id=table_id,
tables=config.tables,
)
if table.refresh_function:
yield from create_table_refresh_function(
resolver=resolver,
structure=structure,
table=table,
table_id=table_id,
)
if table.name is not None:
yield from create_change(
id=config.id,
resolver=resolver,
structure=structure,
table=table,
table_id=table_id,
)
| 28.058824
| 106
| 0.651363
|
e71f63f121b46b78c175cccd5d0d5e53185e4126
| 4,090
|
py
|
Python
|
game/content/ghplots/dd_combatmission.py
|
fmunoz-geo/gearhead-caramel
|
315835481d543420826439245be01460fe6dd81b
|
[
"Apache-2.0"
] | 74
|
2015-03-09T00:33:09.000Z
|
2022-02-25T20:28:27.000Z
|
game/content/ghplots/dd_combatmission.py
|
fmunoz-geo/gearhead-caramel
|
315835481d543420826439245be01460fe6dd81b
|
[
"Apache-2.0"
] | 108
|
2017-12-30T20:26:12.000Z
|
2021-01-16T12:37:00.000Z
|
game/content/ghplots/dd_combatmission.py
|
CartoonFan/gearhead-caramel
|
61995f382923695176ab7a65253f42e849e0c4d7
|
[
"Apache-2.0"
] | 61
|
2018-03-03T09:55:31.000Z
|
2022-03-18T17:28:33.000Z
|
import random
import game.content.gharchitecture
import game.content.ghterrain
import game.content.ghwaypoints
import gears
import pbge
from game import teams, ghdialogue
from game.content import adventureseed
from game.content.adventureseed import MAIN_OBJECTIVE_VALUE
from game.content.plotutility import CargoContainer
from game.ghdialogue import context
from pbge.dialogue import ContextTag, Offer, Reply
from pbge.plots import Plot
from . import missionbuilder
# Mission Objectives:
# - Defeat Enemy Commander
# - Destroy Structure
# - Defend Location
# - Capture Location
# - Rescue Survivors
# - Recover Cargo
# - Extract Team
# - Scout Location
# - Patrol Checkpoints
class CombatMissionSeed(missionbuilder.BuildAMissionSeed):
OBJECTIVE_TAGS = (missionbuilder.BAMO_DEFEAT_COMMANDER,missionbuilder.BAMO_RESPOND_TO_DISTRESS_CALL,missionbuilder.BAMO_EXTRACT_ALLIED_FORCES)
CRIME_TAGS = ("DZDCM_DO_CRIMES",)
def __init__(self, camp, name, metroscene, return_wp, enemy_faction=None, allied_faction=None, include_war_crimes=False, **kwargs):
# Determine 2 to 3 objectives for the mission.
if include_war_crimes:
objs = random.sample(self.OBJECTIVE_TAGS+self.CRIME_TAGS,2)
else:
objs = random.sample(self.OBJECTIVE_TAGS,2)
self.crimes_happened = False
super(CombatMissionSeed, self).__init__(camp, name, metroscene=metroscene, return_wp=return_wp, rank=max(camp.pc.renown + 1, 10),
objectives=objs, win_message="You have completed the mission.",
enemy_faction=enemy_faction, allied_faction=allied_faction, **kwargs)
# *************************
# *** DZDCM_DO_CRIMES ***
# *************************
class EliminateWitnesses( Plot ):
LABEL = "DZDCM_DO_CRIMES"
active = True
scope = "LOCALE"
def custom_init( self, nart ):
myscene = self.elements["LOCALE"]
self.register_element("ROOM",pbge.randmaps.rooms.FuzzyRoom(5,5),dident="LOCALE")
team2 = self.register_element("_eteam",teams.Team(enemies=(myscene.player_team,)),dident="ROOM")
myace = gears.selector.generate_ace(self.rank,self.elements.get("ALLIED_FACTION"),myscene.environment)
team2.contents.append(myace)
self.register_element("_commander",myace.get_pilot())
self.obj = adventureseed.MissionObjective("Defeat {}".format(myace.get_pilot()), MAIN_OBJECTIVE_VALUE)
self.adv.objectives.append(self.obj)
self.intro_ready = True
return True
def _eteam_ACTIVATETEAM(self,camp):
if self.intro_ready:
npc = self.elements["_commander"]
ghdialogue.start_conversation(camp,camp.pc,npc,cue=ghdialogue.ATTACK_STARTER)
self.intro_ready = False
self.adv.crimes_happened = True
def _commander_offers(self,camp):
mylist = list()
myfac = self.elements["ALLIED_FACTION"]
mylist.append(Offer("Hold your fire- I'm not an enemy! You were sent by {}, weren't you?! I know about their secret agenda, and they're trying to keep the word from getting out...".format(myfac),
context=ContextTag([context.ATTACK,])))
mylist.append(Offer("Very well, you've made it clear what side you're on. [CHALLENGE]",
context=ContextTag([context.CHALLENGE,])))
mylist.append(Offer("They've been taken over by extremists; {} is no longer taking orders from {}. I was ordered to attack a village, but refused... now they're after me. Be careful, they're going to come after you too.".format(myfac,myfac.parent_faction.name),
context=ContextTag([context.COMBAT_INFO,]), data={"subject":"Secret Agenda"}, effect=self._get_info))
return mylist
def _get_info(self,camp):
self.obj.failed = True
self.elements["_eteam"].retreat(camp)
camp.dole_xp(100)
def t_ENDCOMBAT(self,camp):
myteam = self.elements["_eteam"]
if len(myteam.get_members_in_play(camp)) < 1:
self.obj.win(camp,100)
| 43.052632
| 269
| 0.685086
|
b60aad2dc5606d775a649d751248abfca83646cc
| 2,267
|
py
|
Python
|
src/streamlink/plugins/teleclubzoom.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 5
|
2017-03-21T19:43:17.000Z
|
2018-10-03T14:04:29.000Z
|
src/streamlink/plugins/teleclubzoom.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 7
|
2016-10-13T23:29:31.000Z
|
2018-06-28T14:04:32.000Z
|
src/streamlink/plugins/teleclubzoom.py
|
sn4kebite/streamlink
|
054b760ce7e9f43451eed08e9f39de440c3e5add
|
[
"BSD-2-Clause"
] | 2
|
2021-02-09T14:29:21.000Z
|
2021-05-28T11:10:34.000Z
|
import logging
import re
from urllib.parse import urlparse
from streamlink.exceptions import PluginError
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import itertags
from streamlink.stream import HLSStream
from streamlink.utils import update_scheme
log = logging.getLogger(__name__)
class TeleclubZoom(Plugin):
_url_re = re.compile(r'https?://(?:www\.)?teleclubzoom\.ch')
API_URL = 'https://{netloc}/webservice/http/rest/client/live/play/{id}'
PLAYLIST_URL = 'https://{netloc}/{app}/ngrp:{name}_all/playlist.m3u8'
_api_schema = validate.Schema(
{
'playStreamName': validate.text,
'cdnHost': validate.text,
'streamProperties': {
validate.optional('server'): validate.text,
validate.optional('name'): validate.text,
'application': validate.text,
}
}
)
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
iframe_url = None
page = self.session.http.get(self.url)
for a in itertags(page.text, 'a'):
if a.attributes.get('class') == 'play-live':
iframe_url = update_scheme(self.url, a.attributes['data-url'])
break
if not iframe_url:
raise PluginError('Could not find iframe.')
parsed = urlparse(iframe_url)
path_list = parsed.path.split('/')
if len(path_list) != 6:
# only support a known iframe url style,
# the video id might be on a different spot if the url changes
raise PluginError('unsupported iframe URL: {0}'.format(iframe_url))
res = self.session.http.get(
self.API_URL.format(netloc=parsed.netloc, id=path_list[4]))
data = self.session.http.json(res, schema=self._api_schema)
log.trace('{0!r}'.format(data))
url = self.PLAYLIST_URL.format(
app=data['streamProperties']['application'],
name=data['playStreamName'],
netloc=data['cdnHost'],
)
return HLSStream.parse_variant_playlist(self.session, url)
__plugin__ = TeleclubZoom
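# Illustrative sketch (not part of the plugin): resolving streams through a Streamlink
# session. The URL is a placeholder and the site layout may have changed since this
# plugin was written.
def _example_resolve():
    from streamlink import Streamlink
    session = Streamlink()
    streams = session.streams("https://www.teleclubzoom.ch")   # e.g. {"best": HLSStream, ...}
    return streams.get("best")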
| 31.929577
| 79
| 0.628584
|
c4cd9b73e0550731f508851880869c9e375d93b5
| 4,540
|
py
|
Python
|
skrf/vi/sa.py
|
sdurant/scikit-rf
|
09161b879c1a52a1bc2e2df89f2656c97136c39b
|
[
"BSD-3-Clause"
] | null | null | null |
skrf/vi/sa.py
|
sdurant/scikit-rf
|
09161b879c1a52a1bc2e2df89f2656c97136c39b
|
[
"BSD-3-Clause"
] | null | null | null |
skrf/vi/sa.py
|
sdurant/scikit-rf
|
09161b879c1a52a1bc2e2df89f2656c97136c39b
|
[
"BSD-3-Clause"
] | null | null | null |
'''
.. module:: skrf.vi.sa
=================================================================
Spectrum Analyzers (:mod:`skrf.vi.sa`)
=================================================================
.. autosummary::
:toctree: generated/
HP8500
'''
import numpy as npy
import visa
from visa import GpibInstrument
from ..frequency import Frequency
from ..network import Network
from .. import mathFunctions as mf
class HP8500(GpibInstrument):
'''
    HP 8500-series Spectrum Analyzers
Examples
-----------
Get trace, and store in a Network object
    >>> from skrf.vi.sa import HP8500
    >>> my_sa = HP8500() # default address is 18
>>> trace = my_sa.get_ntwk()
Activate single sweep mode, get a trace, return to continuous sweep
>>> my_sa.single_sweep()
>>> my_sa.sweep()
>>> trace_a = my_sa.trace_a
>>> my_sa.cont_sweep()
'''
def __init__(self, address=18, *args, **kwargs):
'''
Initializer
Parameters
--------------
address : int
GPIB address
\*args, \*\*kwargs :
passed to ``visa.GpibInstrument.__init__``
'''
GpibInstrument.__init__(self,'GPIB::'+str(address),*args,**kwargs)
@property
def frequency(self):
'''
'''
f = Frequency(self.f_start, self.f_stop, len(self.trace_a),'hz')
f.unit = 'ghz'
return f
def get_ntwk(self, trace='a', goto_local=False, *args, **kwargs):
'''
Get a trace and return the data in a :class:`~skrf.network.Network` format
        This will save the instrument state to reg 1, activate single sweep
mode, sweep, save data, then recal state from reg 1.
Returning the data in a the form of a
:class:`~skrf.network.Network` allows all the plotting methods
and IO functions of that class to be used. Not all the methods
of Network make sense for this type of data (scalar), but we
assume the user is knows this.
Parameters
------------
trace : ['a', 'b']
save trace 'a' or trace 'b'
goto_local : Boolean
Go to local mode after taking a sweep
\*args,\*\*kwargs :
passed to :func:`~skrf.network.Network.__init__`
'''
trace = trace.lower()
if trace not in ['a','b']:
raise ValueError('\'trace\' should be \'a\' or \'b\'')
self.save_state()
self.single_sweep()
self.sweep()
#TODO: ask if magnitude is in linear (LN) or log (LG) mode
if trace== 'a':
s = self.trace_a
elif trace == 'b':
s = self.trace_b
self.recall_state()
s = mf.db_2_magnitude(npy.array(s))
freq = self.frequency
n = Network(s=s, frequency=freq, z0=1, *args, **kwargs)
if goto_local:
self.goto_local()
return n
@property
def f_start(self):
'''
starting frequency
'''
return float(self.ask('fa?'))
@property
def f_stop(self):
'''
stopping frequency
'''
return float(self.ask('fb?'))
@property
def trace_a(self):
'''
trace 'a'
'''
return self.ask_for_values("tra?")
@property
def trace_b(self):
'''
trace 'b'
'''
return self.ask_for_values("trb?")
def sweep(self):
'''
trigger a sweep, return when done
'''
self.write('ts')
return self.ask('done?')
def single_sweep(self):
'''
Activate single sweep mode
'''
self.write('sngls')
def cont_sweep(self):
'''
Activate continuous sweep mode
'''
self.write('conts')
def goto_local(self):
'''
Switches from remote to local control
'''
        pass  # visa.vpp43.gpib_control_ren(self.vi, 0)
def save_state(self, reg_n=1):
'''
Save current state to a given register
'''
self.write('saves %i'%reg_n)
def recall_state(self, reg_n=1):
'''
Recall current state to a given register
'''
self.write('rcls %i'%reg_n)
| 25.795455
| 83
| 0.488326
|
1f6d3e781bb35273713c7eeb3b1d94fc59095fdd
| 3,421
|
py
|
Python
|
models/mobilenetv2.py
|
cmuspencerlo/pytorch_cifar
|
d1bbdd73af54df60bbf15d4b672644e2098b857c
|
[
"MIT"
] | 1
|
2018-07-24T15:39:10.000Z
|
2018-07-24T15:39:10.000Z
|
models/mobilenetv2.py
|
cmuspencerlo/pytorch_cifar
|
d1bbdd73af54df60bbf15d4b672644e2098b857c
|
[
"MIT"
] | null | null | null |
models/mobilenetv2.py
|
cmuspencerlo/pytorch_cifar
|
d1bbdd73af54df60bbf15d4b672644e2098b857c
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    # Inverted residual: expand in_planes -> in_planes * expansion, then project down to target_planes
def __init__(self, in_planes, target_planes, expansion, stride):
super(Bottleneck, self).__init__()
self.stride = stride
planes = in_planes * expansion
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
# depthwise conv
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, groups=planes, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, target_planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(target_planes)
self.shortcut = nn.Sequential()
# Only shortcut when H and W are the same
if stride == 1 and in_planes != target_planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, target_planes, kernel_size=1, bias=False),
nn.BatchNorm2d(target_planes))
def forward(self, x):
out = F.relu6(self.bn1(self.conv1(x)))
out = F.relu6(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out = out + self.shortcut(x) if self.stride == 1 else out
return out
class MobileNetV2(nn.Module):
# (expansion, target_planes, num_blocks, stride)
cfg = [(1, 16, 1, 1),
(6, 24, 2, 1),
(6, 32, 3, 2),
(6, 64, 4, 2),
(6, 96, 3, 1),
(6, 160, 3, 2),
(6, 320, 1, 1)]
def __init__(self, num_classes=10):
super(MobileNetV2, self).__init__()
self.in_planes = 32
self.conv1 = nn.Conv2d(3, self.in_planes, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(32)
def _make_layers():
layers = []
for (expansion, target_planes, num_blocks, stride) in self.cfg:
strides = [stride] + [1] * (num_blocks - 1)
for stride in strides:
layers.append(Bottleneck(self.in_planes, target_planes, expansion, stride))
self.in_planes = target_planes
return nn.Sequential(*layers)
self.layers = _make_layers()
self.conv2 = nn.Conv2d(self.in_planes, 1280, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(1280)
self.conv3 = nn.Conv2d(1280, num_classes, kernel_size=1, bias=True)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layers(out)
out = F.relu(self.bn2(self.conv2(out)))
out = F.avg_pool2d(out, 2)
# GENIUS!
out = out.view(out.size(0), -1, 1, 1)
# (batch, channel, 1, 1)
out = self.conv3(out)
out = out.view(out.size(0), -1)
return out
def test():
device = 'cuda'
net = MobileNetV2()
cnt = 0
for name, param in net.named_parameters():
if 'weight' in name and 'shortcut' not in name and 'bn' not in name:
cnt += 1
# print(name)
# print(cnt)
net = net.to(device)
y = net(torch.randn(1, 3, 32, 32).to(device))
print(y.size())
# net = torch.nn.DataParallel(net)
# reset params
for m in net.modules():
if 'Conv2d' in type(m).__name__:
print(type(m))
m.reset_parameters()
# test()
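# Illustrative sketch (not part of the original file): a CPU-only shape and parameter
# check that complements test() above without requiring CUDA.
def _example_param_count():
    net = MobileNetV2(num_classes=10)
    n_params = sum(p.numel() for p in net.parameters())
    out = net(torch.randn(2, 3, 32, 32))   # CIFAR-sized input -> logits of shape (2, 10)
    return n_params, out.shape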
| 35.635417
| 114
| 0.576732
|
e07cf338fafc1fd3a45fb50f4a46821bf2fd745e
| 9,191
|
py
|
Python
|
bnn_hmc/utils/models.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
bnn_hmc/utils/models.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
bnn_hmc/utils/models.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""CNN haiku models."""
from typing import Tuple
import haiku as hk
import jax
import jax.numpy as jnp
import functools
Batch = Tuple[jnp.ndarray, jnp.ndarray]
_DEFAULT_BN_CONFIG = {
"decay_rate": 0.9,
"eps": 1e-5,
"create_scale": True,
"create_offset": True
}
def make_lenet5_fn(data_info):
num_classes = data_info["num_classes"]
def lenet_fn(batch, is_training):
"""Network inspired by LeNet-5."""
x, _ = batch
cnn = hk.Sequential([
hk.Conv2D(output_channels=6, kernel_shape=5, padding="SAME"),
jax.nn.relu,
hk.MaxPool(window_shape=3, strides=2, padding="VALID"),
hk.Conv2D(output_channels=16, kernel_shape=5, padding="SAME"),
jax.nn.relu,
hk.MaxPool(window_shape=3, strides=2, padding="VALID"),
hk.Conv2D(output_channels=120, kernel_shape=5, padding="SAME"),
jax.nn.relu,
hk.MaxPool(window_shape=3, strides=2, padding="VALID"),
hk.Flatten(),
hk.Linear(84),
jax.nn.relu,
hk.Linear(num_classes),
])
return cnn(x)
return lenet_fn
he_normal = hk.initializers.VarianceScaling(2.0, "fan_in", "truncated_normal")
class FeatureResponseNorm(hk.Module):
def __init__(self, eps=1e-6, name="frn"):
super().__init__(name=name)
self.eps = eps
def __call__(self, x, **unused_kwargs):
del unused_kwargs
par_shape = (1, 1, 1, x.shape[-1]) # [1,1,1,C]
tau = hk.get_parameter("tau", par_shape, x.dtype, init=jnp.zeros)
beta = hk.get_parameter("beta", par_shape, x.dtype, init=jnp.zeros)
gamma = hk.get_parameter("gamma", par_shape, x.dtype, init=jnp.ones)
nu2 = jnp.mean(jnp.square(x), axis=[1, 2], keepdims=True)
x = x * jax.lax.rsqrt(nu2 + self.eps)
y = gamma * x + beta
z = jnp.maximum(y, tau)
return z
def _resnet_layer(inputs,
num_filters,
normalization_layer,
kernel_size=3,
strides=1,
activation=lambda x: x,
use_bias=True,
is_training=True):
x = inputs
x = hk.Conv2D(
num_filters,
kernel_size,
stride=strides,
padding="same",
w_init=he_normal,
with_bias=use_bias)(
x)
x = normalization_layer()(x, is_training=is_training)
x = activation(x)
return x
def make_resnet_fn(
num_classes,
depth,
normalization_layer,
width = 16,
use_bias = True,
activation=jax.nn.relu,
):
num_res_blocks = (depth - 2) // 6
if (depth - 2) % 6 != 0:
raise ValueError("depth must be 6n+2 (e.g. 20, 32, 44).")
def forward(batch, is_training):
num_filters = width
x, _ = batch
x = _resnet_layer(
x,
num_filters=num_filters,
activation=activation,
use_bias=use_bias,
normalization_layer=normalization_layer)
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # downsample
y = _resnet_layer(
x,
num_filters=num_filters,
strides=strides,
activation=activation,
use_bias=use_bias,
is_training=is_training,
normalization_layer=normalization_layer)
y = _resnet_layer(
y,
num_filters=num_filters,
use_bias=use_bias,
is_training=is_training,
normalization_layer=normalization_layer)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match changed dims
x = _resnet_layer(
x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
use_bias=use_bias,
is_training=is_training,
normalization_layer=normalization_layer)
x = activation(x + y)
num_filters *= 2
x = hk.AvgPool((8, 8, 1), 8, "VALID")(x)
x = hk.Flatten()(x)
logits = hk.Linear(num_classes, w_init=he_normal)(x)
return logits
return forward
def make_resnet20_fn(data_info, activation=jax.nn.relu):
num_classes = data_info["num_classes"]
  def normalization_layer():
    # without the return, this factory yields None and _resnet_layer would fail
    return hk.BatchNorm(**_DEFAULT_BN_CONFIG)
return make_resnet_fn(
num_classes,
depth=20,
normalization_layer=normalization_layer,
activation=activation)
def make_resnet20_frn_fn(data_info, activation=jax.nn.relu):
num_classes = data_info["num_classes"]
return make_resnet_fn(
num_classes,
depth=20,
normalization_layer=FeatureResponseNorm,
activation=activation)
def make_cnn_lstm(data_info,
max_features=20000,
embedding_size=128,
cell_size=128,
num_filters=64,
kernel_size=5,
pool_size=4,
use_swish=False,
use_maxpool=True):
"""CNN LSTM architecture for the IMDB dataset."""
num_classes = data_info["num_classes"]
def forward(batch, is_training):
x, _ = batch
batch_size = x.shape[0]
x = hk.Embed(vocab_size=max_features, embed_dim=embedding_size)(x)
x = hk.Conv1D(
output_channels=num_filters, kernel_shape=kernel_size, padding="VALID")(
x)
if use_swish:
x = jax.nn.swish(x)
else:
x = jax.nn.relu(x)
if use_maxpool:
x = hk.MaxPool(
window_shape=pool_size,
strides=pool_size,
padding="VALID",
channel_axis=2)(
x)
x = jnp.moveaxis(x, 1, 0)[:, :] #[T, B, F]
lstm_layer = hk.LSTM(hidden_size=cell_size)
init_state = lstm_layer.initial_state(batch_size)
x, state = hk.static_unroll(lstm_layer, x, init_state)
x = x[-1]
logits = hk.Linear(num_classes)(x)
return logits
return forward
def make_smooth_cnn_lstm(data_info,
max_features=20000,
embedding_size=128,
cell_size=128,
num_filters=64,
kernel_size=5,
pool_size=4):
  # make_cnn_lstm expects the full data_info dict (it reads num_classes itself)
  return make_cnn_lstm(
      data_info,
max_features,
embedding_size,
cell_size,
num_filters,
kernel_size,
pool_size,
use_swish=True,
use_maxpool=False)
def make_mlp(layer_dims, output_dim):
def forward(batch, is_training):
x, _ = batch
x = hk.Flatten()(x)
for layer_dim in layer_dims:
x = hk.Linear(layer_dim)(x)
x = jax.nn.relu(x)
x = hk.Linear(output_dim)(x)
return x
return forward
def make_mlp_regression(data_info, output_dim=2, layer_dims=[100, 100]):
return make_mlp(layer_dims, output_dim)
def make_mlp_regression_small(data_info):
return make_mlp([50], 2)
def make_mlp_classification(data_info, layer_dims=[256, 256]):
num_classes = data_info["num_classes"]
return make_mlp(layer_dims, num_classes)
def make_logistic_regression(data_info):
num_classes = data_info["num_classes"]
return make_mlp([], num_classes)
def get_model(model_name, data_info, **kwargs):
_MODEL_FNS = {
"lenet":
make_lenet5_fn,
"resnet20":
make_resnet20_fn,
"resnet20_frn":
make_resnet20_frn_fn,
"resnet20_frn_swish":
functools.partial(make_resnet20_frn_fn, activation=jax.nn.swish),
"cnn_lstm":
make_cnn_lstm,
"smooth_cnn_lstm":
make_smooth_cnn_lstm,
"mlp_regression":
make_mlp_regression,
"mlp_regression_small":
make_mlp_regression_small,
"mlp_classification":
make_mlp_classification,
"logistic_regression":
make_logistic_regression,
}
net_fn = _MODEL_FNS[model_name](data_info, **kwargs)
net = hk.transform_with_state(net_fn)
return net.apply, net.init
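# --- Usage sketch (not part of the original module) ---
# A minimal, hedged example of how the (apply, init) pair returned by
# get_model above might be driven.  The model name, data_info dict, batch
# shapes and dtypes below are illustrative assumptions, not values taken
# from the original code.
import jax
import jax.numpy as jnp

apply_fn, init_fn = get_model("mlp_classification", {"num_classes": 10})
# Every forward function in this module takes an (inputs, labels) batch tuple
# plus an is_training flag.
example_batch = (jnp.zeros((8, 32, 32, 3)), jnp.zeros((8,), dtype=jnp.int32))
params, state = init_fn(jax.random.PRNGKey(0), example_batch, True)
logits, state = apply_fn(params, state, None, example_batch, False)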
| 28.811912
| 80
| 0.632249
|
a1f114d31cc00af22fd6b712a43bae5a0d112afd
| 609
|
py
|
Python
|
products/migrations/0002_order_owner.py
|
dariothornhill/product_api
|
fbbe07733c3cc0f9ef35b1d36c2183d03ef9e6a7
|
[
"MIT"
] | null | null | null |
products/migrations/0002_order_owner.py
|
dariothornhill/product_api
|
fbbe07733c3cc0f9ef35b1d36c2183d03ef9e6a7
|
[
"MIT"
] | null | null | null |
products/migrations/0002_order_owner.py
|
dariothornhill/product_api
|
fbbe07733c3cc0f9ef35b1d36c2183d03ef9e6a7
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.4 on 2022-03-07 12:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='order',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='accounts.customuser'),
preserve_default=False,
),
]
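# --- Hedged sketch (not part of the migration) ---
# Roughly the model-side field this migration corresponds to in
# products/models.py.  The 'accounts.customuser' target comes from the
# migration above; default=1 was only a one-off value for existing rows
# (preserve_default=False), so it would not appear on the model field itself.
from django.conf import settings
from django.db import models

class Order(models.Model):
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)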
| 26.478261
| 118
| 0.660099
|
302327b5564fe98b79f13cdaa51c641249417002
| 18,470
|
py
|
Python
|
lemur/tests/test_certificates.py
|
bunjiboys/lemur
|
b5fd8020055d8af07bd6f82f4dd38246dca8d0c5
|
[
"Apache-2.0"
] | null | null | null |
lemur/tests/test_certificates.py
|
bunjiboys/lemur
|
b5fd8020055d8af07bd6f82f4dd38246dca8d0c5
|
[
"Apache-2.0"
] | 2
|
2020-04-03T09:28:20.000Z
|
2020-04-04T04:56:35.000Z
|
lemur/tests/test_certificates.py
|
bunjiboys/lemur
|
b5fd8020055d8af07bd6f82f4dd38246dca8d0c5
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals # at top of module
import json
import pytest
import datetime
import arrow
from freezegun import freeze_time
from cryptography import x509
from lemur.certificates.views import * # noqa
from lemur.tests.vectors import VALID_ADMIN_HEADER_TOKEN, VALID_USER_HEADER_TOKEN, CSR_STR, \
INTERNAL_VALID_LONG_STR, INTERNAL_VALID_SAN_STR, PRIVATE_KEY_STR
def test_get_or_increase_name(session, certificate):
from lemur.certificates.models import get_or_increase_name
assert get_or_increase_name('test name') == 'test-name'
assert get_or_increase_name(certificate.name) == '{0}-1'.format(certificate.name)
certificate.name = 'test-cert-11111111'
assert get_or_increase_name(certificate.name) == 'test-cert-11111111-1'
certificate.name = 'test-cert-11111111-1'
assert get_or_increase_name('test-cert-11111111-1') == 'test-cert-11111111-2'
def test_get_certificate_primitives(certificate):
from lemur.certificates.service import get_certificate_primitives
names = [x509.DNSName(x.name) for x in certificate.domains]
data = {
'common_name': certificate.cn,
'owner': certificate.owner,
'authority': certificate.authority,
'description': certificate.description,
'extensions': {
'sub_alt_names': x509.SubjectAlternativeName(names)
},
'destinations': [],
'roles': [],
'validity_end': arrow.get(2021, 5, 7),
'validity_start': arrow.get(2016, 10, 30),
'country': 'US',
'location': 'A place',
'organization': 'Example',
'organizational_unit': 'Operations',
'state': 'CA'
}
with freeze_time(datetime.date(year=2016, month=10, day=30)):
primitives = get_certificate_primitives(certificate)
assert len(primitives) == 23
def test_certificate_edit_schema(session):
from lemur.certificates.schemas import CertificateEditInputSchema
input_data = {'owner': 'bob@example.com'}
data, errors = CertificateEditInputSchema().load(input_data)
assert len(data['notifications']) == 3
def test_authority_key_identifier_schema():
from lemur.schemas import AuthorityKeyIdentifierSchema
input_data = {
'useKeyIdentifier': True,
'useAuthorityCert': True
}
data, errors = AuthorityKeyIdentifierSchema().load(input_data)
assert sorted(data) == sorted({
'use_key_identifier': True,
'use_authority_cert': True
})
assert not errors
data, errors = AuthorityKeyIdentifierSchema().dumps(data)
assert sorted(data) == sorted(json.dumps(input_data))
assert not errors
def test_certificate_info_access_schema():
from lemur.schemas import CertificateInfoAccessSchema
input_data = {'includeAIA': True}
data, errors = CertificateInfoAccessSchema().load(input_data)
assert not errors
assert data == {'include_aia': True}
data, errors = CertificateInfoAccessSchema().dump(data)
assert not errors
assert data == input_data
def test_subject_key_identifier_schema():
from lemur.schemas import SubjectKeyIdentifierSchema
input_data = {'includeSKI': True}
data, errors = SubjectKeyIdentifierSchema().load(input_data)
assert not errors
assert data == {'include_ski': True}
data, errors = SubjectKeyIdentifierSchema().dump(data)
assert not errors
assert data == input_data
def test_extension_schema(client):
from lemur.certificates.schemas import ExtensionSchema
input_data = {
'keyUsage': {
'useKeyEncipherment': True,
'useDigitalSignature': True
},
'extendedKeyUsage': {
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
}
}
data, errors = ExtensionSchema().load(input_data)
assert not errors
data, errors = ExtensionSchema().dump(data)
assert not errors
def test_certificate_input_schema(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': 'jim@example.com',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityEnd': arrow.get(2016, 11, 9).isoformat(),
'validityStart': arrow.get(2015, 11, 9).isoformat()
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
assert data['authority'].id == authority.id
# make sure the defaults got set
assert data['common_name'] == 'test.example.com'
assert data['country'] == 'US'
assert data['location'] == 'Los Gatos'
assert len(data.keys()) == 17
def test_certificate_input_with_extensions(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': 'jim@example.com',
'authority': {'id': authority.id},
'description': 'testtestest',
'extensions': {
'keyUsage': {
'digital_signature': True
},
'extendedKeyUsage': {
'useClientAuthentication': True,
'useServerAuthentication': True
},
'subjectKeyIdentifier': {
'includeSKI': True
},
'subAltNames': {
'names': [
{'nameType': 'DNSName', 'value': 'test.example.com'}
]
}
}
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_out_of_range_date(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': 'jim@example.com',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 100
}
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityStart'] = '2017-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
input_data['validityEnd'] = '2018-04-30T00:12:34.513631'
data, errors = CertificateInputSchema().load(input_data)
assert errors
def test_certificate_valid_years(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': 'jim@example.com',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityYears': 2
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_certificate_valid_dates(client, authority):
from lemur.certificates.schemas import CertificateInputSchema
input_data = {
'commonName': 'test.example.com',
'owner': 'jim@example.com',
'authority': {'id': authority.id},
'description': 'testtestest',
'validityStart': '2020-01-01T00:00:00',
'validityEnd': '2020-01-01T00:00:01'
}
data, errors = CertificateInputSchema().load(input_data)
assert not errors
def test_create_basic_csr(client):
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from lemur.certificates.service import create_csr
csr_config = dict(
common_name='example.com',
organization='Example, Inc.',
organizational_unit='Operations',
country='US',
state='CA',
location='A place',
owner='joe@example.com',
key_type='RSA2048',
extensions=dict(names=dict(sub_alt_names=x509.SubjectAlternativeName([x509.DNSName('test.example.com'), x509.DNSName('test2.example.com')])))
)
csr, pem = create_csr(**csr_config)
csr = x509.load_pem_x509_csr(csr.encode('utf-8'), default_backend())
for name in csr.subject:
assert name.value in csr_config.values()
def test_get_name_from_arn(client):
from lemur.certificates.service import get_name_from_arn
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_name_from_arn(arn) == 'mycertificate'
def test_get_account_number(client):
from lemur.certificates.service import get_account_number
arn = 'arn:aws:iam::11111111:server-certificate/mycertificate'
assert get_account_number(arn) == '11111111'
def test_mint_certificate(issuer_plugin, authority):
from lemur.certificates.service import mint
cert_body, private_key, chain = mint(authority=authority, csr=CSR_STR)
assert cert_body == INTERNAL_VALID_LONG_STR, INTERNAL_VALID_SAN_STR
def test_create_certificate(issuer_plugin, authority, user):
from lemur.certificates.service import create
cert = create(authority=authority, csr=CSR_STR, owner='joe@example.com', creator=user['user'])
assert str(cert.not_after) == '2040-01-01T20:30:52+00:00'
assert str(cert.not_before) == '2015-06-26T20:30:52+00:00'
assert cert.issuer == 'Example'
assert cert.name == 'long.lived.com-Example-20150626-20400101'
cert = create(authority=authority, csr=CSR_STR, owner='joe@example.com', name='ACustomName1', creator=user['user'])
assert cert.name == 'ACustomName1'
def test_reissue_certificate(issuer_plugin, authority, certificate):
from lemur.certificates.service import reissue_certificate
new_cert = reissue_certificate(certificate)
assert new_cert
def test_create_csr():
from lemur.certificates.service import create_csr
csr, private_key = create_csr(owner='joe@example.com', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', key_type='RSA2048')
assert csr
assert private_key
extensions = {'sub_alt_names': {'names': x509.SubjectAlternativeName([x509.DNSName('AnotherCommonName')])}}
csr, private_key = create_csr(owner='joe@example.com', common_name='ACommonName', organization='test', organizational_unit='Meters', country='US',
state='CA', location='Here', extensions=extensions, key_type='RSA2048')
assert csr
assert private_key
def test_import(user):
from lemur.certificates.service import import_certificate
cert = import_certificate(body=INTERNAL_VALID_LONG_STR, chain=INTERNAL_VALID_SAN_STR, private_key=PRIVATE_KEY_STR, creator=user['user'])
assert str(cert.not_after) == '2040-01-01T20:30:52+00:00'
assert str(cert.not_before) == '2015-06-26T20:30:52+00:00'
assert cert.issuer == 'Example'
assert cert.name == 'long.lived.com-Example-20150626-20400101-2'
cert = import_certificate(body=INTERNAL_VALID_LONG_STR, chain=INTERNAL_VALID_SAN_STR, private_key=PRIVATE_KEY_STR, owner='joe@example.com', name='ACustomName2', creator=user['user'])
assert cert.name == 'ACustomName2'
def test_upload(user):
from lemur.certificates.service import upload
cert = upload(body=INTERNAL_VALID_LONG_STR, chain=INTERNAL_VALID_SAN_STR, private_key=PRIVATE_KEY_STR, owner='joe@example.com', creator=user['user'])
assert str(cert.not_after) == '2040-01-01T20:30:52+00:00'
assert str(cert.not_before) == '2015-06-26T20:30:52+00:00'
assert cert.issuer == 'Example'
assert cert.name == 'long.lived.com-Example-20150626-20400101-3'
cert = upload(body=INTERNAL_VALID_LONG_STR, chain=INTERNAL_VALID_SAN_STR, private_key=PRIVATE_KEY_STR, owner='joe@example.com', name='ACustomName', creator=user['user'])
assert 'ACustomName' in cert.name
# verify upload with a private key as a str
def test_upload_private_key_str(user):
from lemur.certificates.service import upload
cert = upload(body=INTERNAL_VALID_LONG_STR, chain=INTERNAL_VALID_SAN_STR, private_key=PRIVATE_KEY_STR, owner='joe@example.com', name='ACustomName', creator=user['user'])
assert cert
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_certificate_get_private_key(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_certificate_get(client, token, status):
assert client.get(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
def test_certificate_get_body(client):
response_body = client.get(api.url_for(Certificates, certificate_id=1), headers=VALID_USER_HEADER_TOKEN).json
assert response_body['serial'] == '1001'
assert response_body['serialHex'] == '3E9'
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_post(client, token, status):
assert client.post(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_certificate_put(client, token, status):
assert client.put(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
def test_certificate_put_with_data(client, certificate, issuer_plugin):
resp = client.put(api.url_for(Certificates, certificate_id=certificate.id), data=json.dumps({'owner': 'bob@example.com', 'description': 'test', 'notify': True}), headers=VALID_ADMIN_HEADER_TOKEN)
assert resp.status_code == 200
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_delete(client, token, status):
assert client.delete(api.url_for(Certificates, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_patch(client, token, status):
assert client.patch(api.url_for(Certificates, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
('', 401)
])
def test_certificates_get(client, token, status):
assert client.get(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_certificates_post(client, token, status):
assert client.post(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_put(client, token, status):
assert client.put(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_delete(client, token, status):
assert client.delete(api.url_for(CertificatesList), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_patch(client, token, status):
assert client.patch(api.url_for(CertificatesList), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_post(client, token, status):
assert client.post(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_put(client, token, status):
assert client.put(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_delete(client, token, status):
assert client.delete(api.url_for(CertificatePrivateKey, certificate_id=1), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificate_credentials_patch(client, token, status):
assert client.patch(api.url_for(CertificatePrivateKey, certificate_id=1), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_upload_get(client, token, status):
assert client.get(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 400),
(VALID_ADMIN_HEADER_TOKEN, 400),
('', 401)
])
def test_certificates_upload_post(client, token, status):
assert client.post(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_upload_put(client, token, status):
assert client.put(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_upload_delete(client, token, status):
assert client.delete(api.url_for(CertificatesUpload), headers=token).status_code == status
@pytest.mark.parametrize("token,status", [
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
('', 405)
])
def test_certificates_upload_patch(client, token, status):
assert client.patch(api.url_for(CertificatesUpload), data={}, headers=token).status_code == status
| 34.330855
| 199
| 0.702761
|
26b6ea5d100e0a9ba888bab2fd97ebc25f28cf49
| 20
|
py
|
Python
|
noisefilter/noisefilter/__init__.py
|
itsMagondu/IoTNeuralNetworks
|
7ba2098866966a55541019028b16301cfcf94b6b
|
[
"MIT"
] | null | null | null |
noisefilter/noisefilter/__init__.py
|
itsMagondu/IoTNeuralNetworks
|
7ba2098866966a55541019028b16301cfcf94b6b
|
[
"MIT"
] | 1
|
2021-06-11T04:40:55.000Z
|
2021-06-11T04:40:55.000Z
|
noisefilter/noisefilter/__init__.py
|
itsMagondu/IoTNeuralNetworks
|
7ba2098866966a55541019028b16301cfcf94b6b
|
[
"MIT"
] | null | null | null |
""" noisefilter """
| 10
| 19
| 0.55
|
1bc5a4afd1ad436f13d23aa1cb38ddbd70beedf3
| 336
|
py
|
Python
|
pyeccodes/defs/grib2/local_98_26_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib2/local_98_26_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib2/local_98_26_def.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
import pyeccodes.accessors as _
def load(h):
h.add(_.Unsigned('referenceDate', 4))
h.add(_.Unsigned('climateDateFrom', 4))
h.add(_.Unsigned('climateDateTo', 4))
h.alias('local.referenceDate', 'referenceDate')
h.alias('local.climateDateFrom', 'climateDateFrom')
h.alias('local.climateDateTo', 'climateDateTo')
| 28
| 55
| 0.693452
|
1b3ca629462aaf91b1bf5699b31cab7dad5540ee
| 172
|
py
|
Python
|
regions/io/ds9/setup_package.py
|
lpsinger/regions
|
55cb07f3ae54759637ba26d35bfcdf6043b825fb
|
[
"BSD-3-Clause"
] | 1
|
2020-02-26T05:46:07.000Z
|
2020-02-26T05:46:07.000Z
|
regions/io/ds9/setup_package.py
|
lpsinger/regions
|
55cb07f3ae54759637ba26d35bfcdf6043b825fb
|
[
"BSD-3-Clause"
] | null | null | null |
regions/io/ds9/setup_package.py
|
lpsinger/regions
|
55cb07f3ae54759637ba26d35bfcdf6043b825fb
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
parser_test = ['data/*.reg']
return {'regions.io.ds9.tests': parser_test}
| 24.571429
| 63
| 0.697674
|
d5aedbe41a8cadacde6aa6991d0ab041e1aed957
| 3,727
|
py
|
Python
|
python/8.web/2.Django/mysql_demo/userapp/models.py
|
lotapp/BaseCode
|
0255f498e1fe67ed2b3f66c84c96e44ef1f7d320
|
[
"Apache-2.0"
] | 25
|
2018-06-13T08:13:44.000Z
|
2020-11-19T14:02:11.000Z
|
python/8.web/2.Django/mysql_demo/userapp/models.py
|
lotapp/BaseCode
|
0255f498e1fe67ed2b3f66c84c96e44ef1f7d320
|
[
"Apache-2.0"
] | null | null | null |
python/8.web/2.Django/mysql_demo/userapp/models.py
|
lotapp/BaseCode
|
0255f498e1fe67ed2b3f66c84c96e44ef1f7d320
|
[
"Apache-2.0"
] | 13
|
2018-06-13T08:13:38.000Z
|
2022-01-06T06:45:07.000Z
|
from django.db import models
import uuid
# editable=False
# Express company table (the "one" side of the relation)
# https://www.kuaidi100.com/query?type=&postid=
# https://sp0.baidu.com/9_Q4sjW91Qh3otqbppnN2DJv/pae/channel/data/asyncqury?appid=4001&com=huitongkuaidi&nu=71330102164040
class Express(models.Model):
    # Express company code
ecode = models.CharField(max_length=20, blank=True, verbose_name="快递编号")
    # Company name (indexed)
    # SF Express, YTO Express, Yunda Express, ZTO Express, STO Express, Best Express, TTK Express, China Post EMS,
    # JD Logistics, Deppon Express, UC Express, ANE Express, Kuaijie Express, other carriers
ename = models.CharField(max_length=10, db_index=True, verbose_name="快递名称")
    # Carrier type used by kuaidi100
etype = models.CharField(max_length=20, default="other", verbose_name="快递100对应的类型")
    # Carrier display order (all default to 9; may be used later)
esort = models.SmallIntegerField(default=9, blank=True, verbose_name="快递排序")
    # Creation time; auto_now_add automatically sets the current time when the object is first created
createtime = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Update time; auto_now automatically sets this field to the current time every time the object is saved
updatetime = models.DateTimeField(auto_now=True, verbose_name="修改时间")
    # Data status (0: guest/frozen, 1: normal, 99: deleted)
datastatus = models.SmallIntegerField(default=1, verbose_name="数据状态", help_text="默认为1,99代表数据被删除")
class Meta:
        # The table name defaults to <app name>_<lowercase class name>
        # db_table = "express"
        # How "Express" is displayed in the Django admin
        verbose_name = "快递公司" # singular form
        verbose_name_plural = verbose_name # plural form
# Shipping information sent in by buyers (the "many" side)
class ExpressOrder(models.Model):
    # Tracking id (indexed)
express_num = models.CharField(max_length=40, db_index=True, verbose_name="物流编号", help_text="您的物流信息")
    # Express company object (shipping info to company is many-to-one); in the database this is simply a foreign key to the Express table
    # models.CASCADE: cascading delete (the former default), e.g. deleting an express company also deletes its shipping orders
express = models.ForeignKey("Express", on_delete=models.CASCADE)
    # Creation time; auto_now_add automatically sets the current time when the object is first created
createtime = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Update time; auto_now automatically sets this field to the current time every time the object is saved
updatetime = models.DateTimeField(auto_now=True, verbose_name="修改时间")
    # Data status (0: guest/frozen, 1: normal, 99: deleted)
datastatus = models.SmallIntegerField(default=1, verbose_name="数据状态", help_text="默认为1,99代表数据被删除")
class Meta:
        verbose_name = "物流信息" # singular form
        verbose_name_plural = verbose_name # plural form
class FileInfo(models.Model):
    # MD5 hash of the file
file_md5 = models.CharField(max_length=32, verbose_name="文件MD5", help_text="同一文件MD5相同")
# file_url =models.FilePathField(path="/files",verbose_name="上传文件")
    # Creation time; auto_now_add automatically sets the current time when the object is first created
createtime = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Update time; auto_now automatically sets this field to the current time every time the object is saved
updatetime = models.DateTimeField(auto_now=True, verbose_name="修改时间")
    # Data status (0: guest/frozen, 1: normal, 99: deleted)
datastatus = models.SmallIntegerField(default=1, verbose_name="数据状态", help_text="默认为1,99代表数据被删除")
class Meta:
        verbose_name = "文件信息" # singular form
        verbose_name_plural = verbose_name # plural form
# Buyer orders
class Order(models.Model):
    # User order id (indexed), generated automatically; editable=False
order_id = models.UUIDField(db_index=True, default=uuid.uuid4, verbose_name="订单编号",
help_text="该编号是系统自动生成")
# files = models.
    # Creation time; auto_now_add automatically sets the current time when the object is first created
createtime = models.DateTimeField(auto_now_add=True, verbose_name="创建时间")
    # Update time; auto_now automatically sets this field to the current time every time the object is saved
updatetime = models.DateTimeField(auto_now=True, verbose_name="修改时间")
    # Data status (0: guest/frozen, 1: normal, 99: deleted)
datastatus = models.SmallIntegerField(default=1, verbose_name="数据状态", help_text="默认为1,99代表数据被删除")
class Meta:
        verbose_name = "买家订单" # singular form
        verbose_name_plural = verbose_name # plural form
# from django.db import connection
# print(connection.queries[-1])
# Inspect the generated SQL via the QuerySet.query attribute
# xx.objects.all().query
# Check whether the MySQL general log is enabled and where the log file lives
# show variables like '%general_log%';
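# --- Hedged sketch (not part of the original module) ---
# The SQL-inspection techniques described in the comments above, written out
# as executable statements.  Assumes a configured Django project with DEBUG
# enabled (connection.queries is only populated when DEBUG=True).
from django.db import connection

qs = Express.objects.all()
print(qs.query)                # the SQL this QuerySet will execute
list(qs)                       # force evaluation so the query is recorded
print(connection.queries[-1])  # the last executed query and its timing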
| 35.160377
| 122
| 0.718004
|
e313a8ecd15386e09a4727c1b2de0ad136a51fa9
| 791
|
py
|
Python
|
setup.py
|
npinto/py-lars
|
2c6991f42e7ad266cf8c2b684fc6dc71e298c09c
|
[
"MIT"
] | 3
|
2018-11-27T19:08:45.000Z
|
2019-12-10T01:27:32.000Z
|
setup.py
|
npinto/py-lars
|
2c6991f42e7ad266cf8c2b684fc6dc71e298c09c
|
[
"MIT"
] | null | null | null |
setup.py
|
npinto/py-lars
|
2c6991f42e7ad266cf8c2b684fc6dc71e298c09c
|
[
"MIT"
] | 2
|
2019-11-17T22:47:08.000Z
|
2019-12-10T01:26:45.000Z
|
import setuptools
setuptools.setup(
name='lmj.lars',
version='0.2',
namespace_packages=['lmj'],
packages=setuptools.find_packages(),
author='Leif Johnson',
author_email='leif@leifjohnson.net',
description='An implementation of Least Angle Regression',
long_description=open('README.md').read(),
license='MIT',
keywords=('regression '
'sparse '
'regularized '
),
url='http://github.com/lmjohns3/py-lars',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
| 29.296296
| 62
| 0.595449
|
741166f76313ba4129cbd320c0cfce033b8e4c31
| 4,503
|
py
|
Python
|
tests/clvm/test_chialisp_deserialization.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
tests/clvm/test_chialisp_deserialization.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
tests/clvm/test_chialisp_deserialization.py
|
zcomputerwiz/silicoin-light-wallet
|
1cdc3784effec229cc841a04655078b1d9913d33
|
[
"Apache-2.0"
] | null | null | null |
from unittest import TestCase
from silicoin.types.blockchain_format.program import Program, INFINITE_COST
from silicoin.util.byte_types import hexstr_to_bytes
from silicoin.wallet.puzzles.load_clvm import load_clvm
DESERIALIZE_MOD = load_clvm("chialisp_deserialisation.clvm", package_or_requirement="silicoin.wallet.puzzles")
def serialized_atom_overflow(size):
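    # Builds the CLVM serialized-atom length prefix claiming the atom is
    # `size` bytes long, then appends only 1000 bytes of payload, so the
    # deserializer is expected to reject the resulting blob
    # (see test_overflow_atoms below).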
if size == 0:
size_blob = b"\x80"
elif size < 0x40:
size_blob = bytes([0x80 | size])
elif size < 0x2000:
size_blob = bytes([0xC0 | (size >> 8), (size >> 0) & 0xFF])
elif size < 0x100000:
size_blob = bytes([0xE0 | (size >> 16), (size >> 8) & 0xFF, (size >> 0) & 0xFF])
elif size < 0x8000000:
size_blob = bytes(
[
0xF0 | (size >> 24),
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
elif size < 0x400000000:
size_blob = bytes(
[
0xF8 | (size >> 32),
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
else:
size_blob = bytes(
[
0xFC | ((size >> 40) & 0xFF),
(size >> 32) & 0xFF,
(size >> 24) & 0xFF,
(size >> 16) & 0xFF,
(size >> 8) & 0xFF,
(size >> 0) & 0xFF,
]
)
extra_str = "01" * 1000
return size_blob.hex() + extra_str
class TestClvmNativeDeserialization(TestCase):
"""
Test clvm deserialization done from within the clvm
"""
def test_deserialization_simple_list(self):
# ("hello" "friend")
b = hexstr_to_bytes("ff8568656c6c6fff86667269656e6480")
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_password_coin(self):
# (i (= (sha256 2) (q 0x2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824)) (c (q 51) (c 5 (c (q 100) (q ())))) (q "wrong password")) # noqa
b = hexstr_to_bytes(
"ff04ffff0affff0bff0280ffff01ffa02cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b98248080ffff05ffff01ff3380ffff05ff05ffff05ffff01ff6480ffff01ff8080808080ffff01ff8e77726f6e672070617373776f72648080" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_deserialization_large_numbers(self):
# '(99999999999999999999999999999999999999999999999999999999999999999 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF -99999999999999999999999999999999999999999999999999999999999999999999999999999)' # noqa
b = hexstr_to_bytes(
"ff9c00f316271c7fc3908a8bef464e3945ef7a253609ffffffffffffffffffb00fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa1ff22ea0179500526edb610f148ec0c614155678491902d6000000000000000000180" # noqa
) # noqa
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
print(cost, output)
prog = Program.to(output)
assert prog == Program.from_bytes(b)
def test_overflow_atoms(self):
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x3FFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0xFFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
b = hexstr_to_bytes(serialized_atom_overflow(0x1FFFFFFFFFF))
try:
cost, output = DESERIALIZE_MOD.run_with_cost(INFINITE_COST, [b])
except Exception:
assert True
else:
assert False
| 39.156522
| 264
| 0.621586
|
9f1ecd9efb0bc6cb507fd1adf15e8040bb7ea8c9
| 2,309
|
py
|
Python
|
tests/test_specqp.py
|
Shipilin/specqp
|
c96c6f9476d871d73f7aa1fb7aeb3a435c9013a9
|
[
"MIT"
] | null | null | null |
tests/test_specqp.py
|
Shipilin/specqp
|
c96c6f9476d871d73f7aa1fb7aeb3a435c9013a9
|
[
"MIT"
] | null | null | null |
tests/test_specqp.py
|
Shipilin/specqp
|
c96c6f9476d871d73f7aa1fb7aeb3a435c9013a9
|
[
"MIT"
] | null | null | null |
from matplotlib import pyplot as plt
import unittest
import specqp as sp
import numpy as np
class TestHelpers(unittest.TestCase):
def test_is_iterable(self):
self.assertTrue(sp.helpers.is_iterable([1, 2, 3]))
self.assertTrue(sp.helpers.is_iterable((1, 2, 3)))
        arr = np.arange(10)
self.assertTrue(sp.helpers.is_iterable(arr))
self.assertTrue(sp.helpers.is_iterable((1, 2, 3)))
self.assertFalse(sp.helpers.is_iterable(1))
self.assertTrue(1)
def doniachtest():
x = np.linspace(714.96, 702.16, 61, endpoint=True)
amp, cen, gfwhm, lfwhm, reverse = 6.34, 709.0, 2, 0.91, None
y = sp.fitter.Fitter.doniach_sunjic(x[::-1], amp, cen, gfwhm, lfwhm)
y2 = np.ones_like(x)
plt.plot(x[::-1], y, x, y2)
plt.gca().invert_xaxis()
plt.show()
def shirleytest():
x = np.linspace(714.96, 702.16, 61, endpoint=True)
y = [ 4.09974404e+00, 4.08549832e+00, 4.11214206e+00, 4.28109103e+00,
4.33858583e+00, 4.30001600e+00, 4.39268917e+00, 4.42246841e+00,
4.47486802e+00, 4.64455287e+00, 4.62125260e+00, 4.69875220e+00,
4.84191329e+00, 4.87371621e+00, 4.98736202e+00, 5.07505199e+00,
5.19087346e+00, 5.30921453e+00, 5.49130539e+00, 5.51066229e+00,
5.74675252e+00, 5.84502480e+00, 5.98661014e+00, 6.08369061e+00,
6.02580387e+00, 6.03721805e+00, 6.05379939e+00, 6.03497040e+00,
6.07546793e+00, 6.10429531e+00, 6.10463926e+00, 6.24841625e+00,
6.28337866e+00, 6.44102544e+00, 6.63068309e+00, 6.93672212e+00,
7.44953607e+00, 8.41378979e+00, 9.26994881e+00, 9.90373540e+00,
1.11497680e+01, 1.30781155e+01, 1.41555031e+01, 1.05690210e+01,
4.93501040e+00, 2.07736362e+00, 1.04520877e+00, 6.44288914e-01,
4.23492241e-01, 2.90737482e-01, 2.09998400e-01, 1.35570309e-01,
7.20444729e-02, 4.51847704e-02, 2.30283155e-02, 1.71252600e-02,
-5.35914254e-03, -4.77763558e-02, -4.07294833e-02, -6.35178371e-02,
-7.82834746e-02]
sh = sp.fitter.Fitter.shirley(x, y, 0.01, tolerance=1e-5, maxiter=10, asymmetry=None)
plt.plot(x, y, x, sh)
plt.gca().invert_xaxis()
plt.show()
shirleytest()
# if __name__ == '__main__':
# unittest.main()
| 41.232143
| 89
| 0.631009
|
c1701e7921e1b79fac6277fa23627a59abb09b4d
| 7,622
|
py
|
Python
|
SMBcorr/merra_hybrid_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/merra_hybrid_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | null | null | null |
SMBcorr/merra_hybrid_cumulative.py
|
SmithB/SMBcorr
|
7c35cf8383058986fd82b28beab3c5580f9f8667
|
[
"MIT"
] | 1
|
2020-08-06T19:48:52.000Z
|
2020-08-06T19:48:52.000Z
|
#!/usr/bin/env python
u"""
merra_hybrid_cumulative.py
Written by Tyler Sutterley (10/2019)
Calculates cumulative anomalies of MERRA-2 hybrid surface mass balance products
MERRA-2 Hybrid model outputs provided by Brooke Medley at GSFC
CALLING SEQUENCE:
python merra_hybrid_cumulative.py --directory=<path> --region=gris \
--mean=1980,1995 --product=p_minus_e
COMMAND LINE OPTIONS:
-D X, --directory=X: Working data directory
-R X, --region=X: Region to interpolate (gris, ais)
--mean: Start and end year of mean (separated by commas)
--product: MERRA-2 hybrid product to calculate
p_minus_e: Precipitation minus Evaporation
melt: Snowmelt
-M X, --mode=X: Local permissions mode of the directories and files
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Written 10/2019
"""
from __future__ import print_function
import sys
import os
import re
import time
import getopt
import netCDF4
import numpy as np
#-- PURPOSE: read and interpolate MERRA-2 hybrid surface mass balance variables
def merra_hybrid_cumulative(base_dir, REGION, DIRECTORY=None,
VARIABLE='p_minus_e', RANGE=None, MODE=0o775):
#-- set the input netCDF4 file for the variable of interest
if VARIABLE in ('p_minus_e','melt'):
hybrid_file = 'm2_hybrid_p_minus_e_melt_{0}.nc'.format(REGION.lower())
#-- Open the MERRA-2 Hybrid NetCDF file for reading
fileID = netCDF4.Dataset(os.path.join(base_dir,hybrid_file), 'r')
#-- Get data from each netCDF variable and remove singleton dimensions
fd = {}
DATA = np.squeeze(fileID.variables[VARIABLE][:].copy())
fd['x'] = fileID.variables['x'][:,:].copy()
fd['y'] = fileID.variables['y'][:,:].copy()
fd['time'] = fileID.variables['time'][:].copy()
#-- invalid data value
    fill_value = float(fileID.variables[VARIABLE]._FillValue)
#-- input shape of MERRA-2 Hybrid firn data
nt,nx,ny = np.shape(DATA)
#-- close the NetCDF files
fileID.close()
#-- time is year decimal at time step 5 days
time_step = 5.0/365.25
#-- indices of specified ice mask
i,j = np.nonzero(DATA[0,:,:] != fill_value)
valid_count = len(DATA[0,i,j])
#-- calculate mean period for MERRA-2
tt, = np.nonzero((fd['time'] >= RANGE[0]) & (fd['time'] < (RANGE[1]+1)))
MEAN = np.mean(DATA[tt,:,:], axis=0)
#-- cumulative mass anomalies calculated by removing mean balance flux
fd[VARIABLE] = np.full((nt,nx,ny),fill_value)
CUMULATIVE = np.zeros((valid_count))
#-- Writing output cumulative anomalies to netcdf file
for t in range(nt):
#-- calculating cumulative anomalies for time t
CUMULATIVE += (DATA[t,i,j] - MEAN[i,j])
fd[VARIABLE][t,i,j] = CUMULATIVE.copy()
#-- set directory to base directory if None
if DIRECTORY is None:
DIRECTORY = os.path.expanduser(base_dir)
#-- create output directory if non-existent
if not os.access(DIRECTORY, os.F_OK):
os.makedirs(DIRECTORY,MODE)
#-- output MERRA-2 data file for cumulative data
FILE = 'm2_hybrid_{0}_cumul_{1}.nc'.format(VARIABLE,REGION.lower())
#-- opening NetCDF file for writing
fileID = netCDF4.Dataset(os.path.join(DIRECTORY,FILE),'w',format="NETCDF4")
#-- Defining the NetCDF dimensions
fileID.createDimension('x', nx)
fileID.createDimension('y', ny)
fileID.createDimension('time', nt)
#-- python dictionary with netCDF4 variables
nc = {}
#-- defining the NetCDF variables
nc['x'] = fileID.createVariable('x', fd['x'].dtype, ('x','y',))
nc['y'] = fileID.createVariable('y', fd['y'].dtype, ('x','y',))
nc['time'] = fileID.createVariable('time', fd['time'].dtype, ('time',))
nc[VARIABLE] = fileID.createVariable(VARIABLE, fd[VARIABLE].dtype,
('time','x','y',), fill_value=fill_value, zlib=True)
#-- filling NetCDF variables
for key,val in fd.items():
nc[key][:] = val.copy()
#-- Defining attributes for x and y coordinates
nc['x'].long_name = 'polar stereographic x coordinate, 12.5km resolution'
nc['x'].units = 'meters'
nc['y'].long_name = 'polar stereographic y coordinate, 12.5km resolution'
nc['y'].units = 'meters'
#-- Defining attributes for dataset
if (VARIABLE == 'p_minus_e'):
nc[VARIABLE].long_name = ('MERRA-2 hybrid '
'precipitation-minus-evaporation (net accumulation)')
nc[VARIABLE].units = 'meters of ice equivalent per year'
nc[VARIABLE].comment = ('developed using a degree-day model from our '
'MERRA-2 hybrid skin temperature product and MARv3.5.2 meltwater '
'for 1980-2019')
elif (VARIABLE == 'melt'):
nc[VARIABLE].long_name = ('MERRA-2 meltwater, calibrated to '
'MARv3.5.2 melt')
nc[VARIABLE].units = 'meters of ice equivalent per year'
#-- Defining attributes for date
nc['time'].long_name = 'time, 5-daily resolution'
nc['time'].units = 'decimal years, 5-daily resolution'
#-- global variable of NetCDF file
fileID.TITLE = ('Cumulative anomalies in MERRA-2 Hybrid variables relative '
'to {0:4d}-{1:4d}').format(*RANGE)
fileID.date_created = time.strftime('%Y-%m-%d',time.localtime())
#-- Closing the NetCDF file
fileID.close()
os.chmod(os.path.join(DIRECTORY,FILE), MODE)
#-- PURPOSE: help module to describe the optional input parameters
def usage():
print('\nHelp: {}'.format(os.path.basename(sys.argv[0])))
print(' -D X, --directory=X\tWorking data directory')
print(' -O X, --output=X\tOutput working data directory')
print(' -R X, --region=X\tRegion of firn model to interpolate')
print(' --mean\t\t\tStart and end year of mean (separated by commas)')
print(' --product\t\tMERRA-2 hybrid product to calculate')
print('\tp_minus_e: Precipitation minus Evaporation\n\tmelt: Snowmelt')
print(' -M X, --mode=X\t\tPermission mode of directories and files\n')
#-- Main program that calls merra_hybrid_cumulative()
def main():
#-- Read the system arguments listed after the program
lopt = ['help','directory=','output=','region=','mean=','product=','mode=']
optlist,arglist = getopt.getopt(sys.argv[1:], 'hD:O:R:M:', lopt)
#-- data directory
base_dir = os.getcwd()
DIRECTORY = None
#-- region of firn model
REGION = 'gris'
#-- surface mass balance product
PRODUCTS = ['p_minus_e','melt']
#-- start and end year of mean
RANGE = [1980,1995]
#-- permissions mode
MODE = 0o775
#-- extract parameters
for opt, arg in optlist:
if opt in ('-h','--help'):
usage()
sys.exit()
elif opt in ("-D","--directory"):
base_dir = os.path.expanduser(arg)
elif opt in ("-O","--output"):
DIRECTORY = os.path.expanduser(arg)
elif opt in ("-R","--region"):
REGION = arg.lower()
elif opt in ("--product"):
PRODUCTS = arg.split(',')
elif opt in ("--mean"):
            RANGE = np.array(arg.split(','), dtype=int)
elif opt in ("-M","--mode"):
MODE = int(arg,8)
#-- run program with parameters
for p in PRODUCTS:
merra_hybrid_cumulative(base_dir, REGION, DIRECTORY=DIRECTORY,
VARIABLE=p, RANGE=RANGE, MODE=MODE)
#-- run main program
if __name__ == '__main__':
main()
| 38.887755
| 80
| 0.64445
|
28ec71432af154ad56ec5eeaa11e6c658125123f
| 2,301
|
py
|
Python
|
tests/test_main.py
|
SiriusKoan/shorten-url-with-kv
|
55d108a263441cc17dbec2ce3c8419b4b4ea111d
|
[
"MIT"
] | 3
|
2022-02-16T09:11:39.000Z
|
2022-03-25T06:07:34.000Z
|
tests/test_main.py
|
SiriusKoan/shorten-url-with-kv
|
55d108a263441cc17dbec2ce3c8419b4b4ea111d
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
SiriusKoan/shorten-url-with-kv
|
55d108a263441cc17dbec2ce3c8419b4b4ea111d
|
[
"MIT"
] | null | null | null |
import unittest
from time import sleep
from flask import url_for
from app import create_app
from tests.helper import TestModel, generate_test_data
from app.KV import kv
class IndexPageTest(TestModel):
def setUp(self) -> None:
super().setUp()
self.route = url_for("main.index_page")
self.url_ok = {"old": "https://example.com", "new": "ex"}
self.url_bad_characters = {"old": "https://example.com", "new": "%%%$123"}
self.url_bad_empty = {"old": "https://example.com"}
def test_get_with_no_auth(self):
res = self.get()
self.assertEqual(res.status_code, 200)
self.assertIn(b"Login", res.data)
self.assertIn(b"Register", res.data)
def test_get_with_auth(self):
res = self.get(login="user")
self.assertEqual(res.status_code, 200)
self.assertIn(b"Dashboard", res.data)
def test_post_ok(self):
res = self.post(data=self.url_ok)
self.assertEqual(res.status_code, 200)
self.assertIn(b"Successfully add this record.", res.data)
def test_post_bad_characters(self):
res = self.post(data=self.url_bad_characters)
self.assertEqual(res.status_code, 200)
self.assertIn(b"Bad characters in the field.", res.data)
def test_post_bad_empty(self):
res = self.post(data=self.url_bad_empty)
self.assertEqual(res.status_code, 200)
self.assertIn(b"This field is required.", res.data)
class RedirectPageTest(unittest.TestCase):
def setUp(self) -> None:
self.app = create_app("testing")
self.client = self.app.test_client()
self.app_context = self.app.app_context()
self.app_context.push()
generate_test_data()
kv.write("https://google.com", "test1")
kv.write("https://github.com", "gh")
self.route_ok = "/test1"
self.route_not_found = "/wrong"
def tearDown(self) -> None:
kv.delete("test1")
kv.delete("gh")
if self.app_context is not None:
self.app_context.pop()
def test_ok(self):
res = self.client.get(self.route_ok)
self.assertEqual(res.status_code, 302)
def test_not_found(self):
res = self.client.get(self.route_not_found)
self.assertEqual(res.status_code, 404)
| 33.347826
| 82
| 0.640591
|
2206b5e2acbf468c863b2ce261116c9742613306
| 2,030
|
py
|
Python
|
tests/gold_tests/logging/all_headers_sanitizer.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | 1
|
2019-10-28T04:36:50.000Z
|
2019-10-28T04:36:50.000Z
|
tests/gold_tests/logging/all_headers_sanitizer.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | 3
|
2017-09-22T19:18:56.000Z
|
2021-06-21T18:07:14.000Z
|
tests/gold_tests/logging/all_headers_sanitizer.py
|
zhaorun/trafficserver
|
757256129811441f29eea288b1d7e19bc54fab9c
|
[
"Apache-2.0"
] | null | null | null |
'''
Sanitize the ATS-generated custom log file from the all_headers test.
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
rexl = []
rexl.append((re.compile(r"\{\{Date\}\:\{[^}]*\}\}"), "({__DATE__}}"))
rexl.append((re.compile(r"\{\{Expires\}\:\{[^}]*\}\}"), "({__EXPIRES__}}"))
rexl.append((re.compile(r"\{\{Last-Modified\}\:\{[^}]*\}\}"), "({__LAST_MODIFIED__}}"))
rexl.append((re.compile(r"\{\{Server\}\:\{ATS/[0-9.]*\}\}"), "({__ATS_SERVER__}}"))
rexl.append((re.compile(r"\{\{Server\}\:\{ECS [^}]*\}\}"), "({__ECS_SERVER__}}"))
rexl.append((re.compile(r"\{\{Via\}\:\{[^}]*\}\}"), "({__VIA__}}"))
rexl.append((re.compile(r"\{\{Server\}\:\{ApacheTrafficServer/[0-9.]*\}\}"), "({__ATS2_SERVER__}}"))
rexl.append((re.compile(r"\{\{Age\}\:\{[0-9]*\}\}"), "({__AGE__}}"))
rexl.append((re.compile(r"\:" + sys.argv[1]), "__TS_PORT__")) # 1st and only argument is TS client port
# Handle inconsistencies which I think are caused by different revisions of the standard Python http.server.HTTPServer class.
rexl.append((re.compile(r'\{"359670651[^"]*"\}'), '{"{359670651__WEIRD__}"}'))
rexl.append((re.compile(r'\{\{Accept-Ranges\}:\{bytes\}\}'), ''))
for line in sys.stdin:
for rex, subStr in rexl:
line = rex.sub(subStr, line)
print(line)
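# --- Hedged usage note (not part of the original script) ---
# The sanitizer is intended to be used as a stdin/stdout filter, with the TS
# client port as its only argument.  The port number and file names below are
# placeholders, not values taken from the test suite:
#
#   python3 all_headers_sanitizer.py 8080 < all_headers.log > all_headers.sanitized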
| 46.136364
| 125
| 0.660591
|
42502579a4714e6479681b4e839e55fc4ca4cac4
| 569
|
py
|
Python
|
tests/testapp/models.py
|
vaibhavantil2/detail-personalized-feed
|
1fc74a5c8c514c4979fa08257eea5a95924e82bd
|
[
"MIT"
] | 15
|
2019-02-16T12:17:30.000Z
|
2022-03-27T20:11:49.000Z
|
tests/testapp/models.py
|
vaibhavantil2/detail-personalized-feed
|
1fc74a5c8c514c4979fa08257eea5a95924e82bd
|
[
"MIT"
] | 7
|
2019-05-10T08:27:14.000Z
|
2021-04-26T15:19:06.000Z
|
tests/testapp/models.py
|
andreynovikov/django-rated-reviews
|
ccb24f5412bf5c831f79120c32e1cd51ddca5fe8
|
[
"MIT"
] | 8
|
2019-11-07T21:05:10.000Z
|
2021-08-03T06:59:37.000Z
|
"""
Reviews may be attached to any object. See the review documentation for
more information.
"""
from django.db import models
class Article(models.Model):
headline = models.CharField(max_length=100)
body = models.TextField()
pub_date = models.DateField()
def __str__(self):
return self.headline
class Product(models.Model):
title = models.CharField(max_length=250)
price = models.DecimalField(max_digits=6, decimal_places=2)
enable_reviews = models.BooleanField(default=True)
def __str__(self):
return self.title
| 22.76
| 71
| 0.717047
|
3aefc7d439093a8b34008210a604f60c5756b826
| 728
|
py
|
Python
|
vaassdk/src/vaas/config.py
|
yilanyun/vaas-sdk-python
|
ea4cc94e8d75a07a7dec8da67977443a3f00eff8
|
[
"Apache-2.0"
] | null | null | null |
vaassdk/src/vaas/config.py
|
yilanyun/vaas-sdk-python
|
ea4cc94e8d75a07a7dec8da67977443a3f00eff8
|
[
"Apache-2.0"
] | 1
|
2021-06-11T06:48:05.000Z
|
2021-06-11T06:48:05.000Z
|
vaassdk/src/vaas/config.py
|
yilanyun/vaas-sdk-python
|
ea4cc94e8d75a07a7dec8da67977443a3f00eff8
|
[
"Apache-2.0"
] | 2
|
2021-06-11T06:44:57.000Z
|
2021-06-23T08:34:22.000Z
|
# encoding: utf-8
# Contact Yilan to obtain your access_key and access_token
ACCESS_KEY = '' # channel access key (ak) provided by Yilan
ACCESS_TOKEN = '' # channel token provided by Yilan
PKG_NAME = '' # application package name
PLATFORM = 1 # integration method
# Request URLs
FEED = '/video/feed' # feed stream
CHANNELS = '/video/channels' # channels
RELATION = '/video/relation' # channel-related videos
DETAIL = '/video/getvideos' # video details
CPINFO = '/video/cpinfo' # creator details
CPVIDOES = '/video/cpvideos' # creator videos
PLAY = '/vaas/video/play' # playback
# The constants below do not need to be modified
HOST_PROD = 'http://api.yilanvaas.cn' # production domain (all APIs except playback)
HOST_PROD_PLAY = 'http://play.yilanvaas.cn' # production domain (playback API)
HOST_DATA = 'http://data.1lan.tv/log?ts=%d&access_key=%s&udid=%s&m=%s' # reporting domain
DATA_BEGION = '13149876'
DATA_END = '98761314'
CONNECT_TIMEOUT = 5
READ_TIMEOUT = 10
DEFAULT_MAX_RETRY_TIMES = 2
| 28
| 80
| 0.688187
|
6b1af921694d4f3da65562cf4b2c89a44fc320a7
| 22,101
|
py
|
Python
|
cmstack/cmlang/antlr_generator/lexer.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
cmstack/cmlang/antlr_generator/lexer.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
cmstack/cmlang/antlr_generator/lexer.py
|
he-actlab/cdstack
|
38f605cfa299bf97b5875a19f9fd811a2671d56f
|
[
"Apache-2.0"
] | null | null | null |
# Generated from /home/kinzers/projects/cmstack.code/cmstack/cmlang/antlr_generator/CMLang.g4 by ANTLR 4.7.2
from antlr4 import *
from io import StringIO
from typing.io import TextIO
import sys
def serializedATN():
with StringIO() as buf:
buf.write("\3\u608b\ua72a\u8133\ub9ed\u417c\u3be7\u7786\u5964\2C")
buf.write("\u0217\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7")
buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r")
buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23")
buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30")
buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36")
buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%")
buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.")
buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64")
buf.write("\t\64\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:")
buf.write("\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\4A\tA\4B\tB\4C\t")
buf.write("C\4D\tD\4E\tE\4F\tF\4G\tG\4H\tH\4I\tI\4J\tJ\4K\tK\4L\t")
buf.write("L\4M\tM\4N\tN\3\2\3\2\3\3\3\3\3\4\3\4\3\5\3\5\3\6\3\6")
buf.write("\3\7\3\7\3\b\3\b\3\t\3\t\3\n\3\n\3\13\3\13\3\f\3\f\3\r")
buf.write("\3\r\3\16\3\16\3\17\3\17\3\20\3\20\3\21\3\21\3\22\3\22")
buf.write("\3\22\3\22\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24")
buf.write("\3\24\3\25\3\25\3\25\3\25\3\25\3\26\3\26\3\26\3\26\3\26")
buf.write("\3\26\3\26\3\26\3\27\3\27\3\27\3\27\3\27\3\27\3\30\3\30")
buf.write("\3\30\3\30\3\30\3\30\3\30\3\31\3\31\3\31\3\31\3\31\3\31")
buf.write("\3\32\3\32\3\32\3\32\3\32\3\32\3\33\3\33\3\33\3\33\3\33")
buf.write("\3\33\3\33\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34\3\34")
buf.write("\3\34\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35\3\35")
buf.write("\3\36\3\36\3\36\3\36\3\36\3\36\3\37\3\37\3\37\3\37\3\37")
buf.write("\3 \3 \3 \3!\3!\3!\3\"\3\"\3\"\3#\3#\3$\3$\3$\3$\3$\3")
buf.write("$\3%\3%\3%\3%\3%\3%\3%\3&\3&\3&\3&\3&\3&\3&\3&\3&\3\'")
buf.write("\3\'\3\'\3\'\3(\3(\3(\3(\3(\3(\3)\3)\3)\3)\3*\3*\3*\3")
buf.write("*\3*\3*\3*\3+\3+\3+\3,\3,\3,\3,\3,\3,\3-\3-\3-\3-\3-\3")
buf.write(".\3.\3.\3.\3.\3.\3.\3/\3/\3/\3\60\3\60\3\60\3\61\3\61")
buf.write("\3\61\3\62\3\62\3\62\3\63\3\63\3\63\3\63\3\63\3\63\3\63")
buf.write("\3\63\3\63\3\63\3\64\3\64\3\64\3\65\3\65\3\66\3\66\7\66")
buf.write("\u0180\n\66\f\66\16\66\u0183\13\66\3\66\3\66\3\67\3\67")
buf.write("\3\67\7\67\u018a\n\67\f\67\16\67\u018d\13\67\38\38\78")
buf.write("\u0191\n8\f8\168\u0194\138\38\68\u0197\n8\r8\168\u0198")
buf.write("\58\u019b\n8\39\39\39\69\u01a0\n9\r9\169\u01a1\3:\3:\3")
buf.write(":\6:\u01a7\n:\r:\16:\u01a8\3;\3;\3;\6;\u01ae\n;\r;\16")
buf.write(";\u01af\3<\3<\5<\u01b4\n<\3<\3<\3=\3=\5=\u01ba\n=\3>\3")
buf.write(">\3?\6?\u01bf\n?\r?\16?\u01c0\3?\3?\3@\3@\5@\u01c7\n@")
buf.write("\3@\5@\u01ca\n@\3@\3@\3A\3A\3A\3A\7A\u01d2\nA\fA\16A\u01d5")
buf.write("\13A\3A\3A\3A\3A\3A\3B\3B\3B\3B\7B\u01e0\nB\fB\16B\u01e3")
buf.write("\13B\3B\3B\3C\3C\3D\3D\3E\3E\3F\3F\3G\3G\3H\3H\3I\5I\u01f4")
buf.write("\nI\3I\3I\3I\3I\5I\u01fa\nI\3J\3J\5J\u01fe\nJ\3J\3J\3")
buf.write("K\6K\u0203\nK\rK\16K\u0204\3L\3L\6L\u0209\nL\rL\16L\u020a")
buf.write("\3M\3M\5M\u020f\nM\3M\6M\u0212\nM\rM\16M\u0213\3N\3N\4")
buf.write("\u0181\u01d3\2O\3\3\5\4\7\5\t\6\13\7\r\b\17\t\21\n\23")
buf.write("\13\25\f\27\r\31\16\33\17\35\20\37\21!\22#\23%\24\'\25")
buf.write(")\26+\27-\30/\31\61\32\63\33\65\34\67\359\36;\37= ?!A")
buf.write("\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_\61a\62c\63e\64g\65")
buf.write("i\66k\67m8o9q:s;u<w=y>{?}@\177A\u0081B\u0083C\u0085\2")
buf.write("\u0087\2\u0089\2\u008b\2\u008d\2\u008f\2\u0091\2\u0093")
buf.write("\2\u0095\2\u0097\2\u0099\2\u009b\2\3\2\17\4\2QQqq\4\2")
buf.write("ZZzz\4\2DDdd\4\2\13\13\"\"\4\2\f\f\17\17\5\2C\\aac|\3")
buf.write("\2\63;\3\2\62;\3\2\629\5\2\62;CHch\3\2\62\63\4\2GGgg\4")
buf.write("\2--//\2\u0221\2\3\3\2\2\2\2\5\3\2\2\2\2\7\3\2\2\2\2\t")
buf.write("\3\2\2\2\2\13\3\2\2\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3")
buf.write("\2\2\2\2\23\3\2\2\2\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2")
buf.write("\2\2\2\33\3\2\2\2\2\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2")
buf.write("\2#\3\2\2\2\2%\3\2\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2")
buf.write("\2\2-\3\2\2\2\2/\3\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65")
buf.write("\3\2\2\2\2\67\3\2\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2")
buf.write("\2?\3\2\2\2\2A\3\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2")
buf.write("\2\2I\3\2\2\2\2K\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2")
buf.write("\2\2\2S\3\2\2\2\2U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3")
buf.write("\2\2\2\2]\3\2\2\2\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e")
buf.write("\3\2\2\2\2g\3\2\2\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\2")
buf.write("o\3\2\2\2\2q\3\2\2\2\2s\3\2\2\2\2u\3\2\2\2\2w\3\2\2\2")
buf.write("\2y\3\2\2\2\2{\3\2\2\2\2}\3\2\2\2\2\177\3\2\2\2\2\u0081")
buf.write("\3\2\2\2\2\u0083\3\2\2\2\3\u009d\3\2\2\2\5\u009f\3\2\2")
buf.write("\2\7\u00a1\3\2\2\2\t\u00a3\3\2\2\2\13\u00a5\3\2\2\2\r")
buf.write("\u00a7\3\2\2\2\17\u00a9\3\2\2\2\21\u00ab\3\2\2\2\23\u00ad")
buf.write("\3\2\2\2\25\u00af\3\2\2\2\27\u00b1\3\2\2\2\31\u00b3\3")
buf.write("\2\2\2\33\u00b5\3\2\2\2\35\u00b7\3\2\2\2\37\u00b9\3\2")
buf.write("\2\2!\u00bb\3\2\2\2#\u00bd\3\2\2\2%\u00c1\3\2\2\2\'\u00c7")
buf.write("\3\2\2\2)\u00cb\3\2\2\2+\u00d0\3\2\2\2-\u00d8\3\2\2\2")
buf.write("/\u00de\3\2\2\2\61\u00e5\3\2\2\2\63\u00eb\3\2\2\2\65\u00f1")
buf.write("\3\2\2\2\67\u00f8\3\2\2\29\u0102\3\2\2\2;\u010c\3\2\2")
buf.write("\2=\u0112\3\2\2\2?\u0117\3\2\2\2A\u011a\3\2\2\2C\u011d")
buf.write("\3\2\2\2E\u0120\3\2\2\2G\u0122\3\2\2\2I\u0128\3\2\2\2")
buf.write("K\u012f\3\2\2\2M\u0138\3\2\2\2O\u013c\3\2\2\2Q\u0142\3")
buf.write("\2\2\2S\u0146\3\2\2\2U\u014d\3\2\2\2W\u0150\3\2\2\2Y\u0156")
buf.write("\3\2\2\2[\u015b\3\2\2\2]\u0162\3\2\2\2_\u0165\3\2\2\2")
buf.write("a\u0168\3\2\2\2c\u016b\3\2\2\2e\u016e\3\2\2\2g\u0178\3")
buf.write("\2\2\2i\u017b\3\2\2\2k\u017d\3\2\2\2m\u0186\3\2\2\2o\u019a")
buf.write("\3\2\2\2q\u019c\3\2\2\2s\u01a3\3\2\2\2u\u01aa\3\2\2\2")
buf.write("w\u01b3\3\2\2\2y\u01b9\3\2\2\2{\u01bb\3\2\2\2}\u01be\3")
buf.write("\2\2\2\177\u01c9\3\2\2\2\u0081\u01cd\3\2\2\2\u0083\u01db")
buf.write("\3\2\2\2\u0085\u01e6\3\2\2\2\u0087\u01e8\3\2\2\2\u0089")
buf.write("\u01ea\3\2\2\2\u008b\u01ec\3\2\2\2\u008d\u01ee\3\2\2\2")
buf.write("\u008f\u01f0\3\2\2\2\u0091\u01f9\3\2\2\2\u0093\u01fd\3")
buf.write("\2\2\2\u0095\u0202\3\2\2\2\u0097\u0206\3\2\2\2\u0099\u020c")
buf.write("\3\2\2\2\u009b\u0215\3\2\2\2\u009d\u009e\7*\2\2\u009e")
buf.write("\4\3\2\2\2\u009f\u00a0\7+\2\2\u00a0\6\3\2\2\2\u00a1\u00a2")
buf.write("\7}\2\2\u00a2\b\3\2\2\2\u00a3\u00a4\7\177\2\2\u00a4\n")
buf.write("\3\2\2\2\u00a5\u00a6\7.\2\2\u00a6\f\3\2\2\2\u00a7\u00a8")
buf.write("\7]\2\2\u00a8\16\3\2\2\2\u00a9\u00aa\7_\2\2\u00aa\20\3")
buf.write("\2\2\2\u00ab\u00ac\7<\2\2\u00ac\22\3\2\2\2\u00ad\u00ae")
buf.write("\7-\2\2\u00ae\24\3\2\2\2\u00af\u00b0\7/\2\2\u00b0\26\3")
buf.write("\2\2\2\u00b1\u00b2\7,\2\2\u00b2\30\3\2\2\2\u00b3\u00b4")
buf.write("\7\61\2\2\u00b4\32\3\2\2\2\u00b5\u00b6\7\'\2\2\u00b6\34")
buf.write("\3\2\2\2\u00b7\u00b8\7>\2\2\u00b8\36\3\2\2\2\u00b9\u00ba")
buf.write("\7@\2\2\u00ba \3\2\2\2\u00bb\u00bc\7A\2\2\u00bc\"\3\2")
buf.write("\2\2\u00bd\u00be\7k\2\2\u00be\u00bf\7p\2\2\u00bf\u00c0")
buf.write("\7v\2\2\u00c0$\3\2\2\2\u00c1\u00c2\7h\2\2\u00c2\u00c3")
buf.write("\7n\2\2\u00c3\u00c4\7q\2\2\u00c4\u00c5\7c\2\2\u00c5\u00c6")
buf.write("\7v\2\2\u00c6&\3\2\2\2\u00c7\u00c8\7u\2\2\u00c8\u00c9")
buf.write("\7v\2\2\u00c9\u00ca\7t\2\2\u00ca(\3\2\2\2\u00cb\u00cc")
buf.write("\7d\2\2\u00cc\u00cd\7q\2\2\u00cd\u00ce\7q\2\2\u00ce\u00cf")
buf.write("\7n\2\2\u00cf*\3\2\2\2\u00d0\u00d1\7e\2\2\u00d1\u00d2")
buf.write("\7q\2\2\u00d2\u00d3\7o\2\2\u00d3\u00d4\7r\2\2\u00d4\u00d5")
buf.write("\7n\2\2\u00d5\u00d6\7g\2\2\u00d6\u00d7\7z\2\2\u00d7,\3")
buf.write("\2\2\2\u00d8\u00d9\7k\2\2\u00d9\u00da\7p\2\2\u00da\u00db")
buf.write("\7r\2\2\u00db\u00dc\7w\2\2\u00dc\u00dd\7v\2\2\u00dd.\3")
buf.write("\2\2\2\u00de\u00df\7q\2\2\u00df\u00e0\7w\2\2\u00e0\u00e1")
buf.write("\7v\2\2\u00e1\u00e2\7r\2\2\u00e2\u00e3\7w\2\2\u00e3\u00e4")
buf.write("\7v\2\2\u00e4\60\3\2\2\2\u00e5\u00e6\7u\2\2\u00e6\u00e7")
buf.write("\7v\2\2\u00e7\u00e8\7c\2\2\u00e8\u00e9\7v\2\2\u00e9\u00ea")
buf.write("\7g\2\2\u00ea\62\3\2\2\2\u00eb\u00ec\7r\2\2\u00ec\u00ed")
buf.write("\7c\2\2\u00ed\u00ee\7t\2\2\u00ee\u00ef\7c\2\2\u00ef\u00f0")
buf.write("\7o\2\2\u00f0\64\3\2\2\2\u00f1\u00f2\7u\2\2\u00f2\u00f3")
buf.write("\7r\2\2\u00f3\u00f4\7t\2\2\u00f4\u00f5\7k\2\2\u00f5\u00f6")
buf.write("\7p\2\2\u00f6\u00f7\7i\2\2\u00f7\66\3\2\2\2\u00f8\u00f9")
buf.write("\7t\2\2\u00f9\u00fa\7g\2\2\u00fa\u00fb\7u\2\2\u00fb\u00fc")
buf.write("\7g\2\2\u00fc\u00fd\7t\2\2\u00fd\u00fe\7x\2\2\u00fe\u00ff")
buf.write("\7q\2\2\u00ff\u0100\7k\2\2\u0100\u0101\7t\2\2\u01018\3")
buf.write("\2\2\2\u0102\u0103\7e\2\2\u0103\u0104\7q\2\2\u0104\u0105")
buf.write("\7o\2\2\u0105\u0106\7r\2\2\u0106\u0107\7q\2\2\u0107\u0108")
buf.write("\7p\2\2\u0108\u0109\7g\2\2\u0109\u010a\7p\2\2\u010a\u010b")
buf.write("\7v\2\2\u010b:\3\2\2\2\u010c\u010d\7k\2\2\u010d\u010e")
buf.write("\7p\2\2\u010e\u010f\7f\2\2\u010f\u0110\7g\2\2\u0110\u0111")
buf.write("\7z\2\2\u0111<\3\2\2\2\u0112\u0113\7h\2\2\u0113\u0114")
buf.write("\7n\2\2\u0114\u0115\7q\2\2\u0115\u0116\7y\2\2\u0116>\3")
buf.write("\2\2\2\u0117\u0118\7\60\2\2\u0118\u0119\7,\2\2\u0119@")
buf.write("\3\2\2\2\u011a\u011b\7\60\2\2\u011b\u011c\7^\2\2\u011c")
buf.write("B\3\2\2\2\u011d\u011e\7\60\2\2\u011e\u011f\7\61\2\2\u011f")
buf.write("D\3\2\2\2\u0120\u0121\7`\2\2\u0121F\3\2\2\2\u0122\u0123")
buf.write("\7d\2\2\u0123\u0124\7t\2\2\u0124\u0125\7g\2\2\u0125\u0126")
buf.write("\7c\2\2\u0126\u0127\7m\2\2\u0127H\3\2\2\2\u0128\u0129")
buf.write("\7t\2\2\u0129\u012a\7g\2\2\u012a\u012b\7v\2\2\u012b\u012c")
buf.write("\7w\2\2\u012c\u012d\7t\2\2\u012d\u012e\7p\2\2\u012eJ\3")
buf.write("\2\2\2\u012f\u0130\7h\2\2\u0130\u0131\7w\2\2\u0131\u0132")
buf.write("\7p\2\2\u0132\u0133\7e\2\2\u0133\u0134\7v\2\2\u0134\u0135")
buf.write("\7k\2\2\u0135\u0136\7q\2\2\u0136\u0137\7p\2\2\u0137L\3")
buf.write("\2\2\2\u0138\u0139\7h\2\2\u0139\u013a\7q\2\2\u013a\u013b")
buf.write("\7t\2\2\u013bN\3\2\2\2\u013c\u013d\7y\2\2\u013d\u013e")
buf.write("\7j\2\2\u013e\u013f\7k\2\2\u013f\u0140\7n\2\2\u0140\u0141")
buf.write("\7g\2\2\u0141P\3\2\2\2\u0142\u0143\7g\2\2\u0143\u0144")
buf.write("\7p\2\2\u0144\u0145\7f\2\2\u0145R\3\2\2\2\u0146\u0147")
buf.write("\7i\2\2\u0147\u0148\7n\2\2\u0148\u0149\7q\2\2\u0149\u014a")
buf.write("\7d\2\2\u014a\u014b\7c\2\2\u014b\u014c\7n\2\2\u014cT\3")
buf.write("\2\2\2\u014d\u014e\7k\2\2\u014e\u014f\7h\2\2\u014fV\3")
buf.write("\2\2\2\u0150\u0151\7e\2\2\u0151\u0152\7n\2\2\u0152\u0153")
buf.write("\7g\2\2\u0153\u0154\7c\2\2\u0154\u0155\7t\2\2\u0155X\3")
buf.write("\2\2\2\u0156\u0157\7g\2\2\u0157\u0158\7n\2\2\u0158\u0159")
buf.write("\7u\2\2\u0159\u015a\7g\2\2\u015aZ\3\2\2\2\u015b\u015c")
buf.write("\7g\2\2\u015c\u015d\7n\2\2\u015d\u015e\7u\2\2\u015e\u015f")
buf.write("\7g\2\2\u015f\u0160\7k\2\2\u0160\u0161\7h\2\2\u0161\\")
buf.write("\3\2\2\2\u0162\u0163\7>\2\2\u0163\u0164\7?\2\2\u0164^")
buf.write("\3\2\2\2\u0165\u0166\7@\2\2\u0166\u0167\7?\2\2\u0167`")
buf.write("\3\2\2\2\u0168\u0169\7?\2\2\u0169\u016a\7?\2\2\u016ab")
buf.write("\3\2\2\2\u016b\u016c\7#\2\2\u016c\u016d\7?\2\2\u016dd")
buf.write("\3\2\2\2\u016e\u016f\7v\2\2\u016f\u0170\7t\2\2\u0170\u0171")
buf.write("\7c\2\2\u0171\u0172\7p\2\2\u0172\u0173\7u\2\2\u0173\u0174")
buf.write("\7r\2\2\u0174\u0175\7q\2\2\u0175\u0176\7u\2\2\u0176\u0177")
buf.write("\7g\2\2\u0177f\3\2\2\2\u0178\u0179\7\60\2\2\u0179\u017a")
buf.write("\7)\2\2\u017ah\3\2\2\2\u017b\u017c\7=\2\2\u017cj\3\2\2")
buf.write("\2\u017d\u0181\7$\2\2\u017e\u0180\13\2\2\2\u017f\u017e")
buf.write("\3\2\2\2\u0180\u0183\3\2\2\2\u0181\u0182\3\2\2\2\u0181")
buf.write("\u017f\3\2\2\2\u0182\u0184\3\2\2\2\u0183\u0181\3\2\2\2")
buf.write("\u0184\u0185\7$\2\2\u0185l\3\2\2\2\u0186\u018b\5\u0085")
buf.write("C\2\u0187\u018a\5\u0085C\2\u0188\u018a\5\u0089E\2\u0189")
buf.write("\u0187\3\2\2\2\u0189\u0188\3\2\2\2\u018a\u018d\3\2\2\2")
buf.write("\u018b\u0189\3\2\2\2\u018b\u018c\3\2\2\2\u018cn\3\2\2")
buf.write("\2\u018d\u018b\3\2\2\2\u018e\u0192\5\u0087D\2\u018f\u0191")
buf.write("\5\u0089E\2\u0190\u018f\3\2\2\2\u0191\u0194\3\2\2\2\u0192")
buf.write("\u0190\3\2\2\2\u0192\u0193\3\2\2\2\u0193\u019b\3\2\2\2")
buf.write("\u0194\u0192\3\2\2\2\u0195\u0197\7\62\2\2\u0196\u0195")
buf.write("\3\2\2\2\u0197\u0198\3\2\2\2\u0198\u0196\3\2\2\2\u0198")
buf.write("\u0199\3\2\2\2\u0199\u019b\3\2\2\2\u019a\u018e\3\2\2\2")
buf.write("\u019a\u0196\3\2\2\2\u019bp\3\2\2\2\u019c\u019d\7\62\2")
buf.write("\2\u019d\u019f\t\2\2\2\u019e\u01a0\5\u008bF\2\u019f\u019e")
buf.write("\3\2\2\2\u01a0\u01a1\3\2\2\2\u01a1\u019f\3\2\2\2\u01a1")
buf.write("\u01a2\3\2\2\2\u01a2r\3\2\2\2\u01a3\u01a4\7\62\2\2\u01a4")
buf.write("\u01a6\t\3\2\2\u01a5\u01a7\5\u008dG\2\u01a6\u01a5\3\2")
buf.write("\2\2\u01a7\u01a8\3\2\2\2\u01a8\u01a6\3\2\2\2\u01a8\u01a9")
buf.write("\3\2\2\2\u01a9t\3\2\2\2\u01aa\u01ab\7\62\2\2\u01ab\u01ad")
buf.write("\t\4\2\2\u01ac\u01ae\5\u008fH\2\u01ad\u01ac\3\2\2\2\u01ae")
buf.write("\u01af\3\2\2\2\u01af\u01ad\3\2\2\2\u01af\u01b0\3\2\2\2")
buf.write("\u01b0v\3\2\2\2\u01b1\u01b4\5y=\2\u01b2\u01b4\5\u0095")
buf.write("K\2\u01b3\u01b1\3\2\2\2\u01b3\u01b2\3\2\2\2\u01b4\u01b5")
buf.write("\3\2\2\2\u01b5\u01b6\7k\2\2\u01b6x\3\2\2\2\u01b7\u01ba")
buf.write("\5\u0091I\2\u01b8\u01ba\5\u0093J\2\u01b9\u01b7\3\2\2\2")
buf.write("\u01b9\u01b8\3\2\2\2\u01baz\3\2\2\2\u01bb\u01bc\7?\2\2")
buf.write("\u01bc|\3\2\2\2\u01bd\u01bf\t\5\2\2\u01be\u01bd\3\2\2")
buf.write("\2\u01bf\u01c0\3\2\2\2\u01c0\u01be\3\2\2\2\u01c0\u01c1")
buf.write("\3\2\2\2\u01c1\u01c2\3\2\2\2\u01c2\u01c3\b?\2\2\u01c3")
buf.write("~\3\2\2\2\u01c4\u01c6\7\17\2\2\u01c5\u01c7\7\f\2\2\u01c6")
buf.write("\u01c5\3\2\2\2\u01c6\u01c7\3\2\2\2\u01c7\u01ca\3\2\2\2")
buf.write("\u01c8\u01ca\7\f\2\2\u01c9\u01c4\3\2\2\2\u01c9\u01c8\3")
buf.write("\2\2\2\u01ca\u01cb\3\2\2\2\u01cb\u01cc\b@\2\2\u01cc\u0080")
buf.write("\3\2\2\2\u01cd\u01ce\7\61\2\2\u01ce\u01cf\7,\2\2\u01cf")
buf.write("\u01d3\3\2\2\2\u01d0\u01d2\13\2\2\2\u01d1\u01d0\3\2\2")
buf.write("\2\u01d2\u01d5\3\2\2\2\u01d3\u01d4\3\2\2\2\u01d3\u01d1")
buf.write("\3\2\2\2\u01d4\u01d6\3\2\2\2\u01d5\u01d3\3\2\2\2\u01d6")
buf.write("\u01d7\7,\2\2\u01d7\u01d8\7\61\2\2\u01d8\u01d9\3\2\2\2")
buf.write("\u01d9\u01da\bA\2\2\u01da\u0082\3\2\2\2\u01db\u01dc\7")
buf.write("\61\2\2\u01dc\u01dd\7\61\2\2\u01dd\u01e1\3\2\2\2\u01de")
buf.write("\u01e0\n\6\2\2\u01df\u01de\3\2\2\2\u01e0\u01e3\3\2\2\2")
buf.write("\u01e1\u01df\3\2\2\2\u01e1\u01e2\3\2\2\2\u01e2\u01e4\3")
buf.write("\2\2\2\u01e3\u01e1\3\2\2\2\u01e4\u01e5\bB\2\2\u01e5\u0084")
buf.write("\3\2\2\2\u01e6\u01e7\t\7\2\2\u01e7\u0086\3\2\2\2\u01e8")
buf.write("\u01e9\t\b\2\2\u01e9\u0088\3\2\2\2\u01ea\u01eb\t\t\2\2")
buf.write("\u01eb\u008a\3\2\2\2\u01ec\u01ed\t\n\2\2\u01ed\u008c\3")
buf.write("\2\2\2\u01ee\u01ef\t\13\2\2\u01ef\u008e\3\2\2\2\u01f0")
buf.write("\u01f1\t\f\2\2\u01f1\u0090\3\2\2\2\u01f2\u01f4\5\u0095")
buf.write("K\2\u01f3\u01f2\3\2\2\2\u01f3\u01f4\3\2\2\2\u01f4\u01f5")
buf.write("\3\2\2\2\u01f5\u01fa\5\u0097L\2\u01f6\u01f7\5\u0095K\2")
buf.write("\u01f7\u01f8\7\60\2\2\u01f8\u01fa\3\2\2\2\u01f9\u01f3")
buf.write("\3\2\2\2\u01f9\u01f6\3\2\2\2\u01fa\u0092\3\2\2\2\u01fb")
buf.write("\u01fe\5\u0095K\2\u01fc\u01fe\5\u0091I\2\u01fd\u01fb\3")
buf.write("\2\2\2\u01fd\u01fc\3\2\2\2\u01fe\u01ff\3\2\2\2\u01ff\u0200")
buf.write("\5\u0099M\2\u0200\u0094\3\2\2\2\u0201\u0203\5\u0089E\2")
buf.write("\u0202\u0201\3\2\2\2\u0203\u0204\3\2\2\2\u0204\u0202\3")
buf.write("\2\2\2\u0204\u0205\3\2\2\2\u0205\u0096\3\2\2\2\u0206\u0208")
buf.write("\7\60\2\2\u0207\u0209\5\u0089E\2\u0208\u0207\3\2\2\2\u0209")
buf.write("\u020a\3\2\2\2\u020a\u0208\3\2\2\2\u020a\u020b\3\2\2\2")
buf.write("\u020b\u0098\3\2\2\2\u020c\u020e\t\r\2\2\u020d\u020f\t")
buf.write("\16\2\2\u020e\u020d\3\2\2\2\u020e\u020f\3\2\2\2\u020f")
buf.write("\u0211\3\2\2\2\u0210\u0212\5\u0089E\2\u0211\u0210\3\2")
buf.write("\2\2\u0212\u0213\3\2\2\2\u0213\u0211\3\2\2\2\u0213\u0214")
buf.write("\3\2\2\2\u0214\u009a\3\2\2\2\u0215\u0216\t\16\2\2\u0216")
buf.write("\u009c\3\2\2\2\32\2\u0181\u0189\u018b\u0192\u0198\u019a")
buf.write("\u01a1\u01a8\u01af\u01b3\u01b9\u01c0\u01c6\u01c9\u01d3")
buf.write("\u01e1\u01f3\u01f9\u01fd\u0204\u020a\u020e\u0213\3\b\2")
buf.write("\2")
return buf.getvalue()
class CMLangLexer(Lexer):
atn = ATNDeserializer().deserialize(serializedATN())
decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]
T__0 = 1
T__1 = 2
T__2 = 3
T__3 = 4
T__4 = 5
T__5 = 6
T__6 = 7
T__7 = 8
T__8 = 9
T__9 = 10
T__10 = 11
T__11 = 12
T__12 = 13
T__13 = 14
T__14 = 15
T__15 = 16
T__16 = 17
T__17 = 18
T__18 = 19
T__19 = 20
T__20 = 21
INPUT = 22
OUTPUT = 23
STATE = 24
PARAMETER = 25
SPRING = 26
RESERVOIR = 27
COMPONENT = 28
INDEX = 29
FLOW = 30
ARRAYMUL = 31
ARRAYDIV = 32
ARRAYRDIV = 33
POW = 34
BREAK = 35
RETURN = 36
FUNCTION = 37
FOR = 38
WHILE = 39
END = 40
GLOBAL = 41
IF = 42
CLEAR = 43
ELSE = 44
ELSEIF = 45
LE_OP = 46
GE_OP = 47
EQ_OP = 48
NE_OP = 49
TRANSPOSE = 50
NCTRANSPOSE = 51
SEMI = 52
STRING_LITERAL = 53
IDENTIFIER = 54
DECIMAL_INTEGER = 55
OCT_INTEGER = 56
HEX_INTEGER = 57
BIN_INTEGER = 58
IMAG_NUMBER = 59
FLOAT_NUMBER = 60
EQ = 61
WHITESPACE = 62
NEWLINE = 63
BLOCKCOMMENT = 64
LINECOMMENT = 65
channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]
modeNames = [ "DEFAULT_MODE" ]
literalNames = [ "<INVALID>",
"'('", "')'", "'{'", "'}'", "','", "'['", "']'", "':'", "'+'",
"'-'", "'*'", "'/'", "'%'", "'<'", "'>'", "'?'", "'int'", "'float'",
"'str'", "'bool'", "'complex'", "'input'", "'output'", "'state'",
"'param'", "'spring'", "'reservoir'", "'component'", "'index'",
"'flow'", "'.*'", "'.\\'", "'./'", "'^'", "'break'", "'return'",
"'function'", "'for'", "'while'", "'end'", "'global'", "'if'",
"'clear'", "'else'", "'elseif'", "'<='", "'>='", "'=='", "'!='",
"'transpose'", "'.''", "';'", "'='" ]
symbolicNames = [ "<INVALID>",
"INPUT", "OUTPUT", "STATE", "PARAMETER", "SPRING", "RESERVOIR",
"COMPONENT", "INDEX", "FLOW", "ARRAYMUL", "ARRAYDIV", "ARRAYRDIV",
"POW", "BREAK", "RETURN", "FUNCTION", "FOR", "WHILE", "END",
"GLOBAL", "IF", "CLEAR", "ELSE", "ELSEIF", "LE_OP", "GE_OP",
"EQ_OP", "NE_OP", "TRANSPOSE", "NCTRANSPOSE", "SEMI", "STRING_LITERAL",
"IDENTIFIER", "DECIMAL_INTEGER", "OCT_INTEGER", "HEX_INTEGER",
"BIN_INTEGER", "IMAG_NUMBER", "FLOAT_NUMBER", "EQ", "WHITESPACE",
"NEWLINE", "BLOCKCOMMENT", "LINECOMMENT" ]
ruleNames = [ "T__0", "T__1", "T__2", "T__3", "T__4", "T__5", "T__6",
"T__7", "T__8", "T__9", "T__10", "T__11", "T__12", "T__13",
"T__14", "T__15", "T__16", "T__17", "T__18", "T__19",
"T__20", "INPUT", "OUTPUT", "STATE", "PARAMETER", "SPRING",
"RESERVOIR", "COMPONENT", "INDEX", "FLOW", "ARRAYMUL",
"ARRAYDIV", "ARRAYRDIV", "POW", "BREAK", "RETURN", "FUNCTION",
"FOR", "WHILE", "END", "GLOBAL", "IF", "CLEAR", "ELSE",
"ELSEIF", "LE_OP", "GE_OP", "EQ_OP", "NE_OP", "TRANSPOSE",
"NCTRANSPOSE", "SEMI", "STRING_LITERAL", "IDENTIFIER",
"DECIMAL_INTEGER", "OCT_INTEGER", "HEX_INTEGER", "BIN_INTEGER",
"IMAG_NUMBER", "FLOAT_NUMBER", "EQ", "WHITESPACE", "NEWLINE",
"BLOCKCOMMENT", "LINECOMMENT", "NONDIGIT", "NON_ZERO_DIGIT",
"DIGIT", "OCT_DIGIT", "HEX_DIGIT", "BIN_DIGIT", "POINT_FLOAT",
"EXPONENT_FLOAT", "INT_PART", "FRACTION", "EXPONENT",
"SIGN" ]
grammarFileName = "CMLang.g4"
def __init__(self, input=None, output:TextIO = sys.stdout):
super().__init__(input, output)
self.checkVersion("4.7.2")
self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
self._actions = None
self._predicates = None
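# --- Editor's usage sketch (not part of the ANTLR-generated file) -----------
# A minimal, hedged example of driving this generated lexer with the
# antlr4-python3-runtime; the sample source string is purely illustrative.
if __name__ == "__main__":
    from antlr4 import InputStream, CommonTokenStream
    _lexer = CMLangLexer(InputStream("state x; x = 1 + 2;"))
    _tokens = CommonTokenStream(_lexer)
    _tokens.fill()  # run the lexer over the whole input
    for _tok in _tokens.tokens:
        print(_tok)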
| 59.894309
| 108
| 0.564183
|
843987cccbfc20ee98b71faa5dcf3f69e893db92
| 6,220
|
py
|
Python
|
SUSYBSMAnalysis/HSCP/test/BuildHSCParticles/HSCParticleProducer_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
SUSYBSMAnalysis/HSCP/test/BuildHSCParticles/HSCParticleProducer_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
SUSYBSMAnalysis/HSCP/test/BuildHSCParticles/HSCParticleProducer_cfg.py
|
SWuchterl/cmssw
|
769b4a7ef81796579af7d626da6039dfa0347b8e
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("HSCPAnalysis")
#The following parameters need to be provided
#isSignal, isBckg, isData, isSkimmedSample, GTAG, InputFileList
#isSignal = True
#isBckg = False
#isData = False
#isSkimmedSample = False
#GTAG = 'START72_V1::All'
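#A hedged, illustrative example of how a caller might define them before this
#fragment is used (every value below is a placeholder, not a recommendation):
#  isSignal = False; isBckg = False; isData = True; isSkimmedSample = False
#  GTAG = 'START72_V1::All'
#  InputFileList = cms.untracked.vstring('file:skimmed_events.root')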
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load('Configuration.Geometry.GeometryExtended2015Reco_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load('Configuration.StandardSequences.Services_cff')
process.options = cms.untracked.PSet(
wantSummary = cms.untracked.bool(True),
SkipEvent = cms.untracked.vstring('ProductNotFound'),
)
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
process.source = cms.Source("PoolSource",
fileNames = InputFileList,
inputCommands = cms.untracked.vstring("keep *", "drop *_MEtoEDMConverter_*_*")
)
if(isSignal): process.source.duplicateCheckMode = cms.untracked.string("noDuplicateCheck")
#for i in range(0,25):
# process.source.fileNames.extend(["file:/afs/cern.ch/user/q/querten/workspace/public/14_08_12_Run2HSCP/CMSSW_7_2_X_2014-08-18-0200/src/SUSYBSMAnalysis/HSCP/test/BuildHSCParticles/Signals/../../../../../SampleProd/FARM_RECO/outputs/gluino1TeV_RECO_%04i.root" % i])
process.GlobalTag.globaltag = GTAG
process.HSCPTuplePath = cms.Path()
########################################################################
#Run the Skim sequence if necessary
if(not isSkimmedSample):
process.nEventsBefSkim = cms.EDProducer("EventCountProducer")
process.load('Configuration.Skimming.PDWG_EXOHSCP_cff')
process.load('HLTrigger.HLTfilters.hltHighLevel_cfi')
process.HSCPTrigger = process.hltHighLevel.clone()
process.HSCPTrigger.TriggerResultsTag = cms.InputTag( "TriggerResults", "", "HLT" )
process.HSCPTrigger.andOr = cms.bool( True ) #OR
process.HSCPTrigger.throw = cms.bool( False )
if(isData):
process.HSCPTrigger.HLTPaths = [
"HLT_*_dEdx*",
"HLT_Mu40_eta2p1*",
"HLT_Mu50_eta2p1*",
"HLT_HT650_*",
"HLT_MET80_*",
"HLT_L2Mu*MET*",
"HLT_L2Mu*NoBPTX*",
"HLT_PFMET150_*",
]
elif(isBckg):
#to be updated to Run2 Triggers, in the meanwhile keep all of them to study trigger efficiency
process.HSCPTrigger.HLTPaths = ["*"]
else:
#do not apply trigger filter on signal
process.HSCPTrigger.HLTPaths = ["*"]
process.HSCPTuplePath += process.nEventsBefSkim + process.HSCPTrigger + process.exoticaHSCPSeq
########################################################################
#Run the HSCP EDM-tuple Sequence on skimmed sample
process.nEventsBefEDM = cms.EDProducer("EventCountProducer")
process.load("SUSYBSMAnalysis.HSCP.HSCParticleProducerFromSkim_cff")
process.HSCPTuplePath += process.nEventsBefEDM + process.HSCParticleProducerSeq
########################################################################
# Only for MC samples, save skimmed genParticles
if(isSignal or isBckg):
process.load("PhysicsTools.HepMCCandAlgos.genParticles_cfi")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.allGenParticles = cms.EDProducer("GenParticleProducer",
saveBarCodes = cms.untracked.bool(False),
src = cms.InputTag("VtxSmeared"),
abortOnUnknownPDGCode = cms.untracked.bool(False)
)
process.genParticles = cms.EDFilter("GenParticleSelector",
filter = cms.bool(False),
src = cms.InputTag("allGenParticles"),
cut = cms.string('charge != 0 & pt > 5.0'),
stableOnly = cms.bool(True)
)
process.HSCPTuplePath += process.allGenParticles + process.genParticles
########################################################################
#make the pool output
process.Out = cms.OutputModule("PoolOutputModule",
outputCommands = cms.untracked.vstring(
"drop *",
"keep EventAux_*_*_*",
"keep LumiSummary_*_*_*",
"keep edmMergeableCounter_*_*_*",
"keep GenRunInfoProduct_*_*_*",
"keep *_genParticles_*_HSCPAnalysis",
"keep GenEventInfoProduct_generator_*_*",
"keep *_offlinePrimaryVertices_*_*",
"keep SiStripClusteredmNewDetSetVector_generalTracksSkim_*_*",
"keep SiPixelClusteredmNewDetSetVector_generalTracksSkim_*_*",
"keep *_TrackRefitter_*_*",
"keep *_standAloneMuons_*_*",
"keep *_globalMuons_*_*",
"keep *_muonsSkim_*_*",
"keep edmTriggerResults_TriggerResults_*_*",
"keep *_ak5PFJetsPt15__*",
"keep recoPFMETs_pfMet__*",
"keep *_HSCParticleProducer_*_*",
"keep *_HSCPIsolation01__*",
"keep *_HSCPIsolation03__*",
"keep *_HSCPIsolation05__*",
"keep *_dedx*_*_HSCPAnalysis",
"keep *_muontiming_*_HSCPAnalysis",
"keep triggerTriggerEvent_hltTriggerSummaryAOD_*_*",
"keep *_RefitMTSAMuons_*_*",
"keep *_MTMuons_*_*",
"keep *_MTSAMuons_*_*",
"keep *_MTmuontiming_*_*",
"keep *_refittedStandAloneMuons_*_*",
"keep *_offlineBeamSpot_*_*",
"drop *_offlineBeamSpot_*_HSCPAnalysis", #no need to save the BS from this process
"keep *_MuonSegmentProducer_*_*",
"drop TrajectorysToOnerecoTracksAssociation_TrackRefitter__",
"drop recoTrackExtras_*_*_*",
"keep recoTrackExtras_TrackRefitter_*_*",
"drop TrackingRecHitsOwned_*Muon*_*_*",
"keep *_g4SimHits_StoppedParticles*_*",
"keep PileupSummaryInfos_addPileupInfo_*_*"
),
fileName = cms.untracked.string('HSCP.root'),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('')
),
)
if(isBckg or isData):
process.Out.SelectEvents.SelectEvents = cms.vstring('HSCPTuplePath')
########################################################################
#schedule the sequence
process.endPath1 = cms.EndPath(process.Out)
process.schedule = cms.Schedule(process.HSCPTuplePath, process.endPath1)
| 38.875
| 266
| 0.661093
|
df520e80fdd85bb1b608cd4fbf064c83355308b1
| 3,329
|
py
|
Python
|
openfl/models/inference_only_model_wrapper.py
|
brandon-edwards/OpenFederatedLearning
|
379e1207f2e69b14ab5552e998bd3190363d6ecf
|
[
"Apache-2.0"
] | 1
|
2021-07-01T03:40:52.000Z
|
2021-07-01T03:40:52.000Z
|
openfl/models/inference_only_model_wrapper.py
|
brandon-edwards/OpenFederatedLearning
|
379e1207f2e69b14ab5552e998bd3190363d6ecf
|
[
"Apache-2.0"
] | null | null | null |
openfl/models/inference_only_model_wrapper.py
|
brandon-edwards/OpenFederatedLearning
|
379e1207f2e69b14ab5552e998bd3190363d6ecf
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) 2020 Intel Corporation
# Licensed subject to the terms of the separately executed evaluation license agreement between Intel Corporation and you.
from openfl.models.flmodel import FLModel
from openfl.flplan import init_object
class InferenceOnlyModelWrapper(FLModel):
"""Model class wrapper for Federated Learning to enable inferencing on the model using
run_inference_from_flplan with minimal requirements on the base_model. Those requirements
on the base model are as follows:
A python class providing access to your model, with two required instance methods.
Please populate argument defaults where appropriate to allow for the optimal
configuration, and document to allow users to understand how to customize.
    Particularly relevant is that your model's infer_volume method processes images according
    to our assumptions on input and output shapes (see below).
The required instance methods are as follows:
def __init__(self, *args, **kwargs):
'''
Instantiates a properly configured model object, including population
of all model weights from a model serialization file.
Args: ...
Kwargs: ...
Returns:
None
'''
raise NotImplementedError()
def infer_volume(self, X):
'''
Perform model inference on a volume of data.
Args:
X (numpy array): Input volume to perform inference on, containing channels for
all scan modalities required for the model task.
The expected input shape can be either:
(num_channels is the number of scanning modalities used by the model)
- channels first -
(num_samples, num_channels,coronal_axis_dim, sagittal_axis_dim, transversal_axis_dim)
- or channels last -
(num_samples,coronal_axis_dim, sagittal_axis_dim, transversal_axis_dim, num_channels)
Returns:
(numpy array): Model output for the input volume.
The output shape is as follows:
(num_samples, coronal_axis_dim, sagittal_axis_dim, transversal_axis_dim)
'''
raise NotImplementedError()
"""
def __init__(self, data, base_model):
"""Initializer
Args:
data (child class of fldata): Object to provide inference data loader.
base_model: Base model satisfying requirements above
"""
super(InferenceOnlyModelWrapper, self).__init__(data=data)
self.infer_batch = base_model.infer_volume
# If base_model has implemented FLModel methods below, we expose them
# to be used to populate model weights outside of the model init if desired.
        # FIXME: Dependency on knowledge of the FLModel object here
# Should we instead dynamically transfer methods?
for method_name in ['get_tensor_dict', 'set_tensor_dict', 'load_native']:
if hasattr(base_model, method_name):
setattr(self, method_name, getattr(base_model, method_name))
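# --- Editor's sketch: a minimal base model satisfying the interface above ---
# Not part of OpenFL; the channel-averaging "inference" below is a placeholder
# that only illustrates the documented input/output shapes.
class _ExampleBaseModel(object):

    def __init__(self):
        # a real implementation would build its network and load weights here
        pass

    def infer_volume(self, X):
        # X: (num_samples, num_channels, coronal, sagittal, transversal);
        # averaging over the channel axis yields the documented
        # (num_samples, coronal, sagittal, transversal) output shape
        return X.mean(axis=1)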
| 36.988889
| 122
| 0.634425
|
c215c4e7f8d43e829eccf39245bc16c75be0cd2e
| 192
|
py
|
Python
|
test/tests/os_test.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | 1
|
2015-11-06T03:39:51.000Z
|
2015-11-06T03:39:51.000Z
|
test/tests/os_test.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | null | null | null |
test/tests/os_test.py
|
kevinxucs/pyston
|
bdb87c1706ac74a0d15d9bc2bae53798678a5f14
|
[
"Apache-2.0"
] | null | null | null |
# allow-warning: converting unicode literal to str
# currently broken:
# import os.path
import os
r1 = os.urandom(8)
r2 = os.urandom(8)
print len(r1), len(r2), type(r1), type(r2), r1 == r2
| 17.454545
| 52
| 0.682292
|
2bd7b7bb556c43fc7c963d719a7ae3a88dbb9bb5
| 14,110
|
py
|
Python
|
Protheus_WebApp/Modules/SIGACTB/CTBA161TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 17
|
2018-09-24T17:27:08.000Z
|
2021-09-16T19:09:46.000Z
|
Protheus_WebApp/Modules/SIGACTB/CTBA161TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 4
|
2018-09-24T17:30:32.000Z
|
2022-01-03T11:39:30.000Z
|
Protheus_WebApp/Modules/SIGACTB/CTBA161TESTCASE.py
|
98llm/tir-script-samples
|
0bff8393b79356aa562e9e6512c11ee6e039b177
|
[
"MIT"
] | 18
|
2019-06-07T17:41:34.000Z
|
2022-01-31T18:17:31.000Z
|
from tir import Webapp
import unittest
class CTBA161(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGACTB", "01/01/2019", "T1", "D MG 01", "34")
inst.oHelper.Program("CTBA161")
#########################################
    # Insert Managerial View - Synthetic - TRADITIONAL MODE
#########################################
def test_CTBA161_001(self):
self.oHelper.WaitShow("Cadastro Visao Gerencial")
self.oHelper.SetKey("F12")
self.oHelper.SetValue("Modo de Exibicao ?", "Tradicional")
self.oHelper.SetButton("OK")
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("D MG 01 ")
self.oHelper.SetValue("CTS_CODPLA", "T01")
# OrdemAut = self.oHelper.GetValue("CTS_ORDEM")
# EntiGerenAut = self.oHelper.GetValue("CTS_CONTAG")
# self.oHelper.CheckResult("CTS_ORDEM", OrdemAut)
# self.oHelper.CheckResult("CTS_CONTAG", EntiGerenAut)
self.oHelper.SetValue("CTS_CTASUP", "")
self.oHelper.SetValue("CTS_DESCCG", "ENTIDADE TIR 01 INCLUIR")
self.oHelper.SetValue("CTS_DETHCG", "TIR")
self.oHelper.SetValue("CTS_NORMAL", "2 - Credito")
self.oHelper.SetValue("CTS_COLUNA", "0")
self.oHelper.SetValue("CTS_CLASSE", "1 - Sintetica")
self.oHelper.SetValue("CTS_NOME", "TIR INCS")
self.oHelper.SetValue("CTS_VISENT", "1 - Sim")
self.oHelper.SetValue("CTS_FATSLD", "1 - Mantem")
self.oHelper.SetValue("CTS_TOTVIS", "1 - Sim")
self.oHelper.CheckView("Identificadores")
self.oHelper.ClickCheckBox("Total Geral")
self.oHelper.SetButton("Salvar")
self.oHelper.SetButton("Cancelar")
chave = "T01"
        # VIEW THE RECORDS JUST FILLED IN
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("CTS_CODPLA", "T01")
# self.oHelper.CheckResult("CTS_ORDEM", OrdemAut)
# self.oHelper.CheckResult("CTS_CONTAG", EntiGerenAut)
self.oHelper.CheckResult("CTS_CTASUP", "")
self.oHelper.CheckResult("CTS_DESCCG", "ENTIDADE TIR 01 INCLUIR")
self.oHelper.CheckResult("CTS_DETHCG", "TIR")
self.oHelper.CheckResult("CTS_NORMAL", "2 - Credito")
self.oHelper.CheckResult("CTS_COLUNA", "0")
self.oHelper.CheckResult("CTS_CLASSE", "1 - Sintetica")
self.oHelper.CheckResult("CTS_NOME", "TIR INCS")
self.oHelper.CheckResult("CTS_VISENT", "1 - Sim")
self.oHelper.CheckResult("CTS_FATSLD", "1 - Mantem")
self.oHelper.CheckResult("CTS_TOTVIS", "1 - Sim")
self.oHelper.SetButton("Cancelar")
self.oHelper.AssertTrue()
#####################################################
    # Change Spreadsheet with Remarks - Change Record
#####################################################
def test_CTBA161_002(self):
chave = "T020000000001001"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Outras Ações", "Alterar Cadastro")
self.oHelper.SetValue("CVE_DESCRI", "TIR ALT3")
self.oHelper.SetValue("CVE_OBS", "TIR OBSER")
self.oHelper.CheckResult("CVE_DESCRI", "TIR ALT3")
self.oHelper.CheckResult("CVE_OBS", "TIR OBSER")
self.oHelper.SetButton("Salvar")
self.oHelper.AssertTrue()
#####################################################
    # Change a view from analytic to synthetic - traditional mode
#####################################################
def test_CTBA161_003(self):
chave = "T050000000001001"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Alterar")
self.oHelper.SetValue("CTS_DESCCG", "ALTERADO")
self.oHelper.SetValue("CTS_CLASSE", "1 - Sintetica")
self.oHelper.SetValue("CTS_VISENT", "1 - Sim")
self.oHelper.SetValue("CTS_FATSLD", "1 - Mantem")
self.oHelper.SetValue("CTS_TOTVIS", "1 - Sim")
self.oHelper.ClickCheckBox("Total Geral")
self.oHelper.SetButton("Confirmar")
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
###
        # view the data
###
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("CTS_DESCCG", "ALTERADO")
self.oHelper.CheckResult("CTS_CLASSE", "1 - Sintetica")
self.oHelper.CheckResult("CTS_VISENT", "1 - Sim")
self.oHelper.CheckResult("CTS_FATSLD", "1 - Mantem")
self.oHelper.CheckResult("CTS_TOTVIS", "1 - Sim")
self.oHelper.SetButton("Confirmar")
self.oHelper.AssertTrue()
#####################################################
    ## Delete a Managerial View - TRADITIONAL MODE
#####################################################
def test_CTBA161_004(self):
chave = "T030000000001001"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Outras Ações", "Excluir")
self.oHelper.SetButton("Confirmar")
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("CTS_DESCCG", "TIR EXCLUSAO")
self.oHelper.SetButton("Confirmar")
self.oHelper.AssertFalse()
#####################################################
    ## Export structure, managerial view
#####################################################
def test_CTBA161_005(self):
chave = "T000000000001001"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Outras Ações","Exp. Estrutura")
self.oHelper.SetValue("Estrutura de visao ?","\\baseline\\visaotir.cve")
self.oHelper.SetButton("Ok")
self.oHelper.WaitShow("Exportacao gerada com sucesso")
self.oHelper.SetButton("Ok")
self.oHelper.AssertTrue()
#####################################################
    ## Import structure, managerial view
#####################################################
def test_CTBA161_006(self):
#self.oHelper.WaitShow("Cadastro Visao Gerencial")
#self.oHelper.SetKey("F12")
#self.oHelper.SetValue("Modo de Exibicao ?", "Arvore")
#self.oHelper.SetButton("OK")
self.oHelper.SetButton("Outras Ações","Imp. Estrutura")
self.oHelper.SetBranch("D MG 01 ")
self.oHelper.SetValue("Estrutura de visao ?","\\baseline\\tirImport.cve")
self.oHelper.SetButton("Ok")
self.oHelper.SetValue("CVE_CODIGO","T07")
self.oHelper.SetValue("CVE_DESCRI","TIR IMPORTADO")
self.oHelper.SetValue("CVE_OBS","TIR")
self.oHelper.SetButton("Salvar")
self.oHelper.WaitShow("Importacao finalizada com sucesso")
self.oHelper.SetButton("Fechar")
        ## waiting on automation - right-clicking ANALYTIC tree nodes may be implemented in the future
#self.oHelper.SetButton("Visualizar")
#self.oHelper.ClickTree("TIR IMPORTADO")
#self.oHelper.ClickTree("00000000000000000005-TIR ARVORE")
#self.oHelper.ClickLabel("00000000000000000006 - TIR ANALITICO TIR")
#self.oHelper.ClickIcon("Close")
self.oHelper.AssertTrue()
#########################################
    # Insert Managerial View - Analytic - TREE MODE
#########################################
def test_CTBA161_007(self):
        # Variables used to check the automatically generated codes
self.oHelper.WaitShow("Cadastro Visao Gerencial")
self.oHelper.SetKey("F12")
self.oHelper.SetValue("Modo de Exibicao ?", "Arvore")
self.oHelper.SetButton("OK")
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("D MG 01 ")
self.oHelper.WaitShow("Cadastro Visao Gerencial - INCLUIR")
self.oHelper.SetValue("CVE_CODIGO","T04")
self.oHelper.SetFocus("CVE_DESCRI")
self.oHelper.SetValue("CVE_DESCRI","TIR INC ARVORE")
self.oHelper.SetFocus("CVE_OBS")
self.oHelper.SetValue("CVE_OBS","TIR OBS")
self.oHelper.SetButton("Salvar")
        ## Managerial view node
self.oHelper.ClickTree("TIR INC ARVORE",right_click=True)
self.oHelper.CheckView("Incluir",element_type="str")
self.oHelper.ClickMenuPopUpItem("Incluir")
self.oHelper.WaitShow("Conta Gerencial - Inclusao")
cEntidGerAut=self.oHelper.GetValue("CVF_CONTAG")
cORDEMAut=self.oHelper.GetValue("CVF_ORDEM")
self.oHelper.CheckResult("CVF_CONTAG",cEntidGerAut)
self.oHelper.CheckResult("CVF_ORDEM",cORDEMAut)
self.oHelper.SetValue("CVF_CLASSE","1 - Sintetica")
self.oHelper.SetValue("CVF_DESCCG","TIR ARVORE")
self.oHelper.SetValue("CVF_NORMAL","2 - Credito")
self.oHelper.SetValue("CVF_COLUNA","0 - Nenhuma")
self.oHelper.ClickCheckBox("Total Geral")
self.oHelper.SetValue("CVF_TOTVIS", "1 - Sim")
self.oHelper.SetValue("CVF_VISENT", "1 - Sim")
self.oHelper.SetValue("CVF_FATSLD", "1 - Matem")
#self.oHelper.SetValue("CVF_DETHCG","")
self.oHelper.SetValue("CVF_TPVALO", "D - Padrao D/C")
##self.oHelper.SetValue("CVF_PICTUR", "")
self.oHelper.SetButton("Confirmar")
        ## Managerial entity node, creating an analytic entry
self.oHelper.ClickTree(cEntidGerAut+"-TIR ARVORE",right_click=True)
#self.oHelper.CheckView("Incluir",element_type="str")
self.oHelper.ClickMenuPopUpItem("Incluir")
self.oHelper.WaitShow("Conta Gerencial - Inclusao")
cEntidGerAut2 = self.oHelper.GetValue("CVF_CONTAG")
cORDEMAut2 = self.oHelper.GetValue("CVF_ORDEM")
self.oHelper.CheckResult("CVF_CONTAG",cEntidGerAut2)
self.oHelper.CheckResult("CVF_ORDEM",cORDEMAut2)
self.oHelper.SetValue("CVF_CLASSE","2 - Analitica")
self.oHelper.CheckResult("CVF_CTASUP",cEntidGerAut)
self.oHelper.SetValue("CVF_DESCCG","TIR ANALITICO")
self.oHelper.SetValue("CVF_NORMAL","1 - Debito")
self.oHelper.SetValue("CVF_TOTVIS", "2 - Nao")
self.oHelper.SetValue("CVF_VISENT", "1 - Sim")
self.oHelper.SetValue("CVF_FATSLD", "1 - Matem")
self.oHelper.SetValue("CVF_DETHCG","TIR")
self.oHelper.SetValue("CVF_TPVALO", "D - Padrao D/C")
self.oHelper.SetButton("Confirmar")
        #### OPEN A TASK FOR AUTOMATION / WAIT FOR RESOLUTION
#self.oHelper.ClickIcon("Alterar")
#self.oHelper.SetValue("CTS_CT1INI","000000002 ",grid=True,row=1)
#self.oHelper.SetValue("CTS_CT1FIM","000000002 ",grid=True,row=1)
#self.oHelper.SetValue("CTS_IDENTI","1 - Soma",grid=True,row=1)
#self.oHelper.SetButton("Confirmar")
#self.oHelper.SetButton("X")
#self.oHelper.SetButton("Cancelar")
self.oHelper.AssertTrue()
#####################################################
    ## Structure changes - TREE mode, complete with structure
#####################################################
def test_CTBA161_008(self):
self.oHelper.WaitShow("Cadastro Visao Gerencial")
self.oHelper.SetKey("F12")
self.oHelper.SetValue("Modo de Exibicao ?", "Arvore")
self.oHelper.SetButton("OK")
chave = "T06"
self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
self.oHelper.SetButton("Alterar")
        ## Changing the synthetic structure
        # previous version:
# self.oHelper.ClickTree("00000000000000000010-TIR ARVORE")
# self.oHelper.CheckView("")
# self.oHelper.ClickTree("00000000000000000011-TIR SINT",right_click=True)
##
        # change suggested by task ca-2467
self.oHelper.ClickTree(" 00000000000000000010-TIR ARVORE> 00000000000000000011-TIR SINT",right_click=True)
self.oHelper.CheckView("Alterar",element_type="str")
self.oHelper.ClickMenuPopUpItem("Alterar")
self.oHelper.WaitShow("Conta Gerencial - Alteracao")
self.oHelper.SetValue("CVF_DESCCG","TIR SINT ALTERADO")
self.oHelper.SetButton("Confirmar")
        # delete a synthetic entity that has two analytic children, one of them with linked accounts
self.oHelper.WaitShow("Conta Gerencial")
self.oHelper.ClickTree("00000000000000000010-TIR ARVORE")
self.oHelper.ClickTree("00000000000000000011-TIR SINT ALTERADO",right_click=True)
self.oHelper.CheckView("Excluir",element_type="str")
self.oHelper.ClickMenuPopUpItem("Excluir")
self.oHelper.CheckHelp("Confirma exclusao da conta gerencial","Ok")
        # delete the remaining synthetic structure
self.oHelper.ClickTree("00000000000000000010-TIR ARVORE",right_click=True)
self.oHelper.CheckView("Excluir",element_type="str")
self.oHelper.ClickMenuPopUpItem("Excluir")
self.oHelper.CheckHelp("Confirma exclusao da conta gerencial (serao excluidas todas as contas dependentes).","Ok")
#self.oHelper.TearDown()
self.oHelper.AssertTrue()
    # exploratory test that closes via the "close" button; do not run it for now
##def test_CTBA161_009(self):
##self.oHelper.WaitShow("Cadastro Visao Gerencial")
##self.oHelper.SetKey("F12")
##self.oHelper.SetValue("Modo de Exibicao ?", "Arvore")
##self.oHelper.SetButton("OK")
#chave = "1660000000001001"
#chave = "T04"
#self.oHelper.SearchBrowse(f"D MG 01 {chave}", key=1, index=True)
#self.oHelper.SetButton("Visualizar")
#self.oHelper.WaitShow("Visao Gerencial")
#self.oHelper.SetButton("x") #Tentativa com Close x,X
#self.oHelper.ClickIcon("x") #Tentativa com x, X, Close
#self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main()
| 41.622419
| 122
| 0.614033
|
cd0711e8ba003df48d328efb409420523f09b14f
| 5,288
|
py
|
Python
|
lanenet_model/lanenet_discriminative_loss.py
|
aiLibrary/lanenet-lane-detection
|
6abedbe1ba6da2e6f6e3935c6d26bd79c55a0d45
|
[
"Apache-2.0"
] | 2
|
2018-11-05T01:31:06.000Z
|
2019-02-02T08:03:24.000Z
|
lanenet_model/lanenet_discriminative_loss.py
|
aiLibrary/lanenet-lane-detection
|
6abedbe1ba6da2e6f6e3935c6d26bd79c55a0d45
|
[
"Apache-2.0"
] | null | null | null |
lanenet_model/lanenet_discriminative_loss.py
|
aiLibrary/lanenet-lane-detection
|
6abedbe1ba6da2e6f6e3935c6d26bd79c55a0d45
|
[
"Apache-2.0"
] | 2
|
2020-04-10T08:55:33.000Z
|
2021-07-07T06:22:29.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time    : 18-5-11 3:48 PM
# @Author : Luo Yao
# @Site : http://icode.baidu.com/repos/baidu/personal-code/Luoyao
# @File : lanenet_discriminative_loss.py
# @IDE: PyCharm Community Edition
"""
Implements the discriminative loss function for LaneNet.
"""
import tensorflow as tf
def discriminative_loss_single(
prediction,
correct_label,
feature_dim,
label_shape,
delta_v,
delta_d,
param_var,
param_dist,
param_reg):
"""
    Instance segmentation loss function from Eq. (1) of the paper.
:param prediction: inference of network
:param correct_label: instance label
:param feature_dim: feature dimension of prediction
:param label_shape: shape of label
:param delta_v: cut off variance distance
:param delta_d: cut off cluster distance
:param param_var: weight for intra cluster variance
:param param_dist: weight for inter cluster distances
:param param_reg: weight regularization
"""
    # Flatten the label and prediction so that each pixel becomes one row
correct_label = tf.reshape(
correct_label, [
label_shape[1] * label_shape[0]])
reshaped_pred = tf.reshape(
prediction, [
label_shape[1] * label_shape[0], feature_dim])
    # Count the number of distinct instances in the label
unique_labels, unique_id, counts = tf.unique_with_counts(correct_label)
counts = tf.cast(counts, tf.float32)
num_instances = tf.size(unique_labels)
    # Compute the mean pixel-embedding vector for each instance
segmented_sum = tf.unsorted_segment_sum(
reshaped_pred, unique_id, num_instances)
mu = tf.div(segmented_sum, tf.reshape(counts, (-1, 1)))
mu_expand = tf.gather(mu, unique_id)
    # Variance term l_var: pull each embedding towards its instance mean
distance = tf.norm(tf.subtract(mu_expand, reshaped_pred), axis=1)
distance = tf.subtract(distance, delta_v)
distance = tf.clip_by_value(distance, 0., distance)
distance = tf.square(distance)
l_var = tf.unsorted_segment_sum(distance, unique_id, num_instances)
l_var = tf.div(l_var, counts)
l_var = tf.reduce_sum(l_var)
l_var = tf.divide(l_var, tf.cast(num_instances, tf.float32))
    # Distance term l_dist: push the means of different instances apart
mu_interleaved_rep = tf.tile(mu, [num_instances, 1])
mu_band_rep = tf.tile(mu, [1, num_instances])
mu_band_rep = tf.reshape(
mu_band_rep,
(num_instances *
num_instances,
feature_dim))
mu_diff = tf.subtract(mu_band_rep, mu_interleaved_rep)
    # Mask out the zero rows (pairs formed by an instance with itself)
intermediate_tensor = tf.reduce_sum(tf.abs(mu_diff), axis=1)
zero_vector = tf.zeros(1, dtype=tf.float32)
bool_mask = tf.not_equal(intermediate_tensor, zero_vector)
mu_diff_bool = tf.boolean_mask(mu_diff, bool_mask)
mu_norm = tf.norm(mu_diff_bool, axis=1)
mu_norm = tf.subtract(2. * delta_d, mu_norm)
mu_norm = tf.clip_by_value(mu_norm, 0., mu_norm)
mu_norm = tf.square(mu_norm)
l_dist = tf.reduce_mean(mu_norm)
    # Regularization term from the original discriminative loss paper
l_reg = tf.reduce_mean(tf.norm(mu, axis=1))
    # Combine the terms with the weights proposed in the original discriminative loss paper
param_scale = 1.
l_var = param_var * l_var
l_dist = param_dist * l_dist
l_reg = param_reg * l_reg
loss = param_scale * (l_var + l_dist + l_reg)
return loss, l_var, l_dist, l_reg
def discriminative_loss(prediction, correct_label, feature_dim, image_shape,
delta_v, delta_d, param_var, param_dist, param_reg):
"""
    Iterate over the batch and compute the discriminative loss per sample, following the paper.
:return: discriminative loss and its three components
"""
def cond(label, batch, out_loss, out_var, out_dist, out_reg, i):
return tf.less(i, tf.shape(batch)[0])
def body(label, batch, out_loss, out_var, out_dist, out_reg, i):
disc_loss, l_var, l_dist, l_reg = discriminative_loss_single(
prediction[i], correct_label[i], feature_dim, image_shape, delta_v, delta_d, param_var, param_dist, param_reg)
out_loss = out_loss.write(i, disc_loss)
out_var = out_var.write(i, l_var)
out_dist = out_dist.write(i, l_dist)
out_reg = out_reg.write(i, l_reg)
return label, batch, out_loss, out_var, out_dist, out_reg, i + 1
# TensorArray is a data structure that support dynamic writing
output_ta_loss = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True)
output_ta_var = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True)
output_ta_dist = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True)
output_ta_reg = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True)
_, _, out_loss_op, out_var_op, out_dist_op, out_reg_op, _ = tf.while_loop(
cond, body, [
correct_label, prediction, output_ta_loss, output_ta_var, output_ta_dist, output_ta_reg, 0])
out_loss_op = out_loss_op.stack()
out_var_op = out_var_op.stack()
out_dist_op = out_dist_op.stack()
out_reg_op = out_reg_op.stack()
disc_loss = tf.reduce_mean(out_loss_op)
l_var = tf.reduce_mean(out_var_op)
l_dist = tf.reduce_mean(out_dist_op)
l_reg = tf.reduce_mean(out_reg_op)
return disc_loss, l_var, l_dist, l_reg
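# --- Editor's usage sketch (TF1 graph mode, not part of the original module) --
# Shapes and hyper-parameters below are illustrative only; LaneNet's own config
# supplies the real values.
#   pred  = tf.placeholder(tf.float32, [None, 256, 512, 4])  # pixel embeddings
#   label = tf.placeholder(tf.float32, [None, 256, 512])     # instance ids
#   loss, l_var, l_dist, l_reg = discriminative_loss(
#       pred, label, feature_dim=4, image_shape=(256, 512),
#       delta_v=0.5, delta_d=3.0, param_var=1.0, param_dist=1.0, param_reg=0.001)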
| 34.337662
| 122
| 0.648638
|
1e801ab7a3e0231506df234e64493ccf2222ed75
| 4,258
|
py
|
Python
|
workalendar/tests/test_registry_europe.py
|
mr-shovel/workalendar
|
969386e68d258afc49ee875af7ebe6ac606c7ab3
|
[
"MIT"
] | null | null | null |
workalendar/tests/test_registry_europe.py
|
mr-shovel/workalendar
|
969386e68d258afc49ee875af7ebe6ac606c7ab3
|
[
"MIT"
] | null | null | null |
workalendar/tests/test_registry_europe.py
|
mr-shovel/workalendar
|
969386e68d258afc49ee875af7ebe6ac606c7ab3
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from ..europe import (
Austria, Belgium, Bulgaria, Croatia, Cyprus, CzechRepublic, Estonia,
Denmark, Finland, France,
# FranceAlsaceMoselle, # TODO: Should we add it to the registry?
Greece, Hungary, Iceland, Ireland, Italy, Latvia, Lithuania, Luxembourg,
Malta, Monaco, Netherlands, Norway, Poland, Portugal, Romania, Russia,
Slovakia, Slovenia, Spain,
# Catalonia, # TODO: Add it to registry
Sweden, UnitedKingdom,
UnitedKingdomNorthernIreland,
)
# Switzerland
from ..europe import (
Switzerland,
Aargau, AppenzellInnerrhoden, AppenzellAusserrhoden, Bern, BaselLandschaft,
BaselStadt, Fribourg, Geneva, Glarus, Graubunden, Jura, Luzern, Neuchatel,
Nidwalden, Obwalden, StGallen, Schaffhausen, Solothurn, Schwyz, Thurgau,
Ticino, Uri, Vaud, Valais, Zug, Zurich
)
# Germany
from ..europe import (
Germany, BadenWurttemberg, Bavaria, Berlin, Brandenburg, Bremen,
Hamburg, Hesse, MecklenburgVorpommern, LowerSaxony,
NorthRhineWestphalia, RhinelandPalatinate, Saarland, Saxony,
SaxonyAnhalt, SchleswigHolstein, Thuringia
)
from ..registry import registry
classes = (v for k, v in registry.region_registry.items())
classes = list(classes)
GERMANY_REGION_CLASSES = (
BadenWurttemberg, Bavaria, Berlin, Brandenburg, Bremen,
Hamburg, Hesse, MecklenburgVorpommern, LowerSaxony,
NorthRhineWestphalia, RhinelandPalatinate, Saarland, Saxony,
SaxonyAnhalt, SchleswigHolstein, Thuringia
)
SWITZERLAND_REGION_CLASSES = (
Aargau, AppenzellInnerrhoden, AppenzellAusserrhoden, Bern, BaselLandschaft,
BaselStadt, Fribourg, Geneva, Glarus, Graubunden, Jura, Luzern, Neuchatel,
Nidwalden, Obwalden, StGallen, Schaffhausen, Solothurn, Schwyz, Thurgau,
Ticino, Uri, Vaud, Valais, Zug, Zurich
)
class RegistryEurope(TestCase):
def test_europe(self):
self.assertIn(Austria, classes)
self.assertIn(Belgium, classes)
self.assertIn(Bulgaria, classes)
self.assertIn(Croatia, classes)
self.assertIn(Cyprus, classes)
self.assertIn(CzechRepublic, classes)
self.assertIn(Estonia, classes)
self.assertIn(Denmark, classes)
self.assertIn(Finland, classes)
self.assertIn(France, classes)
self.assertIn(Greece, classes)
self.assertIn(Hungary, classes)
self.assertIn(Iceland, classes)
self.assertIn(Ireland, classes)
self.assertIn(Italy, classes)
self.assertIn(Latvia, classes)
self.assertIn(Lithuania, classes)
self.assertIn(Luxembourg, classes)
self.assertIn(Malta, classes)
self.assertIn(Monaco, classes)
self.assertIn(Netherlands, classes)
self.assertIn(Norway, classes)
self.assertIn(Poland, classes)
self.assertIn(Portugal, classes)
self.assertIn(Romania, classes)
self.assertIn(Russia, classes)
self.assertIn(Slovakia, classes)
self.assertIn(Slovenia, classes)
self.assertIn(Spain, classes)
self.assertIn(Sweden, classes)
self.assertIn(Switzerland, classes)
self.assertIn(Vaud, classes)
self.assertIn(Geneva, classes)
self.assertIn(UnitedKingdom, classes)
self.assertIn(UnitedKingdomNorthernIreland, classes)
# Germany & Länders
self.assertIn(Germany, classes)
for klass in GERMANY_REGION_CLASSES:
self.assertIn(klass, classes)
for klass in SWITZERLAND_REGION_CLASSES:
self.assertIn(klass, classes)
def test_germany_subregion(self):
# Get all the subregions
classes = (v for k, v in registry.get_subregions('DE').items())
classes = list(classes)
for klass in GERMANY_REGION_CLASSES:
self.assertIn(klass, classes)
def test_switzerland_subregion(self):
# Get all the subregions
classes = (v for k, v in registry.get_subregions('CH').items())
classes = list(classes)
for klass in SWITZERLAND_REGION_CLASSES:
self.assertIn(klass, classes)
def test_slovenia_code(self):
# Source: https://github.com/peopledoc/workalendar/pull/291
self.assertEqual(registry.region_registry['SI'], Slovenia)
| 38.017857
| 79
| 0.696336
|
3912aed56d820c1f18029cf5906e025a30525f97
| 3,983
|
py
|
Python
|
src/client/ServerLogin.py
|
UnluckyAj/FaceRecognitionApp
|
6da12360159368231a0a9773fded4365e8479890
|
[
"MIT"
] | null | null | null |
src/client/ServerLogin.py
|
UnluckyAj/FaceRecognitionApp
|
6da12360159368231a0a9773fded4365e8479890
|
[
"MIT"
] | null | null | null |
src/client/ServerLogin.py
|
UnluckyAj/FaceRecognitionApp
|
6da12360159368231a0a9773fded4365e8479890
|
[
"MIT"
] | null | null | null |
import socket
address = ("192.168.43.67", 5000)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(address)
s.listen(10000)
flag=0
print("Server Running...")
global course
global sem
global stream
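# Message protocol (inferred from the handlers below): each request is a single
# UTF-8 string whose fields are separated by "/"; the first field selects the action:
#   "Login/<email>/<password>"                 -> replies '1' (password match) or '0'
#   "LoginSuccessful/<course>/<sem>/<stream>"  -> stores the class selection
#   "Department/<course>/<sem>/<stream>"       -> same as above
#   "Image"                                    -> raw JPEG bytes follow, saved to input.jpg
#   "Request"                                  -> replies with the attendance tuple
#   "End"                                      -> replies '1', then reads the final attendance string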
while True:
try:
client, address = s.accept()
print("Client arrived " , address)
client_data = client.recv(2048)
## print("a")
client_data_s = client_data.decode("utf-8")
print(client_data_s)
## print("b")
client_data=client_data_s.split("/")
## print("c")
print(client_data)
if(str(client_data[0])=="Login"):
import mysql.connector
cnx = mysql.connector.connect(user='root', password='adi', host='127.0.0.1', database='project')
if(cnx):
cursor=cnx.cursor()
try:
q=("select password from sregister where email=%s")
data=(client_data[1],)
cursor.execute(q,data)
result=cursor.fetchone()
print(result[0])
print(client_data[1])
if(result[0]==client_data[2]):
server_data='1'
server_data = server_data.encode("utf-8")
client.send(server_data)
else:
server_data='0'
server_data=server_data.encode("utf-8")
client.send(server_data)
break
except Exception as e:
print( type(e))
cnx.commit()
elif(str(client_data[0])=="LoginSuccessful"):
server_data='1'
server_data = server_data.encode("utf-8")
client.send(server_data)
course=client_data[1]
sem=client_data[2]
stream=client_data[3]
elif(client_data[0]=="Department"):
server_data='1'
## print(server_data)
server_data = server_data.encode("utf-8")
client.send(server_data)
course=client_data[1]
sem=client_data[2]
stream=client_data[3]
elif(str(client_data[0])=="Image"):
server_data='1'
server_data = server_data.encode("utf-8")
client.send(server_data)
## print("aashish")
filename = open('input.jpg', 'wb')
while True:
strng = client.recv(512)
if not strng:
flag=1
break
filename.write(strng)
## print("out of loop")
filename.close()
if(course!="" and sem!="" and stream!=""):
from traineer import Trainee
Trainee(course,sem,stream)
from detector import detect
tup=detect(stream,client).detection()
from datalist import database
global tup1
tup1=database(tup[0],tup[1],tup[2]).WOL()
elif(str(client_data[0])=="Request"):
print("request")
print(tup1)
server_data = '1'
server_data = server_data.encode("utf-8")
client.send(server_data)
server_data = str(tup1).encode("utf-8")
client.send(server_data)
elif(str(client_data[0]) == "End"):
server_data = '1'
server_data = server_data.encode("utf-8")
client.send(server_data)
strng = client.recv(1024)
if(strng != ""):
from final import Final_att
Final_att(strng)
print(strng)
## s.close()
print('End')
else:
print("Error")
except Exception as e:
print( type(e), " - ", str(e) )
| 30.638462
| 108
| 0.476274
|
1f5e804946fdb37e4835f510aa63c6b50e537ee4
| 1,002
|
py
|
Python
|
axformer/noam_opt.py
|
sytelus/axformer
|
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
|
[
"MIT"
] | null | null | null |
axformer/noam_opt.py
|
sytelus/axformer
|
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
|
[
"MIT"
] | null | null | null |
axformer/noam_opt.py
|
sytelus/axformer
|
2492582f0e37a1edaa21f3c9f88ce1bbee91c90f
|
[
"MIT"
] | null | null | null |
import torch
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def get_std_opt(model):
return NoamOpt(model.src_embed[0].d_model, 2, 4000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
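# --- Editor's usage sketch (not part of the original module) -----------------
# factor=2 and warmup=4000 mirror get_std_opt; the d_model of 512 and the toy
# Linear model are illustrative only.
if __name__ == "__main__":
    toy = torch.nn.Linear(8, 8)
    opt = NoamOpt(512, 2, 4000,
                  torch.optim.Adam(toy.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
    opt.step()  # on step 1 this sets lr = 2 * 512**-0.5 * min(1**-0.5, 1 * 4000**-1.5)
    print(opt.rate())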
| 31.3125
| 84
| 0.546906
|
5017c317102e57f3ed27052dcd2797bbdd349b87
| 657
|
py
|
Python
|
blog/migrations/0002_tag.py
|
greener2/simple-blog
|
ca37e5d22c704713b0c447afd9bfeba4232600e8
|
[
"MIT"
] | null | null | null |
blog/migrations/0002_tag.py
|
greener2/simple-blog
|
ca37e5d22c704713b0c447afd9bfeba4232600e8
|
[
"MIT"
] | 5
|
2021-03-18T23:18:36.000Z
|
2021-09-22T18:30:27.000Z
|
blog/migrations/0002_tag.py
|
greener2/simple-blog
|
ca37e5d22c704713b0c447afd9bfeba4232600e8
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.2 on 2020-02-02 15:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=24)),
('posts', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tags', to='blog.Post')),
],
),
]
| 28.565217
| 127
| 0.598174
|
73ab612033c0e432631323e9e229012fd9c116f8
| 94
|
py
|
Python
|
zunzun/signals/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
zunzun/signals/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
zunzun/signals/__init__.py
|
aprezcuba24/zunzun
|
cc294d9dfb84695be0ed1425cf946a0f4ea644a9
|
[
"MIT"
] | null | null | null |
from .signal import Signal # noqa
from .listerner_connector import ListenerConnector # noqa
| 31.333333
| 58
| 0.808511
|
61e7af7de7b9d6b03a0490cd6442e0ad786ef650
| 5,457
|
py
|
Python
|
pydocmd/document.py
|
Pandinosaurus/pydoc-markdown
|
38a6b48edfc132586aef7b5a71c5fccce62a9518
|
[
"MIT"
] | null | null | null |
pydocmd/document.py
|
Pandinosaurus/pydoc-markdown
|
38a6b48edfc132586aef7b5a71c5fccce62a9518
|
[
"MIT"
] | null | null | null |
pydocmd/document.py
|
Pandinosaurus/pydoc-markdown
|
38a6b48edfc132586aef7b5a71c5fccce62a9518
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module implements the structural representation of an API documentation
in separate documents and symbolic names. The final documentation is rendered
from this structured representation.
"""
from __future__ import print_function
import os
class Section(object):
"""
A section represents a part of a #Document. It contains Markdown-formatted
content that will be rendered into a file at some point.
# Attributes
doc (Document): The document that the section belongs to.
identifier (str, None): The globally unique identifier of the section. This
identifier usually matches the name of the element that the section
describes (eg. a class or function) and will be used for cross-referencing.
title (str, None): The title of the section. If specified, it will be
rendered before `section.content` and the header-size will depend on
the `section.depth`.
depth (int): The depth of the section, defaults to 1. Currently only affects
the header-size that is rendered for the `section.title`.
content (str): The Markdown-formatted content of the section.
"""
def __init__(self, doc, identifier=None, title=None, depth=1, content=None, header_type='html'):
self.doc = doc
self.identifier = identifier
self.title = title
self.depth = depth
self.content = content if content is not None else '*Nothing to see here.*'
self.header_type = header_type
def render(self, stream):
"""
Render the section into *stream*.
"""
if self.header_type == 'html':
print('<h{depth} id="{id}">{title}</h{depth}>\n'
.format(depth=self.depth, id=self.identifier, title=self.title),
file=stream)
elif self.header_type == 'markdown':
print('\n' + ('#' * self.depth), self.title, file=stream)
else:
raise ValueError('Invalid header type: %s' % self.header_type)
print(self.content, file=stream)
@property
def index(self):
"""
Returns the #Index that this section is associated with, accessed via
    `section.doc`.
"""
    return self.doc.index
class Document(object):
"""
Represents a single document that may contain several #Section#s. Every
document *must* have a relative URL associated with it.
# Attributes
index (Index): The index that the document belongs to.
url (str): The relative URL of the document.
"""
def __init__(self, index, url):
self.index = index
self.url = url
self.sections = []
class Index(object):
"""
The index manages all documents and sections globally. It keeps track of
the symbolic names allocated for the sections to be able to link to them
from other sections.
# Attributes
documents (dict):
sections (dict):
"""
def __init__(self):
self.documents = {}
self.sections = {}
def new_document(self, filename, url=None):
"""
Create a new document.
# Arguments
filename (str): The filename of the document. Must end with `.md`.
url (str): The relative URL of the document. If omitted, will be
automatically deduced from *filename* (same without the `.md` suffix).
# Raises
ValueError: If *filename* does not end with `.md`.
ValueError: If *filename* is not a relative path.
ValueError: If a document with the specified *filename* already exists.
"""
if not filename.endswith('.md'):
raise ValueError('filename must end with `.md`')
if os.path.isabs(filename):
raise ValueError('filename must be relative')
if filename in self.documents:
raise ValueError(
'document filename {!r} already used'.format(filename))
if not url:
url = filename[:-3]
doc = Document(self, url)
self.documents[filename] = doc
return doc
def new_section(self, doc, *args, **kwargs):
"""
Create a new section in the specified document. The arguments for this
method match the parameters for the #Section constructor.
# Raises
ValueError: If the section identifier is already used.
"""
section = Section(doc, *args, **kwargs)
if section.identifier:
if section.identifier in self.sections:
raise ValueError('section identifier {!r} already used'
.format(section.identifier))
self.sections[section.identifier] = section
doc.sections.append(section)
return section
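# --- Editor's usage sketch (not part of the original module) -----------------
# Ties the three classes together; the file name and section text are
# illustrative only.
if __name__ == "__main__":
    import sys
    index = Index()
    doc = index.new_document('api.md')  # url defaults to 'api'
    section = index.new_section(doc, identifier='mylib.func', title='mylib.func',
                                depth=2, content='Markdown body of the section.')
    section.render(sys.stdout)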
| 34.537975
| 98
| 0.70295
|
9ee29657571df689b1b48a83f181d031e4d91d21
| 3,820
|
py
|
Python
|
app/fedcv/object_detection/data/coco/coco_base.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
app/fedcv/object_detection/data/coco/coco_base.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
app/fedcv/object_detection/data/coco/coco_base.py
|
ray-ruisun/FedML
|
24ff30d636bb70f64e94e9ca205375033597d3dd
|
[
"Apache-2.0"
] | null | null | null |
import os
from abc import ABC
from pathlib import Path, PurePath
from typing import Literal
from torch.utils.data import Dataset
from .utils import _download_file, _extract_file
class COCOBase(Dataset, ABC):
root_dir: PurePath
annotations_zip_path: PurePath
train_zip_path: PurePath
val_zip_path: PurePath
test_zip_path: PurePath
annotations_path: PurePath
images_path: PurePath
instances_path: PurePath
downloaded: bool
def __init__(self,
root_dir: str = "../../data/coco/",
download_dataset: bool = False,
year: Literal['2014', '2017'] = '2017',
split: Literal['train', 'val', 'test'] = 'train') -> None:
"""
An abstract class for COCO based datasets
Args:
root_dir: The path to the COCO images and annotations
download_dataset: Specify whether to download the dataset if not present
year: The year of the COCO dataset to use (2014, 2017)
split: The split of the data to be used (train, val, test)
"""
self.root_dir = Path('{root}/{year}'.format(root=root_dir, year=year))
self.annotations_zip_path = Path('{root}/annotations_trainval{year}.zip'.format(root=self.root_dir, year=year))
self.train_zip_path = Path('{root}/train{year}.zip'.format(root=self.root_dir, year=year))
self.val_zip_path = Path('{root}/val{year}.zip'.format(root=self.root_dir, year=year))
self.test_zip_path = Path('{root}/test{year}.zip'.format(root=self.root_dir, year=year))
self.annotations_path = Path('{root}/annotations'.format(root=self.root_dir))
self.images_path = Path('{root}/{split}{year}'.format(root=self.root_dir, split=split, year=year))
self.instances_path = Path(
'{root}/annotations/instances_{split}{year}.json'.format(root=self.root_dir, split=split, year=year))
if download_dataset and (not os.path.exists(self.images_path) or not os.path.exists(self.annotations_path)):
self._download_dataset(year, split)
def _download_dataset(self, year: Literal['2014', '2017'], split: Literal['train', 'test', 'val']) -> None:
"""
Downloads the dataset from COCO website.
Args:
year: The year of the dataset to download
split: The split of the dataset to download
"""
files = {
'annotations': {
'name': 'Train-Val {} Annotations'.format(year),
'file_path': self.annotations_zip_path,
'url': 'http://images.cocodataset.org/annotations/annotations_trainval{}.zip'.format(year),
'unit': 'MB'
},
'train': {
'name': 'Train {} Dataset'.format(year),
'file_path': self.train_zip_path,
'url': 'http://images.cocodataset.org/zips/train{}.zip'.format(year),
'unit': 'GB'
},
'val': {
'name': 'Validation {} Dataset'.format(year),
'file_path': self.val_zip_path,
'url': 'http://images.cocodataset.org/zips/val{}.zip'.format(year),
'unit': 'GB'
},
'test': {
'name': 'Test {} Dataset'.format(year),
'file_path': self.test_zip_path,
'url': 'http://images.cocodataset.org/zips/test{}.zip'.format(year),
'unit': 'GB'
}
}
if split == 'train' or split == 'val':
_download_file(**files['annotations'])
_extract_file(files['annotations']['file_path'], self.root_dir)
_download_file(**files[split])
_extract_file(files[split]['file_path'], self.root_dir)
self.downloaded = True
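# --- Hedged sketch (not part of the original file) ----------------------------
# COCOBase leaves __len__/__getitem__ to subclasses. A concrete dataset would
# typically read self.instances_path and self.images_path; the subclass below
# is illustrative only and returns no real samples.
class _COCODetectionSketch(COCOBase):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._image_ids = []  # e.g. image ids parsed from self.instances_path

    def __len__(self):
        return len(self._image_ids)

    def __getitem__(self, index):
        # a real subclass would return (image, target) built from
        # self.images_path and the annotations in self.instances_path
        raise NotImplementedError

# e.g. _COCODetectionSketch(root_dir="../../data/coco/", download_dataset=False,
#                           year="2017", split="val")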
| 42.921348
| 119
| 0.587958
|
73d43188f3d5e4cd8ff0a46f8866e98d730decb4
| 3,210
|
py
|
Python
|
python/tests/test_load.py
|
JX7P/libucl
|
d1cf4e9783fa96b9399f6585e2dc9a42e4d64746
|
[
"BSD-2-Clause"
] | 1,301
|
2015-01-01T10:42:07.000Z
|
2022-03-21T08:00:14.000Z
|
python/tests/test_load.py
|
JX7P/libucl
|
d1cf4e9783fa96b9399f6585e2dc9a42e4d64746
|
[
"BSD-2-Clause"
] | 137
|
2015-01-04T18:15:11.000Z
|
2022-03-21T20:46:58.000Z
|
python/tests/test_load.py
|
JX7P/libucl
|
d1cf4e9783fa96b9399f6585e2dc9a42e4d64746
|
[
"BSD-2-Clause"
] | 161
|
2015-01-04T14:15:23.000Z
|
2022-03-20T04:11:01.000Z
|
from .compat import unittest
import ucl
class LoadTest(unittest.TestCase):
def test_no_args(self):
with self.assertRaises(TypeError):
ucl.load()
def test_multi_args(self):
with self.assertRaises(TypeError):
ucl.load(0,0)
def test_none(self):
self.assertEqual(ucl.load(None), None)
def test_null(self):
data = "a: null"
valid = { "a" : None }
self.assertEqual(ucl.load(data), valid)
def test_int(self):
data = "a : 1"
valid = { "a" : 1 }
self.assertEqual(ucl.load(data), valid)
def test_braced_int(self):
data = "{a : 1}"
valid = { "a" : 1 }
self.assertEqual(ucl.load(data), valid)
def test_nested_int(self):
data = "a : { b : 1 }"
valid = { "a" : { "b" : 1 } }
self.assertEqual(ucl.load(data), valid)
def test_str(self):
data = "a : b"
valid = { "a" : "b" }
self.assertEqual(ucl.load(data), valid)
def test_float(self):
data = "a : 1.1"
valid = {"a" : 1.1}
self.assertEqual(ucl.load(data), valid)
def test_boolean(self):
data = (
"a : True;" \
"b : False"
)
valid = { "a" : True, "b" : False }
self.assertEqual(ucl.load(data), valid)
def test_empty_ucl(self):
self.assertEqual(ucl.load("{}"), {})
def test_single_brace(self):
self.assertEqual(ucl.load("{"), {})
def test_single_back_brace(self):
self.assertEqual(ucl.load("}"), {})
def test_single_square_forward(self):
self.assertEqual(ucl.load("["), [])
def test_invalid_ucl(self):
with self.assertRaisesRegex(ValueError, "unfinished key$"):
ucl.load('{ "var"')
def test_comment_ignored(self):
self.assertEqual(ucl.load("{/*1*/}"), {})
def test_1_in(self):
valid = {
'key1': [
'value',
'value2',
'value;',
1.0,
-0xdeadbeef,
'0xdeadbeef.1',
'0xreadbeef',
-1e-10,
1,
True,
False,
True,
]
}
with open("../tests/basic/1.in", "r") as in1:
self.assertEqual(ucl.load(in1.read()), valid)
def test_every_type(self):
data = ("""{
"key1": value;
"key2": value2;
"key3": "value;"
"key4": 1.0,
"key5": -0xdeadbeef
"key6": 0xdeadbeef.1
"key7": 0xreadbeef
"key8": -1e-10,
"key9": 1
"key10": true
"key11": no
"key12": yes
}""")
valid = {
'key1': 'value',
'key2': 'value2',
'key3': 'value;',
'key4': 1.0,
'key5': -3735928559,
'key6': '0xdeadbeef.1',
'key7': '0xreadbeef',
'key8': -1e-10,
'key9': 1,
'key10': True,
'key11': False,
'key12': True,
}
self.assertEqual(ucl.load(data), valid)
| 26.097561
| 67
| 0.454206
|
b0c7760b11b2683ba4aab32370a67187d81a986b
| 12,060
|
py
|
Python
|
pyinfrabox/tests/test_json.py
|
fspaniol/InfraBox-cli
|
da174cf4b189e8acd889a7b1de09fa56350ee984
|
[
"Apache-2.0"
] | null | null | null |
pyinfrabox/tests/test_json.py
|
fspaniol/InfraBox-cli
|
da174cf4b189e8acd889a7b1de09fa56350ee984
|
[
"Apache-2.0"
] | null | null | null |
pyinfrabox/tests/test_json.py
|
fspaniol/InfraBox-cli
|
da174cf4b189e8acd889a7b1de09fa56350ee984
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from pyinfrabox import ValidationError
from pyinfrabox.infrabox import validate_json
class TestDockerCompose(unittest.TestCase):
def raises_expect(self, data, expected):
try:
validate_json(data)
assert False
except ValidationError as e:
self.assertEqual(e.message, expected)
def test_version(self):
self.raises_expect({}, "#: property 'version' is required")
self.raises_expect({'version': 'asd', 'jobs': []}, "#version: must be an int")
self.raises_expect({'version': '1', 'jobs': []}, "#version: must be an int")
self.raises_expect({'version': 2, 'jobs': []}, "#version: unsupported version")
def test_jobs(self):
self.raises_expect({'version': 1, 'jobs': 'asd'}, "#jobs: must be an array")
self.raises_expect({'version': 1, 'jobs': [{}]}, "#jobs[0]: does not contain a 'type'")
def test_empty_jobs(self):
validate_json({'version': 1, 'jobs': []})
def test_dep_defined_later(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "source",
"docker_file": "Dockerfile",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["compile"]
}, {
"type": "docker",
"name": "compile",
"docker_file": "Dockerfile",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}]
}
self.raises_expect(d, "#jobs[0].depends_on: Job 'compile' not found")
def test_dep_not_found(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["not_found"]
}]
}
self.raises_expect(d, "#jobs[0].depends_on: Job 'not_found' not found")
def test_deps_must_be_unique(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "source",
"docker_file": "Dockerfile",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}, {
"type": "docker",
"name": "compile",
"docker_file": "Dockerfile",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"depends_on": ["source", "source"]
}]
}
self.raises_expect(d, "#jobs[1].depends_on: 'source' duplicate dependencies")
def test_duplicate_job_name(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}, {
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["compile"]
}]
}
self.raises_expect(d, "#jobs[1].name: Job name 'compile' already exists")
def test_dependency_conditions(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}, {
"type": "docker",
"name": "compile2",
"docker_file": "test/Dockerfile_benchmarks",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": [{"job": "compile", "on": True}]
}]
}
self.raises_expect(d, "#jobs[1].depends_on[0].on: must be a list")
d['jobs'][1]['depends_on'] = [{"job": "compile", "on": []}]
self.raises_expect(d, "#jobs[1].depends_on[0].on: must not be empty")
d['jobs'][1]['depends_on'] = [{"job": "compile", "on": [True]}]
self.raises_expect(d, "#jobs[1].depends_on[0].on: True is not a valid value")
d['jobs'][1]['depends_on'] = [{"job": "compile", "on": ["not valid"]}]
self.raises_expect(d, "#jobs[1].depends_on[0].on: not valid is not a valid value")
d['jobs'][1]['depends_on'] = [{"job": "not-valid", "on": ["*"]}]
self.raises_expect(d, "#jobs[1].depends_on: Job 'not-valid' not found")
d['jobs'][1]['depends_on'] = [{"job": "compile", "on": ["error", "error"]}]
self.raises_expect(d, "#jobs[1].depends_on[0].on: error used twice")
def test_empty_dep_array(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": []
}]
}
self.raises_expect(d, "#jobs[0].depends_on: must not be empty")
def test_invalid_name(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "../blub",
"docker_file": "Dockerfile",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}]
}
self.raises_expect(d, "#jobs[0].name: '../blub' not a valid value")
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "blub'",
"docker_file": "Dockerfile",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}]
}
self.raises_expect(d, "#jobs[0].name: 'blub\'' not a valid value")
def test_may_not_depend_on_itself(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "Dockerfile",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["compile"]
}]
}
self.raises_expect(d, "#jobs[0]: Job 'compile' may not depend on itself")
def test_may_not_create_jobs(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "Create Jobs",
"docker_file": "test/Dockerfile_benchmarks",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}]
}
self.raises_expect(d, "#jobs[0].name: 'Create Jobs' not a valid value")
def test_environment(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "test",
"docker_file": "Dockerfile",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"environment": None
}]
}
self.raises_expect(d, "#jobs[0].environment: must be an object")
d['jobs'][0]['environment'] = []
self.raises_expect(d, "#jobs[0].environment: must be an object")
d['jobs'][0]['environment'] = {'key': 123}
self.raises_expect(d, "#jobs[0].environment.key: must be a string or object")
d['jobs'][0]['environment'] = {'key': {}}
self.raises_expect(d, "#jobs[0].environment.key: must contain a $secret")
d['jobs'][0]['environment'] = {'key': {'$secret': None}}
self.raises_expect(d, "#jobs[0].environment.key.$secret: is not a string")
d['jobs'][0]['environment'] = {}
validate_json(d)
def test_deployments(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "test",
"docker_file": "Dockerfile",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"deployments": None
}]
}
self.raises_expect(d, "#jobs[0].deployments: must be an array")
d['jobs'][0]['deployments'] = []
self.raises_expect(d, "#jobs[0].deployments: must not be empty")
d['jobs'][0]['deployments'] = [{}]
self.raises_expect(d, "#jobs[0].deployments[0]: does not contain a 'type'")
d['jobs'][0]['deployments'] = [{'type': 'unknown'}]
self.raises_expect(d, "#jobs[0].deployments[0]: type 'unknown' not supported")
d['jobs'][0]['deployments'] = [{'type': 'docker-registry', 'host': 'hostname',
'repository': 'repo', 'username': 'user', 'password': 'value'}]
self.raises_expect(d, "#jobs[0].deployments[0].password: must be an object")
d['jobs'][0]['deployments'] = [{'type': 'docker-registry', 'host': 'hostname', 'repository': 'repo',
'username': 'user', 'password': {'$secret': 'blub'}}]
validate_json(d)
def test_build_arguments(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "test",
"docker_file": "Dockerfile",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_arguments": None
}]
}
self.raises_expect(d, "#jobs[0].build_arguments: must be an object")
d['jobs'][0]['build_arguments'] = []
self.raises_expect(d, "#jobs[0].build_arguments: must be an object")
d['jobs'][0]['build_arguments'] = {'key': 123}
self.raises_expect(d, "#jobs[0].build_arguments.key: is not a string")
d['jobs'][0]['build_arguments'] = {'key': {}}
self.raises_expect(d, "#jobs[0].build_arguments.key: is not a string")
d['jobs'][0]['build_arguments'] = {}
validate_json(d)
def test_valid(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"build_only": False,
"resources": {"limits": {"cpu": 1, "memory": 1024}},
}, {
"type": "docker",
"name": "benchmark_server",
"docker_file": "test/Dockerfile_benchmarks",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["compile"]
}, {
"type": "docker",
"name": "test_server",
"docker_file": "test/Dockerfile_test_server",
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False,
"depends_on": ["compile"]
}]
}
validate_json(d)
def test_repository(self):
d = {
"version": 1,
"jobs": [{
"type": "docker",
"name": "compile",
"docker_file": "test/Dockerfile_benchmarks",
"repository" : { "full_history": True, "clone": False, "submodules": True },
"resources": {"limits": {"cpu": 1, "memory": 1024}},
"build_only": False
}]
}
validate_json(d)
| 35.470588
| 108
| 0.461609
|
fa9e0c876314653054d6f30dec3f6f20c7c828bf
| 6,277
|
py
|
Python
|
OParcels/fibres_OP.py
|
SalishSeaCast/analysis-jose
|
9eed549f0e3117de69c54c162307f16bf4fe0af3
|
[
"Apache-2.0"
] | null | null | null |
OParcels/fibres_OP.py
|
SalishSeaCast/analysis-jose
|
9eed549f0e3117de69c54c162307f16bf4fe0af3
|
[
"Apache-2.0"
] | null | null | null |
OParcels/fibres_OP.py
|
SalishSeaCast/analysis-jose
|
9eed549f0e3117de69c54c162307f16bf4fe0af3
|
[
"Apache-2.0"
] | null | null | null |
import sys
import xarray as xr
import numpy as np
import os
import yaml
import math
from datetime import datetime, timedelta
from parcels import FieldSet, Field, VectorField, ParticleSet, JITParticle, ErrorCode, ParcelsRandom, Variable
sys.path.append('/home/jvalenti/MOAD/analysis-jose/notebooks/parcels')
from Kernels_biofilm import DeleteParticle, Buoyancy, AdvectionRK4_3D, Stokes_drift, Beaching, Unbeaching
from OP_functions_biofilm import *
def fibers_OP(config,local=0,restart=0):
param = load_config(config)
#Definitions
start = datetime(param['startdate']['year'], param['startdate']['month'], param['startdate']['day']) #Start date
Tmax = param['param']['length'] # Set Time length [days]
dt = param['param']['dt'] #toggle between - or + to pick backwards or forwards
N = param['param']['N'] # number of deploying locations
n = param['param']['n'] # 1000 # number of particles per location
dmin = param['param']['dmin'] #minimum depth
dd = param['param']['dd'] #max depth difference from dmin
name = param['file']['name'] #name output file
dtp = param['param']['dtp'] #how often particle released in hours
# Define paths
paths = path(local)
#Set outfall coordinates (Modify to choose other deploying location)
coord=xr.open_dataset(paths['coords'],decode_times=False)
outf_lat=coord['nav_lat'][445,304]
outf_lon=coord['nav_lon'][445,304]
clon, clat = [float(outf_lon)],[float(outf_lat)]
duration = timedelta(days=Tmax)
x_offset, y_offset, z = p_deploy(N,n,dmin,dd)
#Set deploy locations
lon = np.zeros([N,n])
lat = np.zeros([N,n])
for i in range(N):
lon[i,:]=(clon[i] + x_offset[i,:])
lat[i,:]=(clat[i] + y_offset[i,:])
#Set start date time and the name of the output file
daterange = [start+timedelta(days=i) for i in range(Tmax)]
fn = name + '_'.join(d.strftime('%Y%m%d')+'_1n' for d in [start, start+duration]) + '.nc'
outfile = os.path.join(paths['out'], fn)
####BUILD FIELDS FOR SIMULATION######
#Fill in the list of variables that you want to use as fields
varlist=['U','V','W','R']
filenames,variables,dimensions=filename_set(start,Tmax,varlist,local)
field_set=FieldSet.from_nemo(filenames, variables, dimensions, allow_time_extrapolation=True)
varlist=['US','VS','WL']
filenames,variables,dimensions=filename_set(start,Tmax,varlist,local)
us = Field.from_netcdf(filenames['US'], variables['US'], dimensions,allow_time_extrapolation=True)
vs = Field.from_netcdf(filenames['VS'], variables['VS'], dimensions,allow_time_extrapolation=True)
wl = Field.from_netcdf(filenames['WL'], variables['WL'], dimensions,allow_time_extrapolation=True)
field_set.add_field(us)
field_set.add_field(vs)
field_set.add_field(wl)
field_set.add_vector_field(VectorField("stokes", us, vs, wl))
filenames,variables,dimensions=filename_set(start,Tmax,['Bathy'],local)
Bth = Field.from_netcdf(filenames['Bathy'], variables['Bathy'], dimensions,allow_time_extrapolation=True)
field_set.add_field(Bth)
MPParticle = particle_maker(param)
######RUN OCEAN PARCELS WITH DEFINED PARTICLE AND PRESET FIELDS
if restart==1:
name_temp=find_temp(paths['out'])
os.system(f"cd {paths['out']} && parcels_convert_npydir_to_netcdf {name_temp}")
outfile=newest(paths['out'])
pset = ParticleSet.from_particlefile(field_set, MPParticle,outfile)
else:
pset = ParticleSet.from_list(field_set, MPParticle, lon=lon, lat=lat, depth=z, repeatdt = timedelta(hours=dtp))
k_sink = pset.Kernel(Buoyancy)
k_waves = pset.Kernel(Stokes_drift)
k_beach = pset.Kernel(Beaching)
k_unbeach = pset.Kernel(Unbeaching)
pset.execute(AdvectionRK4_3D + k_sink + k_waves + k_beach + k_unbeach,
runtime=duration,
dt=dt,
output_file=pset.ParticleFile(name=outfile, outputdt=timedelta(hours=1)),
recovery={ErrorCode.ErrorOutOfBounds: DeleteParticle})
def load_config(config_yaml):
with open(config_yaml[0]) as f:
config = yaml.safe_load(f)
return config
def particle_maker(config):
#Define particle properties
class MPParticle(JITParticle):
if 'ro' in config['particle']:
ro = Variable('ro', initial = config['particle']['ro']) # config['particle']['ro']
if 'diameter' in config['particle']:
diameter = Variable('diameter', initial = config['particle']['diameter'])
if 'length' in config['particle']:
length = Variable('length', initial = config['particle']['length'])
if 'Lb' in config['particle']:
            Lb = Variable('Lb', initial = config['particle']['Lb']) # days needed for the particle to reach a 67% probability of beaching while inside the beaching zone (500 m)
if 'Db' in config['particle']:
Db = Variable('Db', initial = config['particle']['Db']) #Distance at which particles can randomly beach.
if 'Ub' in config['particle']:
Ub = Variable('Ub', initial = config['particle']['Ub']) #days to have 67% probability of unbeaching
if 'beached' in config['particle']:
beached = Variable('beached', initial = 0)
if 'Ws' in config['particle']:
            Ws = Variable('Ws', initial = config['particle']['Ws']) # sinking velocity, roughly 200 m/day
if 'tau' in config['particle']:
tau = Variable('tau', initial = 0) # track age particle
return MPParticle
def find_temp(rootdir):
dirs=[]
for file in os.listdir(rootdir):
d = os.path.join(rootdir, file)
if os.path.isdir(d):
dirs.append(d)
temp=sorted(dirs, key=lambda x: os.path.getctime(x), reverse=True)[:1][0]
return temp[-12:]
def newest(path):
files = os.listdir(path)
paths = [os.path.join(path, basename) for basename in files]
return max(paths, key=os.path.getctime)
if __name__=="__main__":
    try:
        config, restart = sys.argv[1:]
        config = [str(config)]
        restart = int(restart)
    except ValueError:
        # No restart flag supplied: treat all arguments as the config and start a fresh run
        config = sys.argv[1:]
        restart = 0
    fibers_OP(config, restart=restart)
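# --- Hedged sketch of the expected YAML config (not part of the original) -----
# The structure below is inferred only from the keys read by fibers_OP() and
# particle_maker(); every value is an illustrative assumption.
EXAMPLE_CONFIG = {
    "startdate": {"year": 2018, "month": 1, "day": 1},
    "param": {
        "length": 10,   # simulation length [days]
        "dt": 90,       # integration step [s]; sign selects forward/backward
        "N": 1,         # number of deployment locations
        "n": 100,       # particles per location
        "dmin": 0,      # minimum deployment depth [m]
        "dd": 5,        # depth range below dmin [m]
        "dtp": 24,      # release interval [hours]
    },
    "file": {"name": "fibres_"},
    "particle": {
        "ro": 1380.0, "diameter": 1.6e-5, "length": 1.0e-3,
        "Lb": 1.0, "Db": 500.0, "Ub": 10.0, "beached": 0,
        "Ws": 200.0, "tau": 0,
    },
}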
| 42.412162
| 165
| 0.653656
|
82dadb590040265c035e6b84d2fdcdc1963acafc
| 4,977
|
py
|
Python
|
kolibri/core/device/models.py
|
mrpau/kolibri
|
94ed5887ec083398e55ee5a291df6d09c062bfe1
|
[
"MIT"
] | null | null | null |
kolibri/core/device/models.py
|
mrpau/kolibri
|
94ed5887ec083398e55ee5a291df6d09c062bfe1
|
[
"MIT"
] | null | null | null |
kolibri/core/device/models.py
|
mrpau/kolibri
|
94ed5887ec083398e55ee5a291df6d09c062bfe1
|
[
"MIT"
] | null | null | null |
import platform
import time
from uuid import uuid4
from django.conf import settings
from django.core.cache import cache
from django.db import models
from morango.models import UUIDField
from .utils import LANDING_PAGE_LEARN
from .utils import LANDING_PAGE_SIGN_IN
from kolibri.core.auth.models import Facility
from kolibri.core.auth.models import FacilityUser
from kolibri.plugins.app.utils import interface
device_permissions_fields = ["is_superuser", "can_manage_content"]
class DevicePermissions(models.Model):
"""
This class stores metadata about device permissions for FacilityUsers.
"""
user = models.OneToOneField(
FacilityUser,
on_delete=models.CASCADE,
related_name="devicepermissions",
blank=False,
null=False,
primary_key=True,
)
is_superuser = models.BooleanField(default=False)
can_manage_content = models.BooleanField(default=False)
DEVICE_SETTINGS_CACHE_KEY = "device_settings_cache_key"
class DeviceSettingsManager(models.Manager):
def get(self, **kwargs):
if DEVICE_SETTINGS_CACHE_KEY not in cache:
model = super(DeviceSettingsManager, self).get(**kwargs)
cache.set(DEVICE_SETTINGS_CACHE_KEY, model, 600)
else:
model = cache.get(DEVICE_SETTINGS_CACHE_KEY)
return model
def get_device_hostname():
# Get the device hostname to set it as the default value of name field in
# DeviceSettings model
hostname = platform.node()
# make sure the default name does not exceed max length of the field
return hostname[:50]
def app_is_enabled():
return interface.enabled
class DeviceSettings(models.Model):
"""
This class stores data about settings particular to this device
"""
LANDING_PAGE_CHOICES = [
(LANDING_PAGE_SIGN_IN, "Sign-in page"),
(LANDING_PAGE_LEARN, "Learn page"),
]
objects = DeviceSettingsManager()
is_provisioned = models.BooleanField(default=False)
language_id = models.CharField(
max_length=15, default=settings.LANGUAGE_CODE, blank=True, null=True
)
default_facility = models.ForeignKey(
Facility, on_delete=models.SET_NULL, blank=True, null=True
)
landing_page = models.CharField(
max_length=7, choices=LANDING_PAGE_CHOICES, default=LANDING_PAGE_SIGN_IN
)
allow_guest_access = models.BooleanField(default=True)
allow_peer_unlisted_channel_import = models.BooleanField(default=False)
allow_learner_unassigned_resource_access = models.BooleanField(default=True)
name = models.CharField(max_length=50, default=get_device_hostname)
allow_other_browsers_to_connect = models.BooleanField(default=app_is_enabled)
def save(self, *args, **kwargs):
self.pk = 1
self.full_clean()
super(DeviceSettings, self).save(*args, **kwargs)
cache.set(DEVICE_SETTINGS_CACHE_KEY, self, 600)
CONTENT_CACHE_KEY_CACHE_KEY = "content_cache_key"
class ContentCacheKey(models.Model):
"""
This class stores a cache key for content models that should be updated
whenever the content metadata stored on the device changes.
"""
key = models.IntegerField(default=time.time)
def save(self, *args, **kwargs):
self.pk = 1
super(ContentCacheKey, self).save(*args, **kwargs)
@classmethod
def update_cache_key(cls):
cache_key, created = cls.objects.get_or_create()
cache_key.key = time.time()
cache_key.save()
cache.set(CONTENT_CACHE_KEY_CACHE_KEY, cache_key.key, 5000)
return cache_key
@classmethod
def get_cache_key(cls):
key = cache.get(CONTENT_CACHE_KEY_CACHE_KEY)
if key is None:
try:
cache_key = cls.objects.get()
except cls.DoesNotExist:
cache_key = cls.update_cache_key()
key = cache_key.key
cache.set(CONTENT_CACHE_KEY_CACHE_KEY, key, 5000)
return key
APP_KEY_CACHE_KEY = "app_key"
class DeviceAppKey(models.Model):
"""
This class stores a key that is checked to make sure that a webview
is making requests from a privileged device (i.e. from inside an
app-wrapper webview)
"""
key = UUIDField(default=uuid4)
def save(self, *args, **kwargs):
self.pk = 1
super(DeviceAppKey, self).save(*args, **kwargs)
@classmethod
def update_app_key(cls):
app_key, created = cls.objects.get_or_create()
app_key.key = uuid4()
app_key.save()
cache.set(APP_KEY_CACHE_KEY, app_key.key, 5000)
return app_key
@classmethod
def get_app_key(cls):
key = cache.get(APP_KEY_CACHE_KEY)
if key is None:
try:
app_key = cls.objects.get()
except cls.DoesNotExist:
app_key = cls.update_app_key()
key = app_key.key
cache.set(APP_KEY_CACHE_KEY, key, 5000)
return key
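# --- Hedged usage sketch (not part of the original module) --------------------
# How the cache-key helpers above are typically consumed; calling this requires
# a configured Django environment, and the function name is illustrative only.
def _refresh_content_cache_key():
    # bump the key after content metadata changes, then read it back
    ContentCacheKey.update_cache_key()
    return ContentCacheKey.get_cache_key()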
| 29.625
| 81
| 0.683142
|
5f3555eb3b96ead2cda9bcad92065c6a596f3e9a
| 1,531
|
py
|
Python
|
[Kaleido-subs]/Completed/Granblue Fantasy Season 2 [BD]/GBF2BD_06.py
|
LightArrowsEXE/Encoding-Projects
|
4ea96a5b25a7710f615ada5ff25949c496492b53
|
[
"MIT"
] | 57
|
2019-01-31T17:32:46.000Z
|
2022-03-23T05:46:51.000Z
|
[Kaleido-subs]/Completed/Granblue Fantasy Season 2 [BD]/GBF2BD_06.py
|
LightArrowsEXE/Encoding-Projects
|
4ea96a5b25a7710f615ada5ff25949c496492b53
|
[
"MIT"
] | null | null | null |
[Kaleido-subs]/Completed/Granblue Fantasy Season 2 [BD]/GBF2BD_06.py
|
LightArrowsEXE/Encoding-Projects
|
4ea96a5b25a7710f615ada5ff25949c496492b53
|
[
"MIT"
] | 12
|
2019-04-30T06:16:13.000Z
|
2022-03-14T16:15:07.000Z
|
from typing import Any, Dict, Tuple
import vapoursynth as vs
from lvsfunc.misc import source
from vardautomation import FileInfo, PresetAAC, PresetBD, VPath
from project_module import chain, encode
core = vs.core
# Sources
JP_BD = FileInfo(r'BDMV/GRANBLUE_FANTASY_SEASON2_3/BDMV/STREAM/00002.m2ts', (None, -27),
idx=lambda x: source(x, force_lsmas=True, cachedir=''),
preset=[PresetBD, PresetAAC])
JP_BD.name_file_final = VPath(fr"premux/{JP_BD.name} (Premux).mkv")
JP_BD.a_src_cut = VPath(f"{JP_BD.name}_cut.aac")
JP_BD.do_qpfile = True
zones: Dict[Tuple[int, int], Dict[str, Any]] = { # Zones for x265
(6105, 6443): {'b': 0.75},
(9429, 9870): {'b': 0.75},
(15544, 15802): {'b': 0.75},
(26423, 26806): {'b': 0.75},
(28250, 28786): {'b': 0.75},
(29909, 30180): {'b': 0.75},
}
if __name__ == '__main__':
filtered = chain.filterchain(JP_BD.clip_cut)
encode.Encoder(JP_BD, filtered).run(zones=zones)
elif __name__ == '__vapoursynth__':
filtered = chain.filterchain(JP_BD.clip_cut)
if not isinstance(filtered, vs.VideoNode):
raise RuntimeError("Multiple output nodes were set when `vspipe` only expected one")
else:
filtered.set_output(0)
else:
JP_BD.clip_cut.set_output(0)
FILTERED = chain.filterchain(JP_BD.clip_cut)
if not isinstance(FILTERED, vs.VideoNode):
for i, clip_filtered in enumerate(FILTERED, start=1): # type: ignore
clip_filtered.set_output(i)
else:
FILTERED.set_output(1)
| 33.282609
| 92
| 0.667538
|
7965d7fc2b3450155e47320165654538f4547481
| 467
|
py
|
Python
|
social/posts/forms.py
|
vyahello/social-media
|
77cabf08dee1da0eb4e5c277391d92b7d829b67d
|
[
"MIT"
] | null | null | null |
social/posts/forms.py
|
vyahello/social-media
|
77cabf08dee1da0eb4e5c277391d92b7d829b67d
|
[
"MIT"
] | 2
|
2022-02-07T09:44:15.000Z
|
2022-03-22T16:43:18.000Z
|
social/posts/forms.py
|
vyahello/social-media
|
77cabf08dee1da0eb4e5c277391d92b7d829b67d
|
[
"MIT"
] | null | null | null |
from django import forms
from posts import models
class PostForm(forms.ModelForm):
class Meta:
fields = ("message", "group")
model = models.Post
def __init__(self, *args, **kwargs):
user = kwargs.pop("user", None)
super().__init__(*args, **kwargs)
if user is not None:
self.fields["group"].queryset = models.Group.objects.filter(
pk__in=user.groups.values_list("group__pk")
)
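# --- Hedged usage sketch (not part of the original module) --------------------
# A view would pass the request user so that __init__ above can restrict the
# group choices; `request` is assumed to be an authenticated HttpRequest.
def build_post_form(request):
    return PostForm(request.POST or None, user=request.user)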
| 27.470588
| 72
| 0.595289
|
eee7b6197909f52db2915549517c44ba1620cd3a
| 8,303
|
py
|
Python
|
official/vision/beta/losses/retinanet_losses.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | 1
|
2021-05-22T12:50:50.000Z
|
2021-05-22T12:50:50.000Z
|
official/vision/beta/losses/retinanet_losses.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
official/vision/beta/losses/retinanet_losses.py
|
DemonDamon/mask-detection-based-on-tf2odapi
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
[
"MIT"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for detection models."""
# Import libraries
import tensorflow as tf
def focal_loss(logits, targets, alpha, gamma):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size
[batch, d_1, ..., d_k, n_classes].
targets: A float32 tensor of size
[batch, d_1, ..., d_k, n_classes].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
Returns:
loss: A float32 Tensor of size
[batch, d_1, ..., d_k, n_classes] representing
normalized loss on the prediction map.
"""
with tf.name_scope('focal_loss'):
positive_label_mask = tf.equal(targets, 1.0)
cross_entropy = (
tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
probs = tf.sigmoid(logits)
probs_gt = tf.where(positive_label_mask, probs, 1.0 - probs)
# With small gamma, the implementation could produce NaN during back prop.
modulator = tf.pow(1.0 - probs_gt, gamma)
loss = modulator * cross_entropy
weighted_loss = tf.where(positive_label_mask, alpha * loss,
(1.0 - alpha) * loss)
return weighted_loss
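# --- Hedged sanity check (not part of the original file) ----------------------
# With gamma=0 the modulator is 1, so focal_loss should reduce to the
# alpha-weighted sigmoid cross-entropy. Shapes and values are illustrative.
def _focal_loss_gamma_zero_check():
  logits = tf.constant([[2.0, -1.0, 0.5]])
  targets = tf.constant([[1.0, 0.0, 1.0]])
  alpha = 0.25
  fl = focal_loss(logits, targets, alpha=alpha, gamma=0.0)
  ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)
  weighted_ce = tf.where(tf.equal(targets, 1.0), alpha * ce, (1.0 - alpha) * ce)
  tf.debugging.assert_near(fl, weighted_ce)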
class FocalLoss(tf.keras.losses.Loss):
"""Implements a Focal loss for classification problems.
Reference:
[Focal Loss for Dense Object Detection](https://arxiv.org/abs/1708.02002).
"""
def __init__(self,
alpha,
gamma,
num_classes,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `FocalLoss`.
Args:
alpha: The `alpha` weight factor for binary class imbalance.
gamma: The `gamma` focusing parameter to re-weight loss.
num_classes: Number of foreground classes.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
self._num_classes = num_classes
self._alpha = alpha
self._gamma = gamma
super(FocalLoss, self).__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
"""Invokes the `FocalLoss`.
Args:
      y_true: Ordered Dict with level to [batch, height, width, num_anchors].
        for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9], dtype=tf.int64),
        4: tf.Tensor(shape=[32, 256, 256, 9], dtype=tf.int64)}
      y_pred: Ordered Dict with level to [batch, height, width, num_anchors *
        num_classes]. for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 21], dtype=tf.float32),
        4: tf.Tensor(shape=[32, 256, 256, 9 * 21], dtype=tf.float32)}
Returns:
Summed loss float `Tensor`.
"""
flattened_cls_outputs = []
flattened_labels = []
batch_size = None
for level in y_pred.keys():
cls_output = y_pred[level]
label = y_true[level]
if batch_size is None:
batch_size = cls_output.shape[0] or tf.shape(cls_output)[0]
flattened_cls_outputs.append(
tf.reshape(cls_output, [batch_size, -1, self._num_classes]))
flattened_labels.append(tf.reshape(label, [batch_size, -1]))
cls_outputs = tf.concat(flattened_cls_outputs, axis=1)
labels = tf.concat(flattened_labels, axis=1)
cls_targets_one_hot = tf.one_hot(labels, self._num_classes)
return focal_loss(
tf.cast(cls_outputs, dtype=tf.float32),
tf.cast(cls_targets_one_hot, dtype=tf.float32), self._alpha,
self._gamma)
def get_config(self):
config = {
'alpha': self._alpha,
'gamma': self._gamma,
'num_classes': self._num_classes,
}
base_config = super(FocalLoss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class RetinanetBoxLoss(tf.keras.losses.Loss):
"""RetinaNet box Huber loss."""
def __init__(self,
delta,
reduction=tf.keras.losses.Reduction.AUTO,
name=None):
"""Initializes `RetinanetBoxLoss`.
Args:
delta: A float, the point where the Huber loss function changes from a
quadratic to linear.
reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
loss. Default value is `AUTO`. `AUTO` indicates that the reduction
option will be determined by the usage context. For almost all cases
this defaults to `SUM_OVER_BATCH_SIZE`. When used with
`tf.distribute.Strategy`, outside of built-in training loops such as
`tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
will raise an error. Please see this custom training [tutorial](
https://www.tensorflow.org/tutorials/distribute/custom_training) for
more details.
name: Optional name for the op. Defaults to 'retinanet_class_loss'.
"""
self._huber_loss = tf.keras.losses.Huber(
delta=delta, reduction=tf.keras.losses.Reduction.NONE)
self._delta = delta
super(RetinanetBoxLoss, self).__init__(reduction=reduction, name=name)
def call(self, y_true, y_pred):
"""Computes box detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
y_true: Ordered Dict with level to [batch, height, width,
num_anchors * 4] for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
        4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.float32)}
      y_pred: Ordered Dict with level to [batch, height, width,
        num_anchors * 4]. for example,
        {3: tf.Tensor(shape=[32, 512, 512, 9 * 4], dtype=tf.float32),
        4: tf.Tensor(shape=[32, 256, 256, 9 * 4], dtype=tf.float32)}
    Returns:
      a float tensor representing total box regression loss.
"""
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
flattened_box_outputs = []
flattened_labels = []
batch_size = None
for level in y_pred.keys():
box_output = y_pred[level]
label = y_true[level]
if batch_size is None:
batch_size = box_output.shape[0] or tf.shape(box_output)[0]
flattened_box_outputs.append(tf.reshape(box_output, [batch_size, -1, 4]))
flattened_labels.append(tf.reshape(label, [batch_size, -1, 4]))
box_outputs = tf.concat(flattened_box_outputs, axis=1)
labels = tf.concat(flattened_labels, axis=1)
loss = self._huber_loss(labels, box_outputs)
return loss
def get_config(self):
config = {
'delta': self._delta,
}
base_config = super(RetinanetBoxLoss, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
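# --- Hedged usage sketch (not part of the original file) ----------------------
# Tiny multi-level dict inputs matching the docstrings above; num_classes=3 and
# 9 anchors per location are assumptions made only for this example.
def _example_retinanet_losses():
  num_classes, anchors = 3, 9
  y_true_cls = {3: tf.zeros([2, 4, 4, anchors], dtype=tf.int32),
                4: tf.zeros([2, 2, 2, anchors], dtype=tf.int32)}
  y_pred_cls = {3: tf.zeros([2, 4, 4, anchors * num_classes]),
                4: tf.zeros([2, 2, 2, anchors * num_classes])}
  cls_loss_fn = FocalLoss(alpha=0.25, gamma=1.5, num_classes=num_classes,
                          reduction=tf.keras.losses.Reduction.SUM)
  y_true_box = {3: tf.zeros([2, 4, 4, anchors * 4]),
                4: tf.zeros([2, 2, 2, anchors * 4])}
  y_pred_box = {3: tf.zeros([2, 4, 4, anchors * 4]),
                4: tf.zeros([2, 2, 2, anchors * 4])}
  box_loss_fn = RetinanetBoxLoss(delta=0.1,
                                 reduction=tf.keras.losses.Reduction.SUM)
  return cls_loss_fn(y_true_cls, y_pred_cls), box_loss_fn(y_true_box, y_pred_box)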
| 40.111111
| 81
| 0.652174
|
cfabc4d57fa1ca098aa4fddb4193795b253f3cfc
| 179
|
py
|
Python
|
numplates/owners/admin.py
|
Jurevic/numplates
|
c53dc5e32a0d597a1fbc5e4567f735113310e0b9
|
[
"MIT"
] | null | null | null |
numplates/owners/admin.py
|
Jurevic/numplates
|
c53dc5e32a0d597a1fbc5e4567f735113310e0b9
|
[
"MIT"
] | null | null | null |
numplates/owners/admin.py
|
Jurevic/numplates
|
c53dc5e32a0d597a1fbc5e4567f735113310e0b9
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Owner
class OwnerAdmin(admin.ModelAdmin):
fields = ['first_name', 'last_name']
admin.site.register(Owner, OwnerAdmin)
| 16.272727
| 40
| 0.759777
|
a3a2cdcd16aa3ef2b7d3f1917f2fcd51d2ee1798
| 19,626
|
py
|
Python
|
leo/plugins/free_layout.py
|
drmikecrowe/leo-editor
|
16d8e1e8564799496c7a90c5d3cc85461deca7e7
|
[
"MIT"
] | null | null | null |
leo/plugins/free_layout.py
|
drmikecrowe/leo-editor
|
16d8e1e8564799496c7a90c5d3cc85461deca7e7
|
[
"MIT"
] | null | null | null |
leo/plugins/free_layout.py
|
drmikecrowe/leo-editor
|
16d8e1e8564799496c7a90c5d3cc85461deca7e7
|
[
"MIT"
] | null | null | null |
#@+leo-ver=5-thin
#@+node:ekr.20120419093256.10048: * @file ../plugins/free_layout.py
#@+<< docstring >>
#@+node:ekr.20110319161401.14467: ** << docstring >> (free_layout.py)
"""
Free layout
===========
Adds flexible panel layout through context menus on the handles between panels.
Uses NestedSplitter, a more intelligent QSplitter, from leo.plugins.nested_splitter
Requires Qt.
Commands (bindable with @settings-->@keys-->@shortcuts):
free-layout-load
Open context menu for loading a different layout,
    convenient keyboard shortcut target.
free-layout-restore
Use the layout this outline had when it was opened.
free-layout-zoom
Zoom or unzoom the current pane
"""
#@-<< docstring >>
# Written by Terry Brown.
#@+<< imports >>
#@+node:tbrown.20110203111907.5520: ** << imports >> (free_layout.py)
import leo.core.leoGlobals as g
from leo.core.leoQt import QtWidgets, QtCore
if QtWidgets:
from leo.plugins.nested_splitter import NestedSplitter
# NestedSplitterChoice
import json
#@-<< imports >>
#@+others
#@+node:tbrown.20110203111907.5521: ** free_layout:init
def init():
'''Return True if the free_layout plugin can be loaded.'''
return g.app.gui.guiName() == "qt"
#@+node:ekr.20110318080425.14389: ** class FreeLayoutController
class FreeLayoutController(object):
"""Glue between Leo and the NestedSplitter gui widget. All Leo aware
code should be in here, none in NestedSplitter.
*ALSO* implements the provider interface for NestedSplitter, in
ns_provides, ns_provide, ns_context, ns_do_context, which
NestedSplitter uses as callbacks to populate splitter-handle context-menu
and the empty pane Action button menu:
see (ctrl-click this URL)
file://{{g.getBaseDirectory(c)}}/LeoPyRef.leo#Code-->Qt%20gui-->@file%20../plugins/nested_splitter.py-->class%20NestedSplitter%20(QSplitter)-->register_provider
ns_provides
tell NestedSplitter which Action button items we can provide
ns_provide
provide the advertised service when an Action button item we
advertised is selected
ns_context
tell NestedSplitter which splitter-handle context-menu items
we can provide
ns_do_context
provide the advertised service when a splitter-handle context-menu
item we advertised is selected
"""
#@+others
#@+node:ekr.20110318080425.14390: *3* flc.ctor
def __init__(self, c):
'''Ctor for FreeLayoutController class.'''
# g.trace('(FreeLayoutController)',c) # ,g.callers(files=True))
# if hasattr(c,'free_layout'):
# return
self.c = c
# c.free_layout = self
# To be removed
# g.registerHandler('after-create-leo-frame',self.bindControllers)
# attach to an outline
g.registerHandler('after-create-leo-frame', self.init)
# now that the outline's set up (plugins etc.), load layout for
# outline, can't do that sooner as plugins must be loaded first
# to provide their widgets in panels etc.
g.registerHandler('after-create-leo-frame2', self.loadLayouts)
# self.init()
#@+node:tbrown.20110203111907.5522: *3* flc.init
def init(self, tag, keys):
"""Attach to an outline and
- add tags to widgets to indicate that they're essential
(tree, body, log-window-tabs) and
- tag the log-window-tabs widget as the place to put widgets
          from free-layout panes which are closed
- register this FreeLayoutController as a provider of menu items
for NestedSplitter
"""
c = self.c
if c != keys.get('c'):
return
# g.trace(c.frame.title)
# Careful: we could be unit testing.
splitter = self.get_top_splitter() # A NestedSplitter.
if not splitter:
# g.trace('no splitter!')
return None
# by default NestedSplitter's context menus are disabled, needed
# once to globally enable them
NestedSplitter.enabled = True
# when NestedSplitter disposes of children, it will either close
# them, or move them to another designated widget. Here we set
# up two designated widgets
logTabWidget = splitter.findChild(QtWidgets.QWidget, "logTabWidget")
splitter.root.holders['_is_from_tab'] = logTabWidget
splitter.root.holders['_is_permanent'] = 'TOP'
# allow body and tree widgets to be "removed" to tabs on the log tab panel
bodyWidget = splitter.findChild(QtWidgets.QFrame, "bodyFrame")
bodyWidget._is_from_tab = "Body"
treeWidget = splitter.findChild(QtWidgets.QFrame, "outlineFrame")
treeWidget._is_from_tab = "Tree"
# also the other tabs will have _is_from_tab set on them by the
# offer_tabs menu callback above
# if the log tab panel is removed, move it back to the top splitter
logWidget = splitter.findChild(QtWidgets.QFrame, "logFrame")
logWidget._is_permanent = True
# tag core Leo components (see ns_provides)
splitter.findChild(QtWidgets.QWidget, "outlineFrame")._ns_id = '_leo_pane:outlineFrame'
splitter.findChild(QtWidgets.QWidget, "logFrame")._ns_id = '_leo_pane:logFrame'
splitter.findChild(QtWidgets.QWidget, "bodyFrame")._ns_id = '_leo_pane:bodyFrame'
splitter.register_provider(self)
splitter.splitterClicked_connect(self.splitter_clicked)
#@+node:tbrown.20120119080604.22982: *3* flc.embed (FreeLayoutController)
def embed(self):
"""called from ns_do_context - embed layout in outline's
@settings, an alternative to the Load/Save named layout system
"""
# Careful: we could be unit testing.
top_splitter = self.get_top_splitter()
if not top_splitter: return
c = self.c
layout = top_splitter.get_saveable_layout()
nd = g.findNodeAnywhere(c, "@data free-layout-layout")
if not nd:
settings = g.findNodeAnywhere(c, "@settings")
if not settings:
settings = c.rootPosition().insertAfter()
settings.h = "@settings"
nd = settings.insertAsNthChild(0)
nd.h = "@data free-layout-layout"
nd.b = json.dumps(layout, indent=4)
nd = nd.parent()
if not nd or nd.h != "@settings":
g.es("WARNING: @data free-layout-layout node is not " "under an active @settings node")
c.redraw()
#@+node:ekr.20160424035257.1: *3* flc.get_main_splitter & helper
def get_main_splitter(self, w=None):
'''
        Return the main splitter, or None. The main splitter is a
NestedSplitter that contains the body pane.
Yes, the user could delete the secondary splitter but if so, there is
not much we can do here.
'''
trace = False and not g.unitTesting
top = self.get_top_splitter()
if top:
w = top.find_child(QtWidgets.QWidget, "bodyFrame")
while w:
if isinstance(w, NestedSplitter):
if trace: g.trace('found splitter', id(w))
return w
w = w.parent()
if trace: g.trace('not found')
return None
#@+node:ekr.20160424035254.1: *3* flc.get_secondary_splitter & helper
def get_secondary_splitter(self):
'''
Return the secondary splitter, if it exists. The secondary splitter
contains the outline pane.
Yes, the user could delete the outline pane, but if so, there is not
much we can do here.
'''
trace = False and not g.unitTesting
top = self.get_top_splitter()
if top:
w = top.find_child(QtWidgets.QWidget, 'outlineFrame')
while w:
if isinstance(w, NestedSplitter):
if trace: g.trace('found splitter', id(w))
return w
w = w.parent()
if trace: g.trace('not found')
return None
#@+node:tbrown.20110621120042.22914: *3* flc.get_top_splitter
def get_top_splitter(self):
'''Return the top splitter of c.frame.top.'''
# Careful: we could be unit testing.
f = self.c.frame
if hasattr(f, 'top') and f.top:
return f.top.findChild(NestedSplitter).top()
else:
return None
#@+node:ekr.20120419095424.9927: *3* flc.loadLayouts (sets wrap=True)
def loadLayouts(self, tag, keys, reloading=False):
"""loadLayouts - Load the outlines layout
:Parameters:
- `tag`: from hook event
- `keys`: from hook event
- `reloading`: True if this is not the initial load, see below
When called from the `after-create-leo-frame2` hook this defaults
        to False. When called from the `free-layout-restore` command, this is set
True, and the layout the outline had *when first loaded* is restored.
Useful if you want to temporarily switch to a different layout and then
back, without having to remember the original layouts name.
"""
c = self.c
if not (g.app and g.app.db):
return # Can happen when running from the Leo bridge.
d = g.app.db.get('ns_layouts', {})
if c != keys.get('c'):
return
# g.trace(c.frame.title)
layout = c.config.getData("free-layout-layout")
if layout:
layout = json.loads('\n'.join(layout))
name = c.db.get('_ns_layout')
if name:
# g.trace('Layout:',name,'reloading',reloading)
if reloading:
name = c.free_layout.original_layout
c.db['_ns_layout'] = name
else:
c.free_layout.original_layout = name
if layout:
g.es("NOTE: embedded layout in @settings/@data free-layout-layout "
"overrides saved layout " + name)
else:
layout = d.get(name)
# EKR: Create commands that will load each layout.
if d:
for name in sorted(d.keys()):
def func(event, c=c, d=d, name=name):
layout = d.get(name)
if layout:
c.free_layout.get_top_splitter().load_layout(layout)
else:
g.trace('no layout', name)
commandName = 'free-layout-load-%s' % name.strip().lower().replace(' ', '-')
c.k.registerCommand(commandName, func)
# Careful: we could be unit testing or in the Leo bridge.
if layout:
splitter = c.free_layout.get_top_splitter()
if splitter:
splitter.load_layout(layout)
#@+node:tbrown.20110628083641.11730: *3* flc.ns_context
def ns_context(self):
ans = [
('Embed layout', '_fl_embed_layout'),
('Save layout', '_fl_save_layout'),
]
d = g.app.db.get('ns_layouts', {})
if d:
ans.append({'Load layout': [(k, '_fl_load_layout:' + k) for k in d]})
ans.append({'Delete layout': [(k, '_fl_delete_layout:' + k) for k in d]})
ans.append(('Forget layout', '_fl_forget_layout:'))
ans.append(('Restore initial layout', '_fl_restore_layout:'))
ans.append(('Restore default layout', '_fl_restore_default:'))
ans.append(('Help for this menu', '_fl_help:'))
return ans
#@+node:tbrown.20110628083641.11732: *3* flc.ns_do_context (FreeLayoutController)
def ns_do_context(self, id_, splitter, index):
if id_.startswith('_fl_embed_layout'):
self.embed()
return True
if id_.startswith('_fl_restore_default'):
self.get_top_splitter().load_layout(
{'content': [{'content': ['_leo_pane:outlineFrame',
'_leo_pane:logFrame'], 'orientation': 1, 'sizes':
[509, 275]}, '_leo_pane:bodyFrame'],
'orientation': 2, 'sizes': [216, 216]})
if id_.startswith('_fl_help'):
self.c.putHelpFor(__doc__)
# g.handleUrl("http://leoeditor.com/")
return True
if id_ == '_fl_save_layout':
if self.c.config.getData("free-layout-layout"):
g.es("WARNING: embedded layout in @settings/@data free-layout-layout " "will override saved layout")
layout = self.get_top_splitter().get_saveable_layout()
name = g.app.gui.runAskOkCancelStringDialog(self.c,
title="Save layout",
message="Name for layout?",
)
if name:
self.c.db['_ns_layout'] = name
d = g.app.db.get('ns_layouts', {})
d[name] = layout
# make sure g.app.db's __set_item__ is hit so it knows to save
g.app.db['ns_layouts'] = d
return True
if id_.startswith('_fl_load_layout:'):
if self.c.config.getData("free-layout-layout"):
g.es("WARNING: embedded layout in @settings/@data free-layout-layout " "will override saved layout")
name = id_.split(':', 1)[1]
self.c.db['_ns_layout'] = name
layout = g.app.db['ns_layouts'][name]
self.get_top_splitter().load_layout(layout)
return True
if id_.startswith('_fl_delete_layout:'):
name = id_.split(':', 1)[1]
if ('yes' == g.app.gui.runAskYesNoCancelDialog(self.c,
"Really delete Layout?",
"Really permanently delete the layout '%s'?" % name)
):
d = g.app.db.get('ns_layouts', {})
del d[name]
# make sure g.app.db's __set_item__ is hit so it knows to save
g.app.db['ns_layouts'] = d
if '_ns_layout' in self.c.db:
del self.c.db['_ns_layout']
return True
if id_.startswith('_fl_forget_layout:'):
if '_ns_layout' in self.c.db:
del self.c.db['_ns_layout']
return True
if id_.startswith('_fl_restore_layout:'):
self.loadLayouts("reload", {'c': self.c}, reloading=True)
return True
return False
#@+node:tbrown.20110628083641.11724: *3* flc.ns_provide
def ns_provide(self, id_):
if id_.startswith('_leo_tab:'):
id_ = id_.split(':', 1)[1]
logTabWidget = self.get_top_splitter().find_child(QtWidgets.QWidget, "logTabWidget")
for n in range(logTabWidget.count()):
if logTabWidget.tabText(n) == id_:
w = logTabWidget.widget(n)
w.setHidden(False)
w._is_from_tab = logTabWidget.tabText(n)
w.setMinimumSize(20, 20)
return w
# didn't find it, maybe it's already in a splitter
return 'USE_EXISTING'
if id_.startswith('_leo_pane:'):
id_ = id_.split(':', 1)[1]
w = self.get_top_splitter().find_child(QtWidgets.QWidget, id_)
if w:
w.setHidden(False) # may be from Tab holder
w.setMinimumSize(20, 20)
return w
return None
#@+node:tbrown.20110627201141.11745: *3* flc.ns_provides
def ns_provides(self):
ans = []
# list of things in tab widget
logTabWidget = self.get_top_splitter().find_child(QtWidgets.QWidget, "logTabWidget")
for n in range(logTabWidget.count()):
text = str(logTabWidget.tabText(n)) # not QString
if text in ('Body', 'Tree'):
continue # handled below
if text == 'Log':
# if Leo can't find Log in tab pane, it creates another
continue
ans.append((text, '_leo_tab:' + text))
ans.append(('Tree', '_leo_pane:outlineFrame'))
ans.append(('Body', '_leo_pane:bodyFrame'))
ans.append(('Tab pane', '_leo_pane:logFrame'))
return ans
#@+node:tbnorth.20160510122413.1: *3* flc.splitter_clicked
def splitter_clicked(self, splitter, handle, event, release, double):
"""
splitter_clicked - middle click release will zoom adjacent
body / tree panes
:param NestedSplitter splitter: splitter containing clicked handle
:param NestedSplitterHandle handle: clicked handle
:param QMouseEvent event: mouse event for click
:param bool release: was it a Press or Release event
:param bool double: was it a double click event
"""
if not release or event.button() != QtCore.Qt.MidButton:
return
if splitter.root.zoomed: # unzoom if *any* handle clicked
splitter.zoom_toggle()
return
before = splitter.widget(splitter.indexOf(handle) - 1)
after = splitter.widget(splitter.indexOf(handle))
for pane in before, after:
if pane.objectName() == 'bodyFrame':
pane.setFocus()
splitter.zoom_toggle()
return
if pane.objectName() == 'outlineFrame':
pane.setFocus()
splitter.zoom_toggle(local=True)
return
#@-others
#@+node:ekr.20160416065221.1: ** commands: free_layout.py
#@+node:tbrown.20140524112944.32658: *3* @g.command free-layout-context-menu
@g.command('free-layout-context-menu')
def free_layout_context_menu(event):
"""free_layout_context_menu - open free layout's context menu, using
the first divider of the top splitter for context, for now.
"""
c = event.get('c')
splitter = c.free_layout.get_top_splitter()
handle = splitter.handle(1)
handle.splitter_menu(handle.rect().topLeft())
#@+node:tbrown.20130403081644.25265: *3* @g.command free-layout-restore
@g.command('free-layout-restore')
def free_layout_restore(event):
"""free_layout_restore - restore layout outline had when it was loaded
"""
c = event.get('c')
c.free_layout.loadLayouts('reload', {'c': c}, reloading=True)
#@+node:tbrown.20131111194858.29876: *3* @g.command free-layout-load
@g.command('free-layout-load')
def free_layout_load(event):
"""free_layout_load - load layout from menu
"""
c = event.get('c')
d = g.app.db.get('ns_layouts', {})
menu = QtWidgets.QMenu(c.frame.top)
for k in d:
menu.addAction(k)
pos = c.frame.top.window().frameGeometry().center()
action = menu.exec_(pos)
if action is None:
return
name = str(action.text())
c.db['_ns_layout'] = name
# layout = g.app.db['ns_layouts'][name]
layouts = g.app.db.get('ns_layouts', {})
layout = layouts.get(name)
if layout:
c.free_layout.get_top_splitter().load_layout(layout)
#@+node:tbrown.20140522153032.32658: *3* @g.command free-layout-zoom
@g.command('free-layout-zoom')
def free_layout_zoom(event):
"""free_layout_zoom - (un)zoom the current pane.
"""
c = event.get('c')
c.free_layout.get_top_splitter().zoom_toggle()
#@+node:ekr.20160327060009.1: *3* free_layout:register_provider
def register_provider(c, provider_instance):
'''Register the provider instance with the top splitter.'''
# Careful: c.free_layout may not exist during unit testing.
if c and hasattr(c, 'free_layout'):
splitter = c.free_layout.get_top_splitter()
if splitter:
splitter.register_provider(provider_instance)
#@-others
#@@language python
#@@tabwidth -4
#@@pagewidth 70
#@-leo
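# --- Hedged sketch of the provider interface (not part of the original file) --
# A plugin could register an object like this via register_provider(c, ...).
# The id string and QLabel widget are illustrative assumptions; providers may
# also implement ns_context/ns_do_context for splitter-handle menu items.
class ExampleProvider(object):
    def ns_provides(self):
        # advertise one item for the empty-pane Action button menu
        return [('Example pane', '_example_pane:demo')]

    def ns_provide(self, id_):
        if id_.startswith('_example_pane:'):
            w = QtWidgets.QLabel('free_layout example pane')
            w._ns_id = id_  # lets NestedSplitter identify the pane when saving layouts
            return w
        return None
# e.g. from another plugin: free_layout.register_provider(c, ExampleProvider())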
| 42.572668
| 164
| 0.60695
|
a1b8f360cda662c5c6b0cc23a487b3d11c4b68d4
| 12,052
|
py
|
Python
|
readthedocs/embed/views.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 2,092
|
2019-06-29T07:47:30.000Z
|
2022-03-31T14:54:59.000Z
|
readthedocs/embed/views.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 2,389
|
2019-06-29T04:22:55.000Z
|
2022-03-31T22:57:49.000Z
|
readthedocs/embed/views.py
|
mehrdad-khojastefar/readthedocs.org
|
b958bb8d04c454324d612345890b13af54a19eb6
|
[
"MIT"
] | 1,185
|
2019-06-29T21:49:31.000Z
|
2022-03-30T09:57:15.000Z
|
"""Views for the embed app."""
import functools
import json
import re
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import slugify
from django.utils.functional import cached_property
from docutils.nodes import make_id
from pyquery import PyQuery as PQ # noqa
from rest_framework import status
from rest_framework.renderers import BrowsableAPIRenderer, JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
import structlog
from readthedocs.api.v2.mixins import CachedResponseMixin
from readthedocs.api.v2.permissions import IsAuthorizedToViewVersion
from readthedocs.builds.constants import EXTERNAL
from readthedocs.core.resolver import resolve
from readthedocs.core.unresolver import unresolve
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.embed.utils import recurse_while_none, clean_links
from readthedocs.projects.models import Project
from readthedocs.storage import build_media_storage
log = structlog.get_logger(__name__)
def escape_selector(selector):
"""Escape special characters from the section id."""
regex = re.compile(r'(!|"|#|\$|%|\'|\(|\)|\*|\+|\,|\.|\/|\:|\;|\?|@)')
ret = re.sub(regex, r'\\\1', selector)
return ret
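# --- Hedged illustrative check (not part of the original module) --------------
# escape_selector() backslash-escapes CSS-significant characters so a section
# id can be used safely inside a PyQuery selector.
def _escape_selector_example():
    assert escape_selector("module.sub:thing") == r"module\.sub\:thing"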
class EmbedAPIBase(CachedResponseMixin, APIView):
# pylint: disable=line-too-long
"""
Embed a section of content from any Read the Docs page.
Returns headers and content that matches the queried section.
### Arguments
We support two different ways to query the API:
* project (required)
* version (required)
* doc or path (required)
* section
or:
* url (with fragment) (required)
### Example
    - GET https://readthedocs.org/api/v2/embed/?project=requests&version=latest&doc=index&section=User%20Guide&path=/index.html
- GET https://readthedocs.org/api/v2/embed/?url=https://docs.readthedocs.io/en/latest/features.html%23github-bitbucket-and-gitlab-integration
# Current Request
""" # noqa
permission_classes = [IsAuthorizedToViewVersion]
renderer_classes = [JSONRenderer, BrowsableAPIRenderer]
@functools.lru_cache(maxsize=1)
def _get_project(self):
if self.unresolved_url:
project_slug = self.unresolved_url.project.slug
else:
project_slug = self.request.GET.get('project')
return get_object_or_404(Project, slug=project_slug)
@functools.lru_cache(maxsize=1)
def _get_version(self):
if self.unresolved_url:
version_slug = self.unresolved_url.version_slug
else:
version_slug = self.request.GET.get('version', 'latest')
project = self._get_project()
return get_object_or_404(project.versions.all(), slug=version_slug)
@cached_property
def unresolved_url(self):
url = self.request.GET.get('url')
if not url:
return None
return unresolve(url)
def get(self, request):
"""Handle the get request."""
project = self._get_project()
version = self._get_version()
url = request.GET.get('url')
path = request.GET.get('path', '')
doc = request.GET.get('doc')
section = request.GET.get('section')
if url:
unresolved = self.unresolved_url
path = unresolved.filename
section = unresolved.fragment
elif not path and not doc:
return Response(
{
'error': (
'Invalid Arguments. '
'Please provide "url" or "section" and "path" GET arguments.'
)
},
status=status.HTTP_400_BAD_REQUEST
)
# Generate the docname from path
# by removing the ``.html`` extension and trailing ``/``.
if path:
doc = re.sub(r'(.+)\.html$', r'\1', path.strip('/'))
response = do_embed(
project=project,
version=version,
doc=doc,
section=section,
path=path,
url=url,
)
if not response:
return Response(
{
'error': (
"Can't find content for section: "
f"doc={doc} path={path} section={section}"
)
},
status=status.HTTP_404_NOT_FOUND
)
log.info(
'EmbedAPI successful response.',
project_slug=project.slug,
version_slug=version.slug,
doc=doc,
section=section,
path=path,
url=url,
referer=request.META.get('HTTP_REFERER'),
hoverxref_version=request.META.get('HTTP_X_HOVERXREF_VERSION'),
)
return Response(response)
class EmbedAPI(SettingsOverrideObject):
_default_class = EmbedAPIBase
def do_embed(*, project, version, doc=None, path=None, section=None, url=None):
"""Get the embed response from a document section."""
if not url:
external = version.type == EXTERNAL
url = resolve(
project=project,
version_slug=version.slug,
filename=path or doc,
external=external,
)
content = None
headers = None
if version.is_sphinx_type:
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
if not file_content:
return None
content, headers, section = parse_sphinx(
content=file_content,
section=section,
url=url,
)
else:
# TODO: this should read from the html file itself,
# we don't have fjson files for mkdocs.
file_content = _get_doc_content(
project=project,
version=version,
doc=doc,
)
content, headers, section = parse_mkdocs(
content=file_content,
section=section,
url=url,
)
if content is None:
return None
return {
'content': content,
'headers': headers,
'url': url,
'meta': {
'project': project.slug,
'version': version.slug,
'doc': doc,
'section': section,
},
}
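# Minimal sketch of calling ``do_embed`` directly (illustrative only): the
# ``project`` and ``version`` arguments are assumed to be the same ORM objects
# the view resolves above, and the doc/section values are hypothetical.
#
#   result = do_embed(
#       project=project,
#       version=version,
#       doc='index',
#       section='user guide',
#   )
#   if result is not None:
#       result['content']  # HTML fragments for the requested section
#       result['headers']  # headers parsed from the page table of contents
#       result['url']      # resolved URL of the source document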
def _get_doc_content(project, version, doc):
storage_path = project.get_storage_path(
'json',
version_slug=version.slug,
include_file=False,
version_type=version.type,
)
file_path = build_media_storage.join(
storage_path,
f'{doc}.fjson'.lstrip('/'),
)
try:
with build_media_storage.open(file_path) as file:
return json.load(file)
except Exception: # noqa
log.warning('Unable to read file.', file_path=file_path)
return None
def parse_sphinx(content, section, url):
"""Get the embed content for the section."""
body = content.get('body')
toc = content.get('toc')
if not content or not body or not toc:
return (None, None, section)
headers = [
recurse_while_none(element)
for element in PQ(toc)('a')
]
if not section and headers:
# If no section is sent, return the content of the first one
# TODO: This will always be the full page content,
# lets do something smarter here
section = list(headers[0].keys())[0].lower()
if not section:
return [], headers, None
body_obj = PQ(body)
escaped_section = escape_selector(section)
elements_id = [
escaped_section,
slugify(escaped_section),
make_id(escaped_section),
f'module-{escaped_section}',
]
query_result = []
for element_id in elements_id:
if not element_id:
continue
try:
query_result = body_obj(f'#{element_id}')
if query_result:
break
except Exception: # noqa
log.info(
'Failed to query section.',
url=url,
element_id=element_id,
)
if not query_result:
selector = f':header:contains("{escaped_section}")'
query_result = body_obj(selector).parent()
# Handle ``dt`` special cases
if len(query_result) == 1 and query_result[0].tag == 'dt':
parent = query_result.parent()
if 'glossary' in parent.attr('class'):
# Sphinx HTML structure for term glossary puts the ``id`` in the
# ``dt`` element with the title of the term. In this case, we
# need to return the next sibling which contains the definition
# of the term itself.
# Structure:
# <dl class="glossary docutils">
# <dt id="term-definition">definition</dt>
# <dd>Text definition for the term</dd>
# ...
# </dl>
query_result = query_result.next()
elif 'citation' in parent.attr('class'):
# Sphinx HTML structure for sphinxcontrib-bibtex puts the ``id`` in the
# ``dt`` element with the title of the cite. In this case, we
# need to return the next sibling which contains the cite itself.
# Structure:
# <dl class="citation">
# <dt id="cite-id"><span><a>Title of the cite</a></span></dt>
# <dd>Content of the cite</dd>
# ...
# </dl>
query_result = query_result.next()
else:
# Sphinx HTML structure for definition list puts the ``id``
# the ``dt`` element, instead of the ``dl``. This makes
# the backend to return just the title of the definition. If we
# detect this case, we return the parent (the whole ``dl``)
# Structure:
# <dl class="confval">
# <dt id="confval-config">
# <code class="descname">config</code>
# <a class="headerlink" href="#confval-config">¶</a></dt>
# <dd><p>Text with a description</p></dd>
# </dl>
query_result = parent
def dump(obj):
"""Handle API-based doc HTML."""
if obj[0].tag in ['span', 'h2']:
return obj.parent().outerHtml()
return obj.outerHtml()
ret = [
dump(clean_links(obj, url))
for obj in query_result
]
return ret, headers, section
def parse_mkdocs(content, section, url): # pylint: disable=unused-argument
"""Get the embed content for the section."""
ret = []
headers = []
if not content or not content.get('content'):
return (None, None, section)
body = content['content']
for element in PQ(body)('h2'):
headers.append(recurse_while_none(element))
if not section and headers:
# If no section is sent, return the content of the first one
section = list(headers[0].keys())[0].lower()
if section:
body_obj = PQ(body)
escaped_section = escape_selector(section)
section_list = body_obj(
':header:contains("{title}")'.format(title=str(escaped_section)))
for num in range(len(section_list)):
header2 = section_list.eq(num)
# h2_title = h2.text().strip()
# section_id = h2.attr('id')
h2_content = ""
next_p = header2.next()
while next_p:
if next_p[0].tag == 'h2':
break
h2_html = next_p.outerHtml()
if h2_html:
h2_content += "\n%s\n" % h2_html
next_p = next_p.next()
if h2_content:
ret.append(h2_content)
# ret.append({
# 'id': section_id,
# 'title': h2_title,
# 'content': h2_content,
# })
return (ret, headers, section)
| 30.982005 | 145 | 0.574842 |
831b1599417d24275ead0fcf071d1d67c4568792 | 1,267 | py | Python | Ene-Jun-2021/perez-sanchez-jose-jahir/Practica1/Practica_1.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | ["MIT"] | 41 | 2017-09-26T09:36:32.000Z | 2022-03-19T18:05:25.000Z | Ene-Jun-2021/perez-sanchez-jose-jahir/Practica1/Practica_1.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | ["MIT"] | 67 | 2017-09-11T05:06:12.000Z | 2022-02-14T04:44:04.000Z | Ene-Jun-2021/perez-sanchez-jose-jahir/Practica1/Practica_1.py | bryanbalderas/DAS_Sistemas | 1e31f088c0de7134471025a5730b0abfc19d936e | ["MIT"] | 210 | 2017-09-01T00:10:08.000Z | 2022-03-19T18:05:12.000Z |
class Vehiculo():
random = 0
def __init__(self,velocidad_maxima, kilometraje, capacidad):
self.velocidad_maxima = velocidad_maxima
self.kilometraje = kilometraje
self.capacidad = capacidad
def tarifa(self,capacidad):
tarifa_i = self.capacidad*100
return tarifa_i
def __str__(self):
return f"El Vehiculo tiene una velocidad maxima de: {self.velocidad_maxima}, un kilometraje de: {self.kilometraje} y una capacidad de: {self.capacidad}"
class Autobus(Vehiculo):
def __init__(self,velocidad_maxima, kilometraje, capacidad):
super().__init__(velocidad_maxima, kilometraje, capacidad)
def __str__(self):
return f"Soy un autobus! -> velocidad maxima {self.velocidad_maxima}, kilometraje {self.kilometraje}, capacidad {self.capacidad} "
if __name__ == "__main__":
lista = [
Vehiculo(100,30000,4),
Autobus(80,40000,30),
Vehiculo(180,10000,6),
Autobus(60,50000,20),
Vehiculo(180,20000,2),
Autobus(90,40000,25)
]
for x in lista:
if isinstance(x, Autobus):
manten = x.tarifa(x.capacidad)*0.1
tarifa_real = x.tarifa(x.capacidad)+manten
print(x,tarifa_real)
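# Worked example of the fare logic above (an explanatory sketch, not extra
# program behaviour): for Autobus(80, 40000, 30) the base fare is
# capacidad * 100 = 30 * 100 = 3000, the maintenance surcharge is 10% of that
# (300.0), so the loop prints a total fare of 3300.0. Plain Vehiculo instances
# fail the isinstance() check and produce no output.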
| 31.675 | 160 | 0.642463 |
61461c3d522664432e08c7671725b2c22db930ca | 4,431 | py | Python | carla/src/ros-bridge/carla_ad_agent/src/carla_ad_agent/carla_ad_agent.py | jaypatravali/VirtualFastKeyboardFurious_carla | cf68e7ea7d0436b38b8f7bb3fb1dbeffb6812c97 | ["MIT"] | null | null | null | carla/src/ros-bridge/carla_ad_agent/src/carla_ad_agent/carla_ad_agent.py | jaypatravali/VirtualFastKeyboardFurious_carla | cf68e7ea7d0436b38b8f7bb3fb1dbeffb6812c97 | ["MIT"] | 1 | 2020-06-05T15:31:31.000Z | 2020-06-05T15:31:31.000Z | carla/src/ros-bridge/carla_ad_agent/src/carla_ad_agent/carla_ad_agent.py | jaypatravali/VirtualFastKeyboardFurious_carla | cf68e7ea7d0436b38b8f7bb3fb1dbeffb6812c97 | ["MIT"] | 1 | 2020-12-14T07:04:08.000Z | 2020-12-14T07:04:08.000Z |
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
A basic AD agent using CARLA waypoints
"""
import sys
import rospy
from nav_msgs.msg import Path
from std_msgs.msg import Float64
from carla_msgs.msg import CarlaEgoVehicleInfo, CarlaEgoVehicleControl
from basic_agent import BasicAgent
class CarlaAdAgent(object):
"""
A basic AD agent using CARLA waypoints
"""
def __init__(self, role_name, target_speed, avoid_risk):
"""
Constructor
"""
self._route_assigned = False
self._global_plan = None
self._agent = None
self._target_speed = target_speed
rospy.on_shutdown(self.on_shutdown)
# wait for ego vehicle
vehicle_info = None
try:
vehicle_info = rospy.wait_for_message(
"/carla/{}/vehicle_info".format(role_name), CarlaEgoVehicleInfo)
except rospy.ROSException:
rospy.logerr("Timeout while waiting for world info!")
sys.exit(1)
self._route_subscriber = rospy.Subscriber(
"/carla/{}/waypoints".format(role_name), Path, self.path_updated)
self._target_speed_subscriber = rospy.Subscriber(
"/carla/{}/target_speed".format(role_name), Float64, self.target_speed_updated)
self.vehicle_control_publisher = rospy.Publisher(
"/carla/{}/vehicle_control_cmd".format(role_name), CarlaEgoVehicleControl, queue_size=1)
self._agent = BasicAgent(role_name, vehicle_info.id, # pylint: disable=no-member
avoid_risk)
def on_shutdown(self):
"""
callback on shutdown
"""
rospy.loginfo("Shutting down, stopping ego vehicle...")
if self._agent:
self.vehicle_control_publisher.publish(self._agent.emergency_stop())
def target_speed_updated(self, target_speed):
"""
callback on new target speed
"""
rospy.loginfo("New target speed received: {}".format(target_speed.data))
self._target_speed = target_speed.data
def path_updated(self, path):
"""
callback on new route
"""
rospy.loginfo("New plan with {} waypoints received.".format(len(path.poses)))
if self._agent:
self.vehicle_control_publisher.publish(self._agent.emergency_stop())
self._global_plan = path
self._route_assigned = False
def run_step(self):
"""
Execute one step of navigation.
"""
control = CarlaEgoVehicleControl()
control.steer = 0.0
control.throttle = 0.0
control.brake = 0.0
control.hand_brake = False
if not self._agent:
rospy.loginfo("Waiting for ego vehicle...")
return control
if not self._route_assigned and self._global_plan:
rospy.loginfo("Assigning plan...")
self._agent._local_planner.set_global_plan( # pylint: disable=protected-access
self._global_plan.poses)
self._route_assigned = True
else:
control, finished = self._agent.run_step(self._target_speed)
if finished:
self._global_plan = None
self._route_assigned = False
return control
def run(self):
"""
Control loop
:return:
"""
r = rospy.Rate(10)
while not rospy.is_shutdown():
if self._global_plan:
control = self.run_step()
if control:
control.steer = -control.steer
self.vehicle_control_publisher.publish(control)
else:
try:
r.sleep()
except rospy.ROSInterruptException:
pass
def main():
"""
main function
:return:
"""
rospy.init_node('carla_ad_agent', anonymous=True)
role_name = rospy.get_param("~role_name", "ego_vehicle")
target_speed = rospy.get_param("~target_speed", 20)
avoid_risk = rospy.get_param("~avoid_risk", True)
controller = CarlaAdAgent(role_name, target_speed, avoid_risk)
try:
controller.run()
finally:
del controller
rospy.loginfo("Done")
if __name__ == "__main__":
main()
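# Illustrative invocation (an assumption, not taken from the package's launch
# files): with a CARLA server and the carla_ros_bridge already running, the
# node could be started with ROS 1 private parameters along these lines:
#
#   rosrun carla_ad_agent carla_ad_agent.py _role_name:=ego_vehicle \
#       _target_speed:=30 _avoid_risk:=true
#
# Based on the subscriptions above, the agent then reads
# /carla/ego_vehicle/waypoints (nav_msgs/Path) and
# /carla/ego_vehicle/target_speed (std_msgs/Float64), and publishes
# /carla/ego_vehicle/vehicle_control_cmd (carla_msgs/CarlaEgoVehicleControl).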
| 29.738255 | 100 | 0.606409 |
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.