# being a bit too dynamic
# pylint: disable=E1101
from __future__ import division
import warnings
import re
from collections import namedtuple
from distutils.version import LooseVersion
import numpy as np
from pandas.util._decorators import cache_readonly
from pandas.core.base import PandasObject
from pandas.core.config import get_option
from pandas.core.dtypes.missing import isna, notna, remove_na_arraylike
from pandas.core.dtypes.common import (
is_list_like,
is_integer,
is_number,
is_hashable,
is_iterator)
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.common import AbstractMethodError, _try_sort, _any_not_none
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.indexes.period import PeriodIndex
from pandas.compat import range, lrange, map, zip, string_types
import pandas.compat as compat
from pandas.io.formats.printing import pprint_thing
from pandas.util._decorators import Appender
from pandas.plotting._compat import (_mpl_ge_1_3_1,
_mpl_ge_1_5_0,
_mpl_ge_2_0_0)
from pandas.plotting._style import (plot_params,
_get_standard_colors)
from pandas.plotting._tools import (_subplots, _flatten, table,
_handle_shared_axes, _get_all_lines,
_get_xlim, _set_ticks_props,
format_date_labels)
try:
from pandas.plotting import _converter
except ImportError:
pass
else:
if get_option('plotting.matplotlib.register_converters'):
_converter.register(explicit=True)
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _gca(rc=None):
import matplotlib.pyplot as plt
with plt.rc_context(rc):
return plt.gca()
def _gcf():
import matplotlib.pyplot as plt
return plt.gcf()
class MPLPlot(object):
"""
Base class for assembling a pandas plot using matplotlib
Parameters
----------
data : Series or DataFrame
The data to be plotted
"""
@property
def _kind(self):
"""Specify kind str. Must be overridden in child class"""
raise NotImplementedError
_layout_type = 'vertical'
_default_rot = 0
orientation = None
_pop_attributes = ['label', 'style', 'logy', 'logx', 'loglog',
'mark_right', 'stacked']
_attr_defaults = {'logy': False, 'logx': False, 'loglog': False,
'mark_right': True, 'stacked': False}
def __init__(self, data, kind=None, by=None, subplots=False, sharex=None,
sharey=False, use_index=True,
figsize=None, grid=None, legend=True, rot=None,
ax=None, fig=None, title=None, xlim=None, ylim=None,
xticks=None, yticks=None,
sort_columns=False, fontsize=None,
secondary_y=False, colormap=None,
table=False, layout=None, **kwds):
_converter._WARN = False
self.data = data
self.by = by
self.kind = kind
self.sort_columns = sort_columns
self.subplots = subplots
if sharex is None:
if ax is None:
self.sharex = True
else:
# if an axes object is passed in, the user should handle
# visibility settings themselves
self.sharex = False
else:
self.sharex = sharex
self.sharey = sharey
self.figsize = figsize
self.layout = layout
self.xticks = xticks
self.yticks = yticks
self.xlim = xlim
self.ylim = ylim
self.title = title
self.use_index = use_index
self.fontsize = fontsize
if rot is not None:
self.rot = rot
# need to know for format_date_labels since it's rotated to 30 by
# default
self._rot_set = True
else:
self._rot_set = False
self.rot = self._default_rot
if grid is None:
grid = False if secondary_y else self.plt.rcParams['axes.grid']
self.grid = grid
self.legend = legend
self.legend_handles = []
self.legend_labels = []
for attr in self._pop_attributes:
value = kwds.pop(attr, self._attr_defaults.get(attr, None))
setattr(self, attr, value)
self.ax = ax
self.fig = fig
self.axes = None
# parse errorbar input if given
xerr = kwds.pop('xerr', None)
yerr = kwds.pop('yerr', None)
self.errors = {}
for kw, err in zip(['xerr', 'yerr'], [xerr, yerr]):
self.errors[kw] = self._parse_errorbars(kw, err)
if not isinstance(secondary_y, (bool, tuple, list, np.ndarray, Index)):
secondary_y = [secondary_y]
self.secondary_y = secondary_y
# matplotlib users may pass `cmap` instead of pandas' `colormap`;
# accept either spelling, but raise if both are given.
if 'cmap' in kwds and colormap:
raise TypeError("Only specify one of `cmap` and `colormap`.")
elif 'cmap' in kwds:
self.colormap = kwds.pop('cmap')
else:
self.colormap = colormap
self.table = table
self.kwds = kwds
self._validate_color_args()
def _validate_color_args(self):
if 'color' not in self.kwds and 'colors' in self.kwds:
warnings.warn(("'colors' is being deprecated. Please use 'color'"
"instead of 'colors'"))
colors = self.kwds.pop('colors')
self.kwds['color'] = colors
if ('color' in self.kwds and self.nseries == 1 and
not is_list_like(self.kwds['color'])):
# support series.plot(color='green')
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds and isinstance(self.kwds['color'], tuple) and
self.nseries == 1 and len(self.kwds['color']) in (3, 4)):
# support RGB and RGBA tuples in series plot
self.kwds['color'] = [self.kwds['color']]
if ('color' in self.kwds or 'colors' in self.kwds) and \
self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
if 'color' in self.kwds and self.style is not None:
if is_list_like(self.style):
styles = self.style
else:
styles = [self.style]
# need only a single match
for s in styles:
if re.match('^[a-z]+?', s) is not None:
raise ValueError(
"Cannot pass 'style' string with a color "
"symbol and 'color' keyword argument. Please"
" use one or the other or pass 'style' "
"without a color symbol")
def _iter_data(self, data=None, keep_index=False, fillna=None):
if data is None:
data = self.data
if fillna is not None:
data = data.fillna(fillna)
# TODO: unused?
# if self.sort_columns:
# columns = _try_sort(data.columns)
# else:
# columns = data.columns
for col, values in data.iteritems():
if keep_index is True:
yield col, values
else:
yield col, values.values
@property
def nseries(self):
if self.data.ndim == 1:
return 1
else:
return self.data.shape[1]
def draw(self):
self.plt.draw_if_interactive()
def generate(self):
self._args_adjust()
self._compute_plot_data()
self._setup_subplots()
self._make_plot()
self._add_table()
self._make_legend()
self._adorn_subplots()
for ax in self.axes:
self._post_plot_logic_common(ax, self.data)
self._post_plot_logic(ax, self.data)
def _args_adjust(self):
pass
def _has_plotted_object(self, ax):
"""check whether ax has data"""
return (len(ax.lines) != 0 or
len(ax.artists) != 0 or
len(ax.containers) != 0)
def _maybe_right_yaxis(self, ax, axes_num):
if not self.on_right(axes_num):
# secondary axes may be passed via ax kw
return self._get_ax_layer(ax)
if hasattr(ax, 'right_ax'):
# if it has a right_ax property, ``ax`` must be the left axes
return ax.right_ax
elif hasattr(ax, 'left_ax'):
# if it has a left_ax property, ``ax`` must be the right axes
return ax
else:
# otherwise, create twin axes
orig_ax, new_ax = ax, ax.twinx()
# TODO: use Matplotlib public API when available
new_ax._get_lines = orig_ax._get_lines
new_ax._get_patches_for_fill = orig_ax._get_patches_for_fill
orig_ax.right_ax, new_ax.left_ax = new_ax, orig_ax
if not self._has_plotted_object(orig_ax): # no data on left y
orig_ax.get_yaxis().set_visible(False)
return new_ax
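# Example (illustrative sketch; column names are arbitrary).  Columns listed
# in ``secondary_y`` end up on the twin axes created above, reachable as
# ``ax.right_ax``:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'small': np.arange(10.),
#   ...                    'big': np.arange(10.) * 1e6})
#   >>> ax = df.plot(secondary_y=['big'])
#   >>> hasattr(ax, 'right_ax')
#   True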
def _setup_subplots(self):
if self.subplots:
fig, axes = _subplots(naxes=self.nseries,
sharex=self.sharex, sharey=self.sharey,
figsize=self.figsize, ax=self.ax,
layout=self.layout,
layout_type=self._layout_type)
else:
if self.ax is None:
fig = self.plt.figure(figsize=self.figsize)
axes = fig.add_subplot(111)
else:
fig = self.ax.get_figure()
if self.figsize is not None:
fig.set_size_inches(self.figsize)
axes = self.ax
axes = _flatten(axes)
if self.logx or self.loglog:
[a.set_xscale('log') for a in axes]
if self.logy or self.loglog:
[a.set_yscale('log') for a in axes]
self.fig = fig
self.axes = axes
@property
def result(self):
"""
Return result axes
"""
if self.subplots:
if self.layout is not None and not is_list_like(self.ax):
return self.axes.reshape(*self.layout)
else:
return self.axes
else:
sec_true = isinstance(self.secondary_y, bool) and self.secondary_y
all_sec = (is_list_like(self.secondary_y) and
len(self.secondary_y) == self.nseries)
if (sec_true or all_sec):
# if all data is plotted on secondary, return right axes
return self._get_ax_layer(self.axes[0], primary=False)
else:
return self.axes[0]
def _compute_plot_data(self):
data = self.data
if isinstance(data, ABCSeries):
label = self.label
if label is None and data.name is None:
label = 'None'
data = data.to_frame(name=label)
# GH16953, _convert is needed as fallback, for ``Series``
# with ``dtype == object``
data = data._convert(datetime=True, timedelta=True)
numeric_data = data.select_dtypes(include=[np.number,
"datetime",
"datetimetz",
"timedelta"])
try:
is_empty = numeric_data.empty
except AttributeError:
is_empty = not len(numeric_data)
# no empty frames or series allowed
if is_empty:
raise TypeError('Empty {0!r}: no numeric data to '
'plot'.format(numeric_data.__class__.__name__))
self.data = numeric_data
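# Example (illustrative sketch; the frame is arbitrary).  Only numeric,
# datetime and timedelta columns survive ``_compute_plot_data``; a frame
# with no such columns raises:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'num': [1, 2, 3], 'txt': ['a', 'b', 'c']})
#   >>> df.plot()            # 'txt' is silently dropped, 'num' is plotted
#   >>> df[['txt']].plot()   # raises TypeError: no numeric data to plot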
def _make_plot(self):
raise AbstractMethodError(self)
def _add_table(self):
if self.table is False:
return
elif self.table is True:
data = self.data.transpose()
else:
data = self.table
ax = self._get_ax(0)
table(ax, data)
def _post_plot_logic_common(self, ax, data):
"""Common post process for each axes"""
def get_label(i):
try:
return pprint_thing(data.index[i])
except Exception:
return ''
if self.orientation == 'vertical' or self.orientation is None:
if self._need_to_set_index:
xticklabels = [get_label(x) for x in ax.get_xticks()]
ax.set_xticklabels(xticklabels)
self._apply_axis_properties(ax.xaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.yaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
elif self.orientation == 'horizontal':
if self._need_to_set_index:
yticklabels = [get_label(y) for y in ax.get_yticks()]
ax.set_yticklabels(yticklabels)
self._apply_axis_properties(ax.yaxis, rot=self.rot,
fontsize=self.fontsize)
self._apply_axis_properties(ax.xaxis, fontsize=self.fontsize)
if hasattr(ax, 'right_ax'):
self._apply_axis_properties(ax.right_ax.yaxis,
fontsize=self.fontsize)
else: # pragma: no cover
raise ValueError
def _post_plot_logic(self, ax, data):
"""Post process for each axes. Overridden in child classes"""
pass
def _adorn_subplots(self):
"""Common post process unrelated to data"""
if len(self.axes) > 0:
all_axes = self._get_subplots()
nrows, ncols = self._get_axes_layout()
_handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
naxes=nrows * ncols, nrows=nrows,
ncols=ncols, sharex=self.sharex,
sharey=self.sharey)
for ax in self.axes:
if self.yticks is not None:
ax.set_yticks(self.yticks)
if self.xticks is not None:
ax.set_xticks(self.xticks)
if self.ylim is not None:
ax.set_ylim(self.ylim)
if self.xlim is not None:
ax.set_xlim(self.xlim)
ax.grid(self.grid)
if self.title:
if self.subplots:
if is_list_like(self.title):
if len(self.title) != self.nseries:
msg = ('The length of `title` must equal the number '
'of columns if using `title` of type `list` '
'and `subplots=True`.\n'
'length of title = {}\n'
'number of columns = {}').format(
len(self.title), self.nseries)
raise ValueError(msg)
for (ax, title) in zip(self.axes, self.title):
ax.set_title(title)
else:
self.fig.suptitle(self.title)
else:
if is_list_like(self.title):
msg = ('Using `title` of type `list` is not supported '
'unless `subplots=True` is passed')
raise ValueError(msg)
self.axes[0].set_title(self.title)
def _apply_axis_properties(self, axis, rot=None, fontsize=None):
labels = axis.get_majorticklabels() + axis.get_minorticklabels()
for label in labels:
if rot is not None:
label.set_rotation(rot)
if fontsize is not None:
label.set_fontsize(fontsize)
@property
def legend_title(self):
if not isinstance(self.data.columns, MultiIndex):
name = self.data.columns.name
if name is not None:
name = pprint_thing(name)
return name
else:
stringified = map(pprint_thing,
self.data.columns.names)
return ','.join(stringified)
def _add_legend_handle(self, handle, label, index=None):
if label is not None:
if self.mark_right and index is not None:
if self.on_right(index):
label = label + ' (right)'
self.legend_handles.append(handle)
self.legend_labels.append(label)
def _make_legend(self):
ax, leg = self._get_ax_legend(self.axes[0])
handles = []
labels = []
title = ''
if not self.subplots:
if leg is not None:
title = leg.get_title().get_text()
handles = leg.legendHandles
labels = [x.get_text() for x in leg.get_texts()]
if self.legend:
if self.legend == 'reverse':
self.legend_handles = reversed(self.legend_handles)
self.legend_labels = reversed(self.legend_labels)
handles += self.legend_handles
labels += self.legend_labels
if self.legend_title is not None:
title = self.legend_title
if len(handles) > 0:
ax.legend(handles, labels, loc='best', title=title)
elif self.subplots and self.legend:
for ax in self.axes:
if ax.get_visible():
ax.legend(loc='best')
def _get_ax_legend(self, ax):
leg = ax.get_legend()
other_ax = (getattr(ax, 'left_ax', None) or
getattr(ax, 'right_ax', None))
other_leg = None
if other_ax is not None:
other_leg = other_ax.get_legend()
if leg is None and other_leg is not None:
leg = other_leg
ax = other_ax
return ax, leg
@cache_readonly
def plt(self):
import matplotlib.pyplot as plt
return plt
@staticmethod
def mpl_ge_1_3_1():
return _mpl_ge_1_3_1()
@staticmethod
def mpl_ge_1_5_0():
return _mpl_ge_1_5_0()
_need_to_set_index = False
def _get_xticks(self, convert_period=False):
index = self.data.index
is_datetype = index.inferred_type in ('datetime', 'date',
'datetime64', 'time')
if self.use_index:
if convert_period and isinstance(index, PeriodIndex):
self.data = self.data.reindex(index=index.sort_values())
x = self.data.index.to_timestamp()._mpl_repr()
elif index.is_numeric():
"""
Matplotlib supports numeric values or datetime objects as
xaxis values. Taking LBYL approach here, by the time
matplotlib raises exception when using non numeric/datetime
values for xaxis, several actions are already taken by plt.
"""
x = index._mpl_repr()
elif is_datetype:
self.data = self.data[notna(self.data.index)]
self.data = self.data.sort_index()
x = self.data.index._mpl_repr()
else:
self._need_to_set_index = True
x = lrange(len(index))
else:
x = lrange(len(index))
return x
@classmethod
def _plot(cls, ax, x, y, style=None, is_errorbar=False, **kwds):
mask = isna(y)
if mask.any():
y = np.ma.array(y)
y = np.ma.masked_where(mask, y)
if isinstance(x, Index):
x = x._mpl_repr()
if is_errorbar:
if 'xerr' in kwds:
kwds['xerr'] = np.array(kwds.get('xerr'))
if 'yerr' in kwds:
kwds['yerr'] = np.array(kwds.get('yerr'))
return ax.errorbar(x, y, **kwds)
else:
# prevent style kwarg from going to errorbar, where it is
# unsupported
if style is not None:
args = (x, y, style)
else:
args = (x, y)
return ax.plot(*args, **kwds)
def _get_index_name(self):
if isinstance(self.data.index, MultiIndex):
name = self.data.index.names
if _any_not_none(*name):
name = ','.join([pprint_thing(x) for x in name])
else:
name = None
else:
name = self.data.index.name
if name is not None:
name = pprint_thing(name)
return name
@classmethod
def _get_ax_layer(cls, ax, primary=True):
"""get left (primary) or right (secondary) axes"""
if primary:
return getattr(ax, 'left_ax', ax)
else:
return getattr(ax, 'right_ax', ax)
def _get_ax(self, i):
# get the twinx ax if appropriate
if self.subplots:
ax = self.axes[i]
ax = self._maybe_right_yaxis(ax, i)
self.axes[i] = ax
else:
ax = self.axes[0]
ax = self._maybe_right_yaxis(ax, i)
ax.get_yaxis().set_visible(True)
return ax
def on_right(self, i):
if isinstance(self.secondary_y, bool):
return self.secondary_y
if isinstance(self.secondary_y, (tuple, list, np.ndarray, Index)):
return self.data.columns[i] in self.secondary_y
def _apply_style_colors(self, colors, kwds, col_num, label):
"""
Manage style and color based on column number and its label.
Returns tuple of appropriate style and kwds which "color" may be added.
"""
style = None
if self.style is not None:
if isinstance(self.style, list):
try:
style = self.style[col_num]
except IndexError:
pass
elif isinstance(self.style, dict):
style = self.style.get(label, style)
else:
style = self.style
has_color = 'color' in kwds or self.colormap is not None
nocolor_style = style is None or re.match('[a-z]+', style) is None
if (has_color or self.subplots) and nocolor_style:
kwds['color'] = colors[col_num % len(colors)]
return style, kwds
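# Example (illustrative sketch; column names are arbitrary).  Styles can be
# given per column as a list or a dict keyed by label; cycle colors are only
# filled in when the style string carries no color symbol:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame(np.random.randn(20, 2), columns=['a', 'b'])
#   >>> df.plot(style=['-', '--'])              # colors taken from the color cycle
#   >>> df.plot(style={'a': 'r-', 'b': 'b--'})  # colors carried by the style strings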
def _get_colors(self, num_colors=None, color_kwds='color'):
if num_colors is None:
num_colors = self.nseries
return _get_standard_colors(num_colors=num_colors,
colormap=self.colormap,
color=self.kwds.get(color_kwds))
def _parse_errorbars(self, label, err):
"""
Look for error keyword arguments and return the actual errorbar data
or return the error DataFrame/dict
Error bars can be specified in several ways:
Series: the user provides a pandas.Series object of the same
length as the data
ndarray: provides a np.ndarray of the same length as the data
DataFrame/dict: error values are paired with keys matching the
key in the plotted DataFrame
str: the name of the column within the plotted DataFrame
"""
if err is None:
return None
from pandas import DataFrame, Series
def match_labels(data, e):
e = e.reindex(data.index)
return e
# key-matched DataFrame
if isinstance(err, DataFrame):
err = match_labels(self.data, err)
# key-matched dict
elif isinstance(err, dict):
pass
# Series of error values
elif isinstance(err, Series):
# broadcast error series across data
err = match_labels(self.data, err)
err = np.atleast_2d(err)
err = np.tile(err, (self.nseries, 1))
# errors are a column in the dataframe
elif isinstance(err, string_types):
evalues = self.data[err].values
self.data = self.data[self.data.columns.drop(err)]
err = np.atleast_2d(evalues)
err = np.tile(err, (self.nseries, 1))
elif is_list_like(err):
if is_iterator(err):
err = np.atleast_2d(list(err))
else:
# raw error values
err = np.atleast_2d(err)
err_shape = err.shape
# asymmetrical error bars
if err.ndim == 3:
if (err_shape[0] != self.nseries) or \
(err_shape[1] != 2) or \
(err_shape[2] != len(self.data)):
msg = "Asymmetrical error bars should be provided " + \
"with the shape (%u, 2, %u)" % \
(self.nseries, len(self.data))
raise ValueError(msg)
# broadcast errors to each data series
if len(err) == 1:
err = np.tile(err, (self.nseries, 1))
elif is_number(err):
err = np.tile([err], (self.nseries, len(self.data)))
else:
msg = "No valid %s detected" % label
raise ValueError(msg)
return err
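# Example (illustrative sketch; the frame, column names and error values are
# arbitrary).  The accepted error-bar inputs, as seen through the public
# ``yerr`` keyword:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
#   >>> df.plot(yerr=0.1)                          # scalar, broadcast to every point
#   >>> df.plot(yerr=pd.Series([0.1, 0.2, 0.3]))   # Series, broadcast across columns
#   >>> df.plot(yerr={'a': [0.1, 0.2, 0.1]})       # dict keyed by plotted column
#   >>> df.assign(err=[0.1, 0.2, 0.3]).plot(yerr='err')   # column of the frame itself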
def _get_errorbars(self, label=None, index=None, xerr=True, yerr=True):
from pandas import DataFrame
errors = {}
for kw, flag in zip(['xerr', 'yerr'], [xerr, yerr]):
if flag:
err = self.errors[kw]
# user provided label-matched dataframe of errors
if isinstance(err, (DataFrame, dict)):
if label is not None and label in err.keys():
err = err[label]
else:
err = None
elif index is not None and err is not None:
err = err[index]
if err is not None:
errors[kw] = err
return errors
def _get_subplots(self):
from matplotlib.axes import Subplot
return [ax for ax in self.axes[0].get_figure().get_axes()
if isinstance(ax, Subplot)]
def _get_axes_layout(self):
axes = self._get_subplots()
x_set = set()
y_set = set()
for ax in axes:
# check axes coordinates to estimate layout
points = ax.get_position().get_points()
x_set.add(points[0][0])
y_set.add(points[0][1])
return (len(y_set), len(x_set))
class PlanePlot(MPLPlot):
"""
Abstract class for plotting on plane, currently scatter and hexbin.
"""
_layout_type = 'single'
def __init__(self, data, x, y, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if x is None or y is None:
raise ValueError(self._kind + ' requires an x and y column')
if is_integer(x) and not self.data.columns.holds_integer():
x = self.data.columns[x]
if is_integer(y) and not self.data.columns.holds_integer():
y = self.data.columns[y]
if len(self.data[x]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires x column to be numeric')
if len(self.data[y]._get_numeric_data()) == 0:
raise ValueError(self._kind + ' requires y column to be numeric')
self.x = x
self.y = y
@property
def nseries(self):
return 1
def _post_plot_logic(self, ax, data):
x, y = self.x, self.y
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
class ScatterPlot(PlanePlot):
_kind = 'scatter'
def __init__(self, data, x, y, s=None, c=None, **kwargs):
if s is None:
# hide the matplotlib default for size, in case we want to change
# the handling of this argument later
s = 20
super(ScatterPlot, self).__init__(data, x, y, s=s, **kwargs)
if is_integer(c) and not self.data.columns.holds_integer():
c = self.data.columns[c]
self.c = c
def _make_plot(self):
x, y, c, data = self.x, self.y, self.c, self.data
ax = self.axes[0]
c_is_column = is_hashable(c) and c in self.data.columns
# plot a colorbar only if a colormap is provided or necessary
cb = self.kwds.pop('colorbar', self.colormap or c_is_column)
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'Greys'
cmap = self.plt.cm.get_cmap(cmap)
color = self.kwds.pop("color", None)
if c is not None and color is not None:
raise TypeError('Specify exactly one of `c` and `color`')
elif c is None and color is None:
c_values = self.plt.rcParams['patch.facecolor']
elif color is not None:
c_values = color
elif c_is_column:
c_values = self.data[c].values
else:
c_values = c
if self.legend and hasattr(self, 'label'):
label = self.label
else:
label = None
scatter = ax.scatter(data[x].values, data[y].values, c=c_values,
label=label, cmap=cmap, **self.kwds)
if cb:
img = ax.collections[0]
kws = dict(ax=ax)
if self.mpl_ge_1_3_1():
kws['label'] = c if c_is_column else ''
self.fig.colorbar(img, **kws)
if label is not None:
self._add_legend_handle(scatter, label)
else:
self.legend = False
errors_x = self._get_errorbars(label=x, index=0, yerr=False)
errors_y = self._get_errorbars(label=y, index=0, xerr=False)
if len(errors_x) > 0 or len(errors_y) > 0:
err_kwds = dict(errors_x, **errors_y)
err_kwds['ecolor'] = scatter.get_facecolor()[0]
ax.errorbar(data[x].values, data[y].values,
linestyle='none', **err_kwds)
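# Example (illustrative sketch; column names are arbitrary).  ``c`` may be a
# column name, in which case its values color the points and a colorbar is
# drawn by default:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame(np.random.rand(30, 3), columns=['x', 'y', 'z'])
#   >>> df.plot.scatter(x='x', y='y')            # single default color
#   >>> df.plot.scatter(x='x', y='y', c='z')     # colored by column 'z', with colorbar
#   >>> df.plot.scatter(x='x', y='y', c='z', colormap='viridis')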
class HexBinPlot(PlanePlot):
_kind = 'hexbin'
def __init__(self, data, x, y, C=None, **kwargs):
super(HexBinPlot, self).__init__(data, x, y, **kwargs)
if is_integer(C) and not self.data.columns.holds_integer():
C = self.data.columns[C]
self.C = C
def _make_plot(self):
x, y, data, C = self.x, self.y, self.data, self.C
ax = self.axes[0]
# pandas uses colormap, matplotlib uses cmap.
cmap = self.colormap or 'BuGn'
cmap = self.plt.cm.get_cmap(cmap)
cb = self.kwds.pop('colorbar', True)
if C is None:
c_values = None
else:
c_values = data[C].values
ax.hexbin(data[x].values, data[y].values, C=c_values, cmap=cmap,
**self.kwds)
if cb:
img = ax.collections[0]
self.fig.colorbar(img, ax=ax)
def _make_legend(self):
pass
class LinePlot(MPLPlot):
_kind = 'line'
_default_rot = 0
orientation = 'vertical'
def __init__(self, data, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
if self.stacked:
self.data = self.data.fillna(value=0)
self.x_compat = plot_params['x_compat']
if 'x_compat' in self.kwds:
self.x_compat = bool(self.kwds.pop('x_compat'))
def _is_ts_plot(self):
# this is slightly deceptive
return not self.x_compat and self.use_index and self._use_dynamic_x()
def _use_dynamic_x(self):
from pandas.plotting._timeseries import _use_dynamic_x
return _use_dynamic_x(self._get_ax(0), self.data)
def _make_plot(self):
if self._is_ts_plot():
from pandas.plotting._timeseries import _maybe_convert_index
data = _maybe_convert_index(self._get_ax(0), self.data)
x = data.index # dummy, not used
plotf = self._ts_plot
it = self._iter_data(data=data, keep_index=True)
else:
x = self._get_xticks(convert_period=True)
plotf = self._plot
it = self._iter_data()
stacking_id = self._get_stacking_id()
is_errorbar = _any_not_none(*self.errors.values())
colors = self._get_colors()
for i, (label, y) in enumerate(it):
ax = self._get_ax(i)
kwds = self.kwds.copy()
style, kwds = self._apply_style_colors(colors, kwds, i, label)
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label) # .encode('utf-8')
kwds['label'] = label
newlines = plotf(ax, x, y, style=style, column_num=i,
stacking_id=stacking_id,
is_errorbar=is_errorbar,
**kwds)
self._add_legend_handle(newlines[0], label, index=i)
if not _mpl_ge_2_0_0():
lines = _get_all_lines(ax)
left, right = _get_xlim(lines)
ax.set_xlim(left, right)
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, **kwds):
# column_num is used to get the target column from plotf in line and
# area plots
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
lines = MPLPlot._plot(ax, x, y_values, style=style, **kwds)
cls._update_stacker(ax, stacking_id, y)
return lines
@classmethod
def _ts_plot(cls, ax, x, data, style=None, **kwds):
from pandas.plotting._timeseries import (_maybe_resample,
_decorate_axes,
format_dateaxis)
# accept x to be consistent with normal plot func,
# x is not passed to tsplot as it uses data.index as x coordinate
# column_num must be in kwds for stacking purpose
freq, data = _maybe_resample(data, ax, kwds)
# Set ax with freq info
_decorate_axes(ax, freq, kwds)
# digging deeper
if hasattr(ax, 'left_ax'):
_decorate_axes(ax.left_ax, freq, kwds)
if hasattr(ax, 'right_ax'):
_decorate_axes(ax.right_ax, freq, kwds)
ax._plot_data.append((data, cls._kind, kwds))
lines = cls._plot(ax, data.index, data.values, style=style, **kwds)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq, data.index)
return lines
def _get_stacking_id(self):
if self.stacked:
return id(self.data)
else:
return None
@classmethod
def _initialize_stacker(cls, ax, stacking_id, n):
if stacking_id is None:
return
if not hasattr(ax, '_stacker_pos_prior'):
ax._stacker_pos_prior = {}
if not hasattr(ax, '_stacker_neg_prior'):
ax._stacker_neg_prior = {}
ax._stacker_pos_prior[stacking_id] = np.zeros(n)
ax._stacker_neg_prior[stacking_id] = np.zeros(n)
@classmethod
def _get_stacked_values(cls, ax, stacking_id, values, label):
if stacking_id is None:
return values
if not hasattr(ax, '_stacker_pos_prior'):
# stacker may not be initialized for subplots
cls._initialize_stacker(ax, stacking_id, len(values))
if (values >= 0).all():
return ax._stacker_pos_prior[stacking_id] + values
elif (values <= 0).all():
return ax._stacker_neg_prior[stacking_id] + values
raise ValueError('When stacked is True, each column must be either '
'all positive or all negative. '
'{0} contains both positive and negative values'
.format(label))
@classmethod
def _update_stacker(cls, ax, stacking_id, values):
if stacking_id is None:
return
if (values >= 0).all():
ax._stacker_pos_prior[stacking_id] += values
elif (values <= 0).all():
ax._stacker_neg_prior[stacking_id] += values
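# Example (illustrative sketch; the frames are arbitrary).  The stacker above
# is why stacked line/area/hist plots reject a column that mixes signs:
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 1, 4]})
#   >>> df.plot(stacked=True)     # fine: every column is all-positive
#   >>> df_mixed = pd.DataFrame({'a': [1, -2, 3], 'b': [2, 1, 4]})
#   >>> df_mixed.plot.area()      # raises ValueError: column 'a' mixes signs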
def _post_plot_logic(self, ax, data):
condition = (not self._use_dynamic_x() and
data.index.is_all_dates and
not self.subplots or
(self.subplots and self.sharex))
index_name = self._get_index_name()
if condition:
# irregular TS rotated 30 deg. by default
# probably a better place to check / set this.
if not self._rot_set:
self.rot = 30
format_date_labels(ax, rot=self.rot)
if index_name is not None and self.use_index:
ax.set_xlabel(index_name)
class AreaPlot(LinePlot):
_kind = 'area'
def __init__(self, data, **kwargs):
kwargs.setdefault('stacked', True)
data = data.fillna(value=0)
LinePlot.__init__(self, data, **kwargs)
if not self.stacked:
# use smaller alpha to distinguish overlap
self.kwds.setdefault('alpha', 0.5)
if self.logy or self.loglog:
raise ValueError("Log-y scales are not supported in area plot")
@classmethod
def _plot(cls, ax, x, y, style=None, column_num=None,
stacking_id=None, is_errorbar=False, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(y))
y_values = cls._get_stacked_values(ax, stacking_id, y, kwds['label'])
# remove 'label'; when subplots=True, matplotlib's own legend is used as-is
line_kwds = kwds.copy()
if cls.mpl_ge_1_5_0():
line_kwds.pop('label')
lines = MPLPlot._plot(ax, x, y_values, style=style, **line_kwds)
# get data from the line to get coordinates for fill_between
xdata, y_values = lines[0].get_data(orig=False)
# unable to use ``_get_stacked_values`` here to get starting point
if stacking_id is None:
start = np.zeros(len(y))
elif (y >= 0).all():
start = ax._stacker_pos_prior[stacking_id]
elif (y <= 0).all():
start = ax._stacker_neg_prior[stacking_id]
else:
start = np.zeros(len(y))
if 'color' not in kwds:
kwds['color'] = lines[0].get_color()
rect = ax.fill_between(xdata, start, y_values, **kwds)
cls._update_stacker(ax, stacking_id, y)
# LinePlot expects list of artists
res = [rect] if cls.mpl_ge_1_5_0() else lines
return res
def _add_legend_handle(self, handle, label, index=None):
if not self.mpl_ge_1_5_0():
from matplotlib.patches import Rectangle
# Because fill_between isn't supported in legend,
# specifically add Rectangle handle here
alpha = self.kwds.get('alpha', None)
handle = Rectangle((0, 0), 1, 1, fc=handle.get_color(),
alpha=alpha)
LinePlot._add_legend_handle(self, handle, label, index=index)
def _post_plot_logic(self, ax, data):
LinePlot._post_plot_logic(self, ax, data)
if self.ylim is None:
if (data >= 0).all().all():
ax.set_ylim(0, None)
elif (data <= 0).all().all():
ax.set_ylim(None, 0)
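# Example (illustrative sketch; the frame is arbitrary).  Area plots stack by
# default; unstacked plots fall back to translucent overlapping fills, and
# log-y scaling is rejected:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame(np.random.rand(10, 3), columns=['a', 'b', 'c'])
#   >>> df.plot.area()                 # stacked=True by default, y-limits pinned at 0
#   >>> df.plot.area(stacked=False)    # overlapping fills, alpha defaults to 0.5
#   >>> df.plot.area(logy=True)        # raises ValueError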
class BarPlot(MPLPlot):
_kind = 'bar'
_default_rot = 90
orientation = 'vertical'
def __init__(self, data, **kwargs):
# we have to treat a series differently than a
# 1-column DataFrame w.r.t. color handling
self._is_series = isinstance(data, ABCSeries)
self.bar_width = kwargs.pop('width', 0.5)
pos = kwargs.pop('position', 0.5)
kwargs.setdefault('align', 'center')
self.tick_pos = np.arange(len(data))
self.bottom = kwargs.pop('bottom', 0)
self.left = kwargs.pop('left', 0)
self.log = kwargs.pop('log', False)
MPLPlot.__init__(self, data, **kwargs)
if self.stacked or self.subplots:
self.tickoffset = self.bar_width * pos
if kwargs['align'] == 'edge':
self.lim_offset = self.bar_width / 2
else:
self.lim_offset = 0
else:
if kwargs['align'] == 'edge':
w = self.bar_width / self.nseries
self.tickoffset = self.bar_width * (pos - 0.5) + w * 0.5
self.lim_offset = w * 0.5
else:
self.tickoffset = self.bar_width * pos
self.lim_offset = 0
self.ax_pos = self.tick_pos - self.tickoffset
def _args_adjust(self):
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
if is_list_like(self.left):
self.left = np.array(self.left)
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.bar(x, y, w, bottom=start, log=log, **kwds)
@property
def _start_base(self):
return self.bottom
def _make_plot(self):
import matplotlib as mpl
colors = self._get_colors()
ncolors = len(colors)
pos_prior = neg_prior = np.zeros(len(self.data))
K = self.nseries
for i, (label, y) in enumerate(self._iter_data(fillna=0)):
ax = self._get_ax(i)
kwds = self.kwds.copy()
if self._is_series:
kwds['color'] = colors
else:
kwds['color'] = colors[i % ncolors]
errors = self._get_errorbars(label=label, index=i)
kwds = dict(kwds, **errors)
label = pprint_thing(label)
if (('yerr' in kwds) or ('xerr' in kwds)) \
and (kwds.get('ecolor') is None):
kwds['ecolor'] = mpl.rcParams['xtick.color']
start = 0
if self.log and (y >= 1).all():
start = 1
start = start + self._start_base
if self.subplots:
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
ax.set_title(label)
elif self.stacked:
mask = y > 0
start = np.where(mask, pos_prior, neg_prior) + self._start_base
w = self.bar_width / 2
rect = self._plot(ax, self.ax_pos + w, y, self.bar_width,
start=start, label=label,
log=self.log, **kwds)
pos_prior = pos_prior + np.where(mask, y, 0)
neg_prior = neg_prior + np.where(mask, 0, y)
else:
w = self.bar_width / K
rect = self._plot(ax, self.ax_pos + (i + 0.5) * w, y, w,
start=start, label=label,
log=self.log, **kwds)
self._add_legend_handle(rect, label, index=i)
def _post_plot_logic(self, ax, data):
if self.use_index:
str_index = [pprint_thing(key) for key in data.index]
else:
str_index = [pprint_thing(key) for key in range(data.shape[0])]
name = self._get_index_name()
s_edge = self.ax_pos[0] - 0.25 + self.lim_offset
e_edge = self.ax_pos[-1] + 0.25 + self.bar_width + self.lim_offset
self._decorate_ticks(ax, name, str_index, s_edge, e_edge)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
ax.set_xlim((start_edge, end_edge))
ax.set_xticks(self.tick_pos)
ax.set_xticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_xlabel(name)
class BarhPlot(BarPlot):
_kind = 'barh'
_default_rot = 0
orientation = 'horizontal'
@property
def _start_base(self):
return self.left
@classmethod
def _plot(cls, ax, x, y, w, start=0, log=False, **kwds):
return ax.barh(x, y, w, left=start, log=log, **kwds)
def _decorate_ticks(self, ax, name, ticklabels, start_edge, end_edge):
# horizontal bars
ax.set_ylim((start_edge, end_edge))
ax.set_yticks(self.tick_pos)
ax.set_yticklabels(ticklabels)
if name is not None and self.use_index:
ax.set_ylabel(name)
class HistPlot(LinePlot):
_kind = 'hist'
def __init__(self, data, bins=10, bottom=0, **kwargs):
self.bins = bins # use mpl default
self.bottom = bottom
# Do not call LinePlot.__init__ which may fill nan
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if is_integer(self.bins):
# create common bin edge
values = (self.data._convert(datetime=True)._get_numeric_data())
values = np.ravel(values)
values = values[~isna(values)]
hist, self.bins = np.histogram(
values, bins=self.bins,
range=self.kwds.get('range', None),
weights=self.kwds.get('weights', None))
if is_list_like(self.bottom):
self.bottom = np.array(self.bottom)
@classmethod
def _plot(cls, ax, y, style=None, bins=None, bottom=0, column_num=0,
stacking_id=None, **kwds):
if column_num == 0:
cls._initialize_stacker(ax, stacking_id, len(bins) - 1)
y = y[~isna(y)]
base = np.zeros(len(bins) - 1)
bottom = bottom + \
cls._get_stacked_values(ax, stacking_id, base, kwds['label'])
# ignore style
n, bins, patches = ax.hist(y, bins=bins, bottom=bottom, **kwds)
cls._update_stacker(ax, stacking_id, n)
return patches
def _make_plot(self):
colors = self._get_colors()
stacking_id = self._get_stacking_id()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
label = pprint_thing(label)
kwds['label'] = label
style, kwds = self._apply_style_colors(colors, kwds, i, label)
if style is not None:
kwds['style'] = style
kwds = self._make_plot_keywords(kwds, y)
artists = self._plot(ax, y, column_num=i,
stacking_id=stacking_id, **kwds)
self._add_legend_handle(artists[0], label, index=i)
def _make_plot_keywords(self, kwds, y):
"""merge BoxPlot/KdePlot properties to passed kwds"""
# y is required for KdePlot
kwds['bottom'] = self.bottom
kwds['bins'] = self.bins
return kwds
def _post_plot_logic(self, ax, data):
if self.orientation == 'horizontal':
ax.set_xlabel('Frequency')
else:
ax.set_ylabel('Frequency')
@property
def orientation(self):
if self.kwds.get('orientation', None) == 'horizontal':
return 'horizontal'
else:
return 'vertical'
class KdePlot(HistPlot):
_kind = 'kde'
orientation = 'vertical'
def __init__(self, data, bw_method=None, ind=None, **kwargs):
MPLPlot.__init__(self, data, **kwargs)
self.bw_method = bw_method
self.ind = ind
def _args_adjust(self):
pass
def _get_ind(self, y):
if self.ind is None:
# np.nanmax() and np.nanmin() ignore the missing values
sample_range = np.nanmax(y) - np.nanmin(y)
ind = np.linspace(np.nanmin(y) - 0.5 * sample_range,
np.nanmax(y) + 0.5 * sample_range, 1000)
else:
ind = self.ind
return ind
@classmethod
def _plot(cls, ax, y, style=None, bw_method=None, ind=None,
column_num=None, stacking_id=None, **kwds):
from scipy.stats import gaussian_kde
from scipy import __version__ as spv
y = remove_na_arraylike(y)
if LooseVersion(spv) >= '0.11.0':
gkde = gaussian_kde(y, bw_method=bw_method)
else:
gkde = gaussian_kde(y)
if bw_method is not None:
msg = ('bw_method was added in Scipy 0.11.0.' +
' Scipy version in use is %s.' % spv)
warnings.warn(msg)
y = gkde.evaluate(ind)
lines = MPLPlot._plot(ax, ind, y, style=style, **kwds)
return lines
def _make_plot_keywords(self, kwds, y):
kwds['bw_method'] = self.bw_method
kwds['ind'] = self._get_ind(y)
return kwds
def _post_plot_logic(self, ax, data):
ax.set_ylabel('Density')
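# Example (illustrative sketch; the sample data is arbitrary).  The default
# evaluation grid is 1000 points spanning the sample range padded by half a
# range on each side; ``ind`` and ``bw_method`` override the defaults:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> s = pd.Series(np.random.randn(200))
#   >>> s.plot.kde()                               # default grid and bandwidth
#   >>> s.plot.kde(bw_method=0.3)                  # narrower bandwidth
#   >>> s.plot.kde(ind=np.linspace(-4, 4, 50))     # explicit evaluation points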
class PiePlot(MPLPlot):
_kind = 'pie'
_layout_type = 'horizontal'
def __init__(self, data, kind=None, **kwargs):
data = data.fillna(value=0)
if (data < 0).any().any():
raise ValueError("{0} doesn't allow negative values".format(kind))
MPLPlot.__init__(self, data, kind=kind, **kwargs)
def _args_adjust(self):
self.grid = False
self.logy = False
self.logx = False
self.loglog = False
def _validate_color_args(self):
pass
def _make_plot(self):
colors = self._get_colors(
num_colors=len(self.data), color_kwds='colors')
self.kwds.setdefault('colors', colors)
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
if label is not None:
label = pprint_thing(label)
ax.set_ylabel(label)
kwds = self.kwds.copy()
def blank_labeler(label, value):
if value == 0:
return ''
else:
return label
idx = [pprint_thing(v) for v in self.data.index]
labels = kwds.pop('labels', idx)
# labels is used for each wedge's labels
# Blank out labels for values of 0 so they don't overlap
# with nonzero wedges
if labels is not None:
blabels = [blank_labeler(l, value) for
l, value in zip(labels, y)]
else:
blabels = None
results = ax.pie(y, labels=blabels, **kwds)
if kwds.get('autopct', None) is not None:
patches, texts, autotexts = results
else:
patches, texts = results
autotexts = []
if self.fontsize is not None:
for t in texts + autotexts:
t.set_fontsize(self.fontsize)
# leglabels is used for legend labels
leglabels = labels if labels is not None else idx
for p, l in zip(patches, leglabels):
self._add_legend_handle(p, l)
class BoxPlot(LinePlot):
_kind = 'box'
_layout_type = 'horizontal'
_valid_return_types = (None, 'axes', 'dict', 'both')
# namedtuple to hold results
BP = namedtuple("Boxplot", ['ax', 'lines'])
def __init__(self, data, return_type='axes', **kwargs):
# Do not call LinePlot.__init__ which may fill nan
if return_type not in self._valid_return_types:
raise ValueError(
"return_type must be {None, 'axes', 'dict', 'both'}")
self.return_type = return_type
MPLPlot.__init__(self, data, **kwargs)
def _args_adjust(self):
if self.subplots:
# Disable label axis sharing. Otherwise, all subplots show the
# last column's label
if self.orientation == 'vertical':
self.sharex = False
else:
self.sharey = False
@classmethod
def _plot(cls, ax, y, column_num=None, return_type='axes', **kwds):
if y.ndim == 2:
y = [remove_na_arraylike(v) for v in y]
# Boxplot fails with empty arrays, so need to add a NaN
# if any cols are empty
# GH 8181
y = [v if v.size > 0 else np.array([np.nan]) for v in y]
else:
y = remove_na_arraylike(y)
bp = ax.boxplot(y, **kwds)
if return_type == 'dict':
return bp, bp
elif return_type == 'both':
return cls.BP(ax=ax, lines=bp), bp
else:
return ax, bp
def _validate_color_args(self):
if 'color' in self.kwds:
if self.colormap is not None:
warnings.warn("'color' and 'colormap' cannot be used "
"simultaneously. Using 'color'")
self.color = self.kwds.pop('color')
if isinstance(self.color, dict):
valid_keys = ['boxes', 'whiskers', 'medians', 'caps']
for key, values in compat.iteritems(self.color):
if key not in valid_keys:
raise ValueError("color dict contains invalid "
"key '{0}' "
"The key must be either {1}"
.format(key, valid_keys))
else:
self.color = None
# get standard colors for default
colors = _get_standard_colors(num_colors=3,
colormap=self.colormap,
color=None)
# use 2 colors by default, for box/whisker and median
# flier color isn't needed here
# because it can be specified by the ``sym`` kwarg
self._boxes_c = colors[0]
self._whiskers_c = colors[0]
self._medians_c = colors[2]
self._caps_c = 'k' # mpl default
def _get_colors(self, num_colors=None, color_kwds='color'):
pass
def maybe_color_bp(self, bp):
if isinstance(self.color, dict):
boxes = self.color.get('boxes', self._boxes_c)
whiskers = self.color.get('whiskers', self._whiskers_c)
medians = self.color.get('medians', self._medians_c)
caps = self.color.get('caps', self._caps_c)
else:
# Other types are forwarded to matplotlib
# If None, use default colors
boxes = self.color or self._boxes_c
whiskers = self.color or self._whiskers_c
medians = self.color or self._medians_c
caps = self.color or self._caps_c
from matplotlib.artist import setp
setp(bp['boxes'], color=boxes, alpha=1)
setp(bp['whiskers'], color=whiskers, alpha=1)
setp(bp['medians'], color=medians, alpha=1)
setp(bp['caps'], color=caps, alpha=1)
def _make_plot(self):
if self.subplots:
from pandas.core.series import Series
self._return_obj = Series()
for i, (label, y) in enumerate(self._iter_data()):
ax = self._get_ax(i)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=i,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj[label] = ret
label = [pprint_thing(label)]
self._set_ticklabels(ax, label)
else:
y = self.data.values.T
ax = self._get_ax(0)
kwds = self.kwds.copy()
ret, bp = self._plot(ax, y, column_num=0,
return_type=self.return_type, **kwds)
self.maybe_color_bp(bp)
self._return_obj = ret
labels = [l for l, _ in self._iter_data()]
labels = [pprint_thing(l) for l in labels]
if not self.use_index:
labels = [pprint_thing(key) for key in range(len(labels))]
self._set_ticklabels(ax, labels)
def _set_ticklabels(self, ax, labels):
if self.orientation == 'vertical':
ax.set_xticklabels(labels)
else:
ax.set_yticklabels(labels)
def _make_legend(self):
pass
def _post_plot_logic(self, ax, data):
pass
@property
def orientation(self):
if self.kwds.get('vert', True):
return 'vertical'
else:
return 'horizontal'
@property
def result(self):
if self.return_type is None:
return super(BoxPlot, self).result
else:
return self._return_obj
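# Example (illustrative sketch; the frame is arbitrary).  What each
# ``return_type`` hands back:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame(np.random.randn(10, 2), columns=['a', 'b'])
#   >>> df.plot.box()                       # return_type='axes' (default): the Axes
#   >>> df.plot.box(return_type='dict')     # dict of matplotlib Lines (boxes, caps, ...)
#   >>> df.plot.box(return_type='both')     # namedtuple with .ax and .lines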
# kinds supported by both dataframe and series
_common_kinds = ['line', 'bar', 'barh',
'kde', 'density', 'area', 'hist', 'box']
# kinds supported by dataframe
_dataframe_kinds = ['scatter', 'hexbin']
# kinds supported only by series or dataframe single column
_series_kinds = ['pie']
_all_kinds = _common_kinds + _dataframe_kinds + _series_kinds
_klasses = [LinePlot, BarPlot, BarhPlot, KdePlot, HistPlot, BoxPlot,
ScatterPlot, HexBinPlot, AreaPlot, PiePlot]
_plot_klass = {}
for klass in _klasses:
_plot_klass[klass._kind] = klass
def _plot(data, x=None, y=None, subplots=False,
ax=None, kind='line', **kwds):
kind = _get_standard_kind(kind.lower().strip())
if kind in _all_kinds:
klass = _plot_klass[kind]
else:
raise ValueError("%r is not a valid plot kind" % kind)
from pandas import DataFrame
if kind in _dataframe_kinds:
if isinstance(data, DataFrame):
plot_obj = klass(data, x=x, y=y, subplots=subplots, ax=ax,
kind=kind, **kwds)
else:
raise ValueError("plot kind %r can only be used for data frames"
% kind)
elif kind in _series_kinds:
if isinstance(data, DataFrame):
if y is None and subplots is False:
msg = "{0} requires either y column or 'subplots=True'"
raise ValueError(msg.format(kind))
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
else:
if isinstance(data, DataFrame):
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data.columns[x]
data = data.set_index(x)
if y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
label = kwds['label'] if 'label' in kwds else y
series = data[y].copy() # Don't modify
series.name = label
for kw in ['xerr', 'yerr']:
if (kw in kwds) and \
(isinstance(kwds[kw], string_types) or
is_integer(kwds[kw])):
try:
kwds[kw] = data[kwds[kw]]
except (IndexError, KeyError, TypeError):
pass
data = series
plot_obj = klass(data, subplots=subplots, ax=ax, kind=kind, **kwds)
plot_obj.generate()
plot_obj.draw()
return plot_obj.result
df_kind = """- 'scatter' : scatter plot
- 'hexbin' : hexbin plot"""
series_kind = ""
df_coord = """x : label or position, default None
y : label or position, default None
Allows plotting of one column versus another"""
series_coord = ""
df_unique = """stacked : boolean, default False in line and
bar plots, and True in area plot. If True, create stacked plot.
sort_columns : boolean, default False
Sort column names to determine plot ordering
secondary_y : boolean or sequence, default False
Whether to plot on the secondary y-axis
If a list/tuple, which columns to plot on secondary y-axis"""
series_unique = """label : label argument to provide to plot
secondary_y : boolean or sequence of ints, default False
If True then y-axis will be on the right"""
df_ax = """ax : matplotlib axes object, default None
subplots : boolean, default False
Make separate subplots for each column
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in; Be aware, that passing in both an ax and sharex=True
will alter all x axis labels for all axis in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
layout : tuple (optional)
(rows, columns) for the layout of subplots"""
series_ax = """ax : matplotlib axes object
If not passed, uses gca()"""
df_note = """- If `kind` = 'scatter' and the argument `c` is the name of a dataframe
column, the values of that column are used to color each point.
- If `kind` = 'hexbin', you can control the size of the bins with the
`gridsize` argument. By default, a histogram of the counts around each
`(x, y)` point is computed. You can specify alternative aggregations
by passing values to the `C` and `reduce_C_function` arguments.
`C` specifies the value at each `(x, y)` point and `reduce_C_function`
is a function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`)."""
series_note = ""
_shared_doc_df_kwargs = dict(klass='DataFrame', klass_obj='df',
klass_kind=df_kind, klass_coord=df_coord,
klass_ax=df_ax, klass_unique=df_unique,
klass_note=df_note)
_shared_doc_series_kwargs = dict(klass='Series', klass_obj='s',
klass_kind=series_kind,
klass_coord=series_coord, klass_ax=series_ax,
klass_unique=series_unique,
klass_note=series_note)
_shared_docs['plot'] = """
Make plots of %(klass)s using matplotlib / pylab.
*New in version 0.17.0:* Each plot kind has a corresponding method on the
``%(klass)s.plot`` accessor:
``%(klass_obj)s.plot(kind='line')`` is equivalent to
``%(klass_obj)s.plot.line()``.
Parameters
----------
data : %(klass)s
%(klass_coord)s
kind : str
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
%(klass_kind)s
%(klass_ax)s
figsize : a tuple (width, height) in inches
use_index : boolean, default True
Use index as ticks for x axis
title : string or list
Title to use for the plot. If a string is passed, print the string at
the top of the figure. If a list is passed and `subplots` is True,
print each item in the list above the corresponding subplot.
grid : boolean, default None (matlab style default)
Axis grid lines
legend : False/True/'reverse'
Place legend on axis subplots
style : list or dict
matplotlib line style per column
logx : boolean, default False
Use log scaling on x axis
logy : boolean, default False
Use log scaling on y axis
loglog : boolean, default False
Use log scaling on both x and y axes
xticks : sequence
Values to use for the xticks
yticks : sequence
Values to use for the yticks
xlim : 2-tuple/list
ylim : 2-tuple/list
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal plots)
fontsize : int, default None
Font size for xticks and yticks
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that name
from matplotlib.
colorbar : boolean, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin' plots)
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
table : boolean, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data will
be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : same types as yerr.
%(klass_unique)s
mark_right : boolean, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend
kwds : keywords
Options to pass to matplotlib plotting method
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 (center)
%(klass_note)s
"""
@Appender(_shared_docs['plot'] % _shared_doc_df_kwargs)
def plot_frame(data, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False,
**kwds):
return _plot(data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
secondary_y=secondary_y, sort_columns=sort_columns,
**kwds)
@Appender(_shared_docs['plot'] % _shared_doc_series_kwargs)
def plot_series(data, kind='line', ax=None, # Series unique
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, # Series unique
**kwds):
import matplotlib.pyplot as plt
if ax is None and len(plt.get_fignums()) > 0:
ax = _gca()
ax = MPLPlot._get_ax_layer(ax)
return _plot(data, kind=kind, ax=ax,
figsize=figsize, use_index=use_index, title=title,
grid=grid, legend=legend,
style=style, logx=logx, logy=logy, loglog=loglog,
xticks=xticks, yticks=yticks, xlim=xlim, ylim=ylim,
rot=rot, fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr,
label=label, secondary_y=secondary_y,
**kwds)
_shared_docs['boxplot'] = """
Make a box plot from DataFrame column optionally grouped by some columns or
other inputs
Parameters
----------
data : the pandas object holding the data
column : column name or list of names, or vector
Can be any valid input to groupby
by : string or sequence
Column in the DataFrame to group by
ax : Matplotlib axes object, optional
fontsize : int or string
rot : label rotation angle
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
layout : tuple (optional)
(rows, columns) for the layout of the plot
return_type : {None, 'axes', 'dict', 'both'}, default None
The kind of object to return. The default is ``axes``
'axes' returns the matplotlib axes the boxplot is drawn on;
'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot;
'both' returns a namedtuple with the axes and dict.
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned, unless ``return_type`` is None, in which case a NumPy
array of axes is returned with the same shape as ``layout``.
See the prose documentation for more.
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
lines : dict
ax : matplotlib Axes
(ax, lines): namedtuple
Notes
-----
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
"""
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(data, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
# validate return_type:
if return_type not in BoxPlot._valid_return_types:
raise ValueError("return_type must be {'axes', 'dict', 'both'}")
from pandas import Series, DataFrame
if isinstance(data, Series):
data = DataFrame({'x': data})
column = 'x'
def _get_colors():
return _get_standard_colors(color=kwds.get('color'), num_colors=1)
def maybe_color_bp(bp):
if 'color' not in kwds:
from matplotlib.artist import setp
setp(bp['boxes'], color=colors[0], alpha=1)
setp(bp['whiskers'], color=colors[0], alpha=1)
setp(bp['medians'], color=colors[2], alpha=1)
def plot_group(keys, values, ax):
keys = [pprint_thing(x) for x in keys]
values = [np.asarray(remove_na_arraylike(v)) for v in values]
bp = ax.boxplot(values, **kwds)
if fontsize is not None:
ax.tick_params(axis='both', labelsize=fontsize)
if kwds.get('vert', 1):
ax.set_xticklabels(keys, rotation=rot)
else:
ax.set_yticklabels(keys, rotation=rot)
maybe_color_bp(bp)
# Return axes in multiplot case, maybe revisit later # 985
if return_type == 'dict':
return bp
elif return_type == 'both':
return BoxPlot.BP(ax=ax, lines=bp)
else:
return ax
colors = _get_colors()
if column is None:
columns = None
else:
if isinstance(column, (list, tuple)):
columns = column
else:
columns = [column]
if by is not None:
# Prefer array return type for 2-D plots to match the subplot layout
# https://github.com/pandas-dev/pandas/pull/12216#issuecomment-241175580
result = _grouped_plot_by_column(plot_group, data, columns=columns,
by=by, grid=grid, figsize=figsize,
ax=ax, layout=layout,
return_type=return_type)
else:
if return_type is None:
return_type = 'axes'
if layout is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
if ax is None:
rc = {'figure.figsize': figsize} if figsize is not None else {}
ax = _gca(rc)
data = data._get_numeric_data()
if columns is None:
columns = data.columns
else:
data = data[columns]
result = plot_group(columns, data.values.T, ax)
ax.grid(grid)
return result
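# Example (illustrative sketch; the frame and column names are arbitrary).
# Grouped and ungrouped calls into ``boxplot``:
#
#   >>> import numpy as np
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'val': np.random.randn(30),
#   ...                    'grp': list('abc') * 10})
#   >>> df.boxplot(column='val')                       # single Axes returned
#   >>> df.boxplot(column='val', by='grp')             # one box per group
#   >>> df.boxplot(column='val', return_type='both')   # namedtuple with .ax and .lines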
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot_frame(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None,
return_type=None, **kwds):
import matplotlib.pyplot as plt
_converter._WARN = False
ax = boxplot(self, column=column, by=by, ax=ax, fontsize=fontsize,
grid=grid, rot=rot, figsize=figsize, layout=layout,
return_type=return_type, **kwds)
plt.draw_if_interactive()
return ax
def scatter_plot(data, x, y, by=None, ax=None, figsize=None, grid=False,
**kwargs):
"""
Make a scatter plot from two DataFrame columns
Parameters
----------
data : DataFrame
x : Column name for the x-axis values
y : Column name for the y-axis values
by : column name or sequence, optional
If passed, the data is grouped by this column and one subplot is drawn per group
ax : Matplotlib axis object
figsize : A tuple (width, height) in inches
grid : Setting this to True will show the grid
kwargs : other plotting keyword arguments
To be passed to scatter function
Returns
-------
fig : matplotlib.Figure
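Examples
--------
A minimal sketch, assuming a DataFrame ``df`` with numeric columns 'a' and 'b':
>>> fig = scatter_plot(df, 'a', 'b')  # doctest: +SKIP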
"""
import matplotlib.pyplot as plt
kwargs.setdefault('edgecolors', 'none')
def plot_group(group, ax):
xvals = group[x].values
yvals = group[y].values
ax.scatter(xvals, yvals, **kwargs)
ax.grid(grid)
if by is not None:
fig = _grouped_plot(plot_group, data, by=by, figsize=figsize, ax=ax)
else:
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
fig = ax.get_figure()
plot_group(data, ax)
ax.set_ylabel(pprint_thing(y))
ax.set_xlabel(pprint_thing(x))
ax.grid(grid)
return fig
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
Parameters
----------
data : DataFrame
column : string or sequence
If passed, will be used to limit data to a subset of columns
by : object, optional
If passed, then used to form histograms for separate groups
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
ax : matplotlib axes object, default None
sharex : boolean, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in. Be aware that passing in both an ax and sharex=True
will alter all x axis labels for all subplots in a figure!
sharey : boolean, default False
In case subplots=True, share y axis and set some y axis labels to
invisible
figsize : tuple
The size of the figure to create in inches by default
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms
bins : integer, default 10
Number of histogram bins to be used
kwds : other plotting keyword arguments
To be passed to hist function
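Examples
--------
A minimal sketch, assuming a DataFrame ``df`` of numeric columns:
>>> axes = df.hist(bins=20, figsize=(8, 6))  # doctest: +SKIP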
"""
_converter._WARN = False
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid,
figsize=figsize, sharex=sharex, sharey=sharey,
layout=layout, bins=bins, xlabelsize=xlabelsize,
xrot=xrot, ylabelsize=ylabelsize,
yrot=yrot, **kwds)
return axes
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
data = data[column]
data = data._get_numeric_data()
naxes = len(data.columns)
fig, axes = _subplots(naxes=naxes, ax=ax, squeeze=False,
sharex=sharex, sharey=sharey, figsize=figsize,
layout=layout)
_axes = _flatten(axes)
for i, col in enumerate(_try_sort(data.columns)):
ax = _axes[i]
ax.hist(data[col].dropna().values, bins=bins, **kwds)
ax.set_title(col)
ax.grid(grid)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(wspace=0.3, hspace=0.3)
return axes
def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, figsize=None,
bins=10, **kwds):
"""
Draw histogram of the input series using matplotlib
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups
ax : matplotlib axis object
If not passed, uses gca()
grid : boolean, default True
Whether to show axis grid lines
xlabelsize : int, default None
If specified changes the x-axis label size
xrot : float, default None
rotation of x axis labels
ylabelsize : int, default None
If specified changes the y-axis label size
yrot : float, default None
rotation of y axis labels
figsize : tuple, default None
figure size in inches by default
bins: integer, default 10
Number of histogram bins to be used
kwds : keywords
To be passed to the actual plotting function
Notes
-----
See matplotlib documentation online for more on this
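Examples
--------
A minimal sketch, assuming a numeric Series ``s``:
>>> ax = s.hist(bins=30)  # doctest: +SKIP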
"""
import matplotlib.pyplot as plt
if by is None:
if kwds.get('layout', None) is not None:
raise ValueError("The 'layout' keyword is not supported when "
"'by' is None")
# hack until the plotting interface is a bit more unified
fig = kwds.pop('figure', plt.gcf() if plt.get_fignums() else
plt.figure(figsize=figsize))
if (figsize is not None and tuple(figsize) !=
tuple(fig.get_size_inches())):
fig.set_size_inches(*figsize, forward=True)
if ax is None:
ax = fig.gca()
elif ax.get_figure() != fig:
raise AssertionError('passed axis not bound to passed figure')
values = self.dropna().values
ax.hist(values, bins=bins, **kwds)
ax.grid(grid)
axes = np.array([ax])
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
else:
if 'figure' in kwds:
raise ValueError("Cannot pass 'figure' when using the "
"'by' argument, since a new 'Figure' instance "
"will be created")
axes = grouped_hist(self, by=by, ax=ax, grid=grid, figsize=figsize,
bins=bins, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot, **kwds)
if hasattr(axes, 'ndim'):
if axes.ndim == 1 and len(axes) == 1:
return axes[0]
return axes
def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
layout=None, sharex=False, sharey=False, rot=90, grid=True,
xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
**kwargs):
"""
Grouped histogram
Parameters
----------
data: Series/DataFrame
column: object, optional
by: object, optional
ax: axes, optional
bins: int, default 50
figsize: tuple, optional
layout: optional
sharex: boolean, default False
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
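Examples
--------
A minimal sketch, assuming a DataFrame ``df`` with a grouping column 'g':
>>> axes = grouped_hist(df, by='g', bins=20)  # doctest: +SKIP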
"""
_converter._WARN = False
def plot_group(group, ax):
ax.hist(group.dropna().values, bins=bins, **kwargs)
xrot = xrot or rot
fig, axes = _grouped_plot(plot_group, data, column=column,
by=by, sharex=sharex, sharey=sharey, ax=ax,
figsize=figsize, layout=layout, rot=rot)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9,
hspace=0.5, wspace=0.3)
return axes
def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
rot=0, grid=True, ax=None, figsize=None,
layout=None, **kwds):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots :
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group
column : column name or list of names, or vector
Can be any valid input to groupby
fontsize : int or string
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
(rows, columns) for the layout of the plot
kwds : other plotting keyword arguments to be passed to matplotlib boxplot
function
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value
or DataFrame.boxplot return value in case ``subplots=False``
Examples
--------
>>> import pandas
>>> import numpy as np
>>> import itertools
>>>
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pandas.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pandas.DataFrame(data, columns=list('ABCD'), index=index)
>>>
>>> grouped = df.groupby(level='lvl1')
>>> boxplot_frame_groupby(grouped)
>>>
>>> grouped = df.unstack(level='lvl1').groupby(level=0, axis=1)
>>> boxplot_frame_groupby(grouped, subplots=False)
"""
_converter._WARN = False
if subplots is True:
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, squeeze=False,
ax=ax, sharex=False, sharey=True,
figsize=figsize, layout=layout)
axes = _flatten(axes)
from pandas.core.series import Series
ret = Series()
for (key, group), ax in zip(grouped, axes):
d = group.boxplot(ax=ax, column=column, fontsize=fontsize,
rot=rot, grid=grid, **kwds)
ax.set_title(pprint_thing(key))
ret.loc[key] = d
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1,
right=0.9, wspace=0.2)
else:
from pandas.core.reshape.concat import concat
keys, frames = zip(*grouped)
if grouped.axis == 0:
df = concat(frames, keys=keys, axis=1)
else:
if len(frames) > 1:
df = frames[0].join(frames[1::])
else:
df = frames[0]
ret = df.boxplot(column=column, fontsize=fontsize, rot=rot,
grid=grid, ax=ax, figsize=figsize,
layout=layout, **kwds)
return ret
def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize=None, sharex=True, sharey=True, layout=None,
rot=0, ax=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
# allowed to specify mpl default with 'default'
warnings.warn("figsize='default' is deprecated. Specify figure"
"size by tuple instead", FutureWarning, stacklevel=4)
figsize = None
grouped = data.groupby(by)
if column is not None:
grouped = grouped[column]
naxes = len(grouped)
fig, axes = _subplots(naxes=naxes, figsize=figsize,
sharex=sharex, sharey=sharey, ax=ax,
layout=layout)
_axes = _flatten(axes)
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
plotf(group, ax, **kwargs)
ax.set_title(pprint_thing(key))
return fig, axes
def _grouped_plot_by_column(plotf, data, columns=None, by=None,
numeric_only=True, grid=False,
figsize=None, ax=None, layout=None,
return_type=None, **kwargs):
grouped = data.groupby(by)
if columns is None:
if not isinstance(by, (list, tuple)):
by = [by]
columns = data._get_numeric_data().columns.difference(by)
naxes = len(columns)
fig, axes = _subplots(naxes=naxes, sharex=True, sharey=True,
figsize=figsize, ax=ax, layout=layout)
_axes = _flatten(axes)
ax_values = []
for i, col in enumerate(columns):
ax = _axes[i]
gp_col = grouped[col]
keys, values = zip(*gp_col)
re_plotf = plotf(keys, values, ax, **kwargs)
ax.set_title(col)
ax.set_xlabel(pprint_thing(by))
ax_values.append(re_plotf)
ax.grid(grid)
from pandas.core.series import Series
result = Series(ax_values, index=columns)
# Return axes in multiplot case, maybe revisit later # 985
if return_type is None:
result = axes
byline = by[0] if len(by) == 1 else by
fig.suptitle('Boxplot grouped by %s' % byline)
fig.subplots_adjust(bottom=0.15, top=0.9, left=0.1, right=0.9, wspace=0.2)
return result
class BasePlotMethods(PandasObject):
def __init__(self, data):
self._data = data
def __call__(self, *args, **kwargs):
raise NotImplementedError
class SeriesPlotMethods(BasePlotMethods):
"""Series plotting accessor and method
Examples
--------
>>> s.plot.line()
>>> s.plot.bar()
>>> s.plot.hist()
Plotting methods can also be accessed by calling the accessor as a method
with the ``kind`` argument:
``s.plot(kind='line')`` is equivalent to ``s.plot.line()``
"""
def __call__(self, kind='line', ax=None,
figsize=None, use_index=True, title=None, grid=None,
legend=False, style=None, logx=False, logy=False,
loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
label=None, secondary_y=False, **kwds):
return plot_series(self._data, kind=kind, ax=ax, figsize=figsize,
use_index=use_index, title=title, grid=grid,
legend=legend, style=style, logx=logx, logy=logy,
loglog=loglog, xticks=xticks, yticks=yticks,
xlim=xlim, ylim=ylim, rot=rot, fontsize=fontsize,
colormap=colormap, table=table, yerr=yerr,
xerr=xerr, label=label, secondary_y=secondary_y,
**kwds)
__call__.__doc__ = plot_series.__doc__
def line(self, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', **kwds)
def bar(self, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', **kwds)
def barh(self, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', **kwds)
def box(self, **kwds):
"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', **kwds)
def hist(self, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
bins: integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', **kwds)
def pie(self, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.Series.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', **kwds)
class FramePlotMethods(BasePlotMethods):
"""DataFrame plotting accessor and method
Examples
--------
>>> df.plot.line()
>>> df.plot.scatter('x', 'y')
>>> df.plot.hexbin()
These plotting methods can also be accessed by calling the accessor as a
method with the ``kind`` argument:
``df.plot(kind='line')`` is equivalent to ``df.plot.line()``
"""
def __call__(self, x=None, y=None, kind='line', ax=None,
subplots=False, sharex=None, sharey=False, layout=None,
figsize=None, use_index=True, title=None, grid=None,
legend=True, style=None, logx=False, logy=False, loglog=False,
xticks=None, yticks=None, xlim=None, ylim=None,
rot=None, fontsize=None, colormap=None, table=False,
yerr=None, xerr=None,
secondary_y=False, sort_columns=False, **kwds):
return plot_frame(self._data, kind=kind, x=x, y=y, ax=ax,
subplots=subplots, sharex=sharex, sharey=sharey,
layout=layout, figsize=figsize, use_index=use_index,
title=title, grid=grid, legend=legend, style=style,
logx=logx, logy=logy, loglog=loglog, xticks=xticks,
yticks=yticks, xlim=xlim, ylim=ylim, rot=rot,
fontsize=fontsize, colormap=colormap, table=table,
yerr=yerr, xerr=xerr, secondary_y=secondary_y,
sort_columns=sort_columns, **kwds)
__call__.__doc__ = plot_frame.__doc__
def line(self, x=None, y=None, **kwds):
"""
Line plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='line', x=x, y=y, **kwds)
def bar(self, x=None, y=None, **kwds):
"""
Vertical bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='bar', x=x, y=y, **kwds)
def barh(self, x=None, y=None, **kwds):
"""
Horizontal bar plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='barh', x=x, y=y, **kwds)
def box(self, by=None, **kwds):
r"""
Boxplot
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
\*\*kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='box', by=by, **kwds)
def hist(self, by=None, bins=10, **kwds):
"""
Histogram
.. versionadded:: 0.17.0
Parameters
----------
by : string or sequence
Column in the DataFrame to group by.
bins: integer, default 10
Number of histogram bins to be used
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='hist', by=by, bins=bins, **kwds)
def kde(self, **kwds):
"""
Kernel Density Estimate plot
.. versionadded:: 0.17.0
Parameters
----------
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='kde', **kwds)
density = kde
def area(self, x=None, y=None, **kwds):
"""
Area plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='area', x=x, y=y, **kwds)
def pie(self, y=None, **kwds):
"""
Pie chart
.. versionadded:: 0.17.0
Parameters
----------
y : label or position, optional
Column to plot.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
"""
return self(kind='pie', y=y, **kwds)
def scatter(self, x, y, s=None, c=None, **kwds):
"""
Scatter plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
s : scalar or array_like, optional
Size of each point.
c : label or position, optional
Color of each point.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
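Examples
--------
A minimal sketch, assuming a DataFrame ``df`` with numeric columns
'a', 'b' and 'c' (column names are illustrative only):
>>> ax = df.plot.scatter(x='a', y='b', c='c', s=50)  # doctest: +SKIP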
"""
return self(kind='scatter', x=x, y=y, c=c, s=s, **kwds)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None,
**kwds):
"""
Hexbin plot
.. versionadded:: 0.17.0
Parameters
----------
x, y : label or position, optional
Coordinates for each point.
C : label or position, optional
The value at each `(x, y)` point.
reduce_C_function : callable, optional
Function of one argument that reduces all the values in a bin to
a single number (e.g. `mean`, `max`, `sum`, `std`).
gridsize : int, optional
Number of bins.
**kwds : optional
Keyword arguments to pass on to :py:meth:`pandas.DataFrame.plot`.
Returns
-------
axes : matplotlib.AxesSubplot or np.array of them
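Examples
--------
A minimal sketch, assuming a DataFrame ``df`` with numeric columns
'a', 'b' and 'z' (column names are illustrative only):
>>> import numpy as np
>>> ax = df.plot.hexbin(x='a', y='b', C='z',
...                     reduce_C_function=np.max, gridsize=25)  # doctest: +SKIP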
"""
if reduce_C_function is not None:
kwds['reduce_C_function'] = reduce_C_function
if gridsize is not None:
kwds['gridsize'] = gridsize
return self(kind='hexbin', x=x, y=y, C=C, **kwds)
| 34.397298
| 84
| 0.558562
|
6849a6481c1e9b24fa7a16daf6743df3e2cb1d4e
| 1,485
|
py
|
Python
|
nciauth_girder3/__init__.py
|
knowledgevis/NCIAuth-girder3
|
cb4b7a71ae0aeec068dc3e1a5f321cd700c79ad5
|
[
"Apache-2.0"
] | null | null | null |
nciauth_girder3/__init__.py
|
knowledgevis/NCIAuth-girder3
|
cb4b7a71ae0aeec068dc3e1a5f321cd700c79ad5
|
[
"Apache-2.0"
] | null | null | null |
nciauth_girder3/__init__.py
|
knowledgevis/NCIAuth-girder3
|
cb4b7a71ae0aeec068dc3e1a5f321cd700c79ad5
|
[
"Apache-2.0"
] | null | null | null |
#from girder.constants import SettingKey, SettingDefault
#from girder.constants import SettingDefault
from girder.utility import config, setting_utilities
# ValidationException is raised by the validators below; assumed Girder 3 location.
from girder.exceptions import ValidationException
from . import rest, constants
# added for Girder V3
from girder import plugin
@setting_utilities.validator(constants.PluginSettings.PROVIDERS_ENABLED)
def validateProvidersEnabled(doc):
if not isinstance(doc['value'], (list, tuple)):
raise ValidationException('The enabled providers must be a list.', 'value')
@setting_utilities.validator(constants.PluginSettings.IGNORE_REGISTRATION_POLICY)
def validateIgnoreRegistrationPolicy(doc):
if not isinstance(doc['value'], bool):
raise ValidationException('Ignore registration policy setting must be boolean.', 'value')
@setting_utilities.validator({
constants.PluginSettings.NCI_CLIENT_ID,
constants.PluginSettings.NCI_CLIENT_SECRET,
constants.PluginSettings.NCI_API_URL,
constants.PluginSettings.NCI_RETURN_URL,
# constants.PluginSettings.NCI_LOGIN_URL,
# constants.PluginSettings.NCI_VALIDATION_URL
})
def validateOtherSettings(event):
pass
# the load function goes inside a plugin class declaration now
class GirderPlugin(plugin.GirderPlugin):
DISPLAY_NAME = 'NCIAuth-girder3'
CLIENT_SOURCE_PATH = 'web_client'
def load(self,info):
info['apiRoot'].nciLogin = rest.NCILogin()
print('NCIAuth-girder3: Warning: SettingDefault not enabled!')
#SettingDefault.defaults[constants.PluginSettings.PROVIDERS_ENABLED] = []
| 37.125
| 97
| 0.791919
|
0d5e7f90e99e1ae85937a5c263ecf441b03172e3
| 5,567
|
py
|
Python
|
src/maestral_cocoa/activity.py
|
samschott/maestral-cocoa
|
270574545267dcd32f643f12daf4b5e31edd29bf
|
[
"MIT"
] | 2
|
2022-01-08T11:18:20.000Z
|
2022-01-26T13:41:34.000Z
|
src/maestral_cocoa/activity.py
|
samschott/maestral-cocoa
|
270574545267dcd32f643f12daf4b5e31edd29bf
|
[
"MIT"
] | 4
|
2022-01-05T09:16:30.000Z
|
2022-03-29T09:32:44.000Z
|
src/maestral_cocoa/activity.py
|
samschott/maestral-cocoa
|
270574545267dcd32f643f12daf4b5e31edd29bf
|
[
"MIT"
] | 1
|
2022-01-05T08:56:59.000Z
|
2022-01-05T08:56:59.000Z
|
# -*- coding: utf-8 -*-
from __future__ import annotations
# system imports
import os.path as osp
import asyncio
from datetime import datetime
from typing import Iterable, Any
# external imports
import click
import toga
from toga.sources import Source
from toga.style.pack import Pack
from maestral.models import SyncEvent, ItemType
from maestral.daemon import MaestralProxy
from maestral.utils import sanitize_string
# local imports
from .private.widgets import FreestandingIconButton, Icon, Window
from .private.constants import ImageTemplate
PADDING = 10
ICON_SIZE = 32
WINDOW_SIZE = (700, 600)
class SyncEventRow:
_reveal: FreestandingIconButton | None
def __init__(self, sync_event: SyncEvent) -> None:
self.sync_event = sync_event
dirname, basename = osp.split(self.sync_event.local_path)
dt = datetime.fromtimestamp(self.sync_event.change_time_or_sync_time)
# attributes for table column values
self._basename = basename
self._icon: Icon | None = None
self.location = osp.basename(dirname)
self.type = self.sync_event.change_type.value.capitalize()
self.time = dt.strftime("%d %b %Y %H:%M")
self.username = self.sync_event.change_user_name
self._reveal = None
@property
def filename(self) -> tuple[Icon, str]:
if not self._icon:
if self.sync_event.item_type is ItemType.Folder:
self._icon = Icon(for_path="/usr")
else:
self._icon = Icon(for_path=self.sync_event.local_path)
return self._icon, sanitize_string(self._basename)
@property
def reveal(self) -> FreestandingIconButton:
if not self._reveal:
self._reveal = FreestandingIconButton(
label="",
icon=Icon(template=ImageTemplate.Reveal),
on_press=self.on_reveal_pressed,
enabled=osp.exists(self.sync_event.local_path),
)
return self._reveal
def on_reveal_pressed(self, widget: Any) -> None:
click.launch(self.sync_event.local_path, locate=True)
def refresh(self) -> None:
self.reveal.enabled = osp.exists(self.sync_event.local_path)
class SyncEventSource(Source):
def __init__(self, sync_events: Iterable[SyncEvent] = tuple()) -> None:
super().__init__()
self._rows = [SyncEventRow(e) for e in sync_events]
def __len__(self) -> int:
return len(self._rows)
def __getitem__(self, index: int) -> SyncEventRow:
return self._rows[index]
def add(self, sync_event: SyncEvent) -> None:
row = SyncEventRow(sync_event)
self._rows.append(row)
self._notify("insert", index=len(self._rows) - 1, item=row)
def insert(self, index: int, sync_event: SyncEvent) -> None:
row = SyncEventRow(sync_event)
self._rows.insert(index, row)
self._notify("insert", index=index, item=row)
def remove(self, index: int) -> None:
row = self._rows[index]
self._notify("pre_remove", item=row)
del self._rows[index]
self._notify("remove", item=row)
def clear(self) -> None:
self._rows.clear()
self._notify("clear")
class ActivityWindow(Window):
def __init__(self, mdbx: MaestralProxy, app: toga.App) -> None:
super().__init__(title="Maestral Activity", release_on_close=False, app=app)
self.size = WINDOW_SIZE
self._refresh = False
self._refresh_interval = 1
self._ids: set[str] = set()
self.on_close = self.on_close_pressed
self.mdbx = mdbx
self.table = toga.Table(
headings=["File", "Location", "Change", "Time", " "],
accessors=["filename", "location", "type", "time", "reveal"],
missing_value="--",
on_double_click=self.on_row_clicked,
style=Pack(flex=1),
)
self.table._impl.columns[-1].maxWidth = 25 # TODO: don't use private API
self.content = self.table
self.center()
self._initial_load = False
def on_row_clicked(self, sender: Any, row: SyncEventRow) -> None:
res = click.launch(row.sync_event.local_path)
if res != 0:
self.app.alert(
title="Count not open item",
message="The file or folder no longer exists.",
)
async def periodic_refresh_gui(self, sender: Any = None) -> None:
while self._refresh:
await self.refresh_gui()
await asyncio.sleep(self._refresh_interval)
async def refresh_gui(self) -> None:
needs_refresh = False
for event in self.mdbx.get_history():
if event.id not in self._ids:
self.table.data.insert(0, event)
self._ids.add(event.id)
await asyncio.sleep(0.002)
needs_refresh = True
if needs_refresh:
for row in self.table.data:
row.refresh()
def on_close_pressed(self, sender: Any = None) -> bool:
self._refresh = False
return True
def show(self) -> None:
if not self._initial_load:
sync_events = self.mdbx.get_history()
data_source = SyncEventSource(reversed(sync_events))
self._ids = set(event.id for event in sync_events)
self.table.data = data_source
self._initial_load = True
self._refresh = True
self.app.add_background_task(self.periodic_refresh_gui)
super().show()
| 30.756906
| 84
| 0.625831
|
a705adff06687c26b9e5ff81bcf33b14b64aaebf
| 2,111
|
py
|
Python
|
src/pipelining/pipes/pipe_evaluate_final_anwendungsfeld_sm.py
|
acdh-oeaw/mara_nlp_suite
|
223850ffde46bb9fedce9981d1f7a94f865a6a5a
|
[
"MIT"
] | null | null | null |
src/pipelining/pipes/pipe_evaluate_final_anwendungsfeld_sm.py
|
acdh-oeaw/mara_nlp_suite
|
223850ffde46bb9fedce9981d1f7a94f865a6a5a
|
[
"MIT"
] | null | null | null |
src/pipelining/pipes/pipe_evaluate_final_anwendungsfeld_sm.py
|
acdh-oeaw/mara_nlp_suite
|
223850ffde46bb9fedce9981d1f7a94f865a6a5a
|
[
"MIT"
] | null | null | null |
from etl.gold_data_manager import GoldDataContainer, GoldDataItem
from pipelining.pipe_root import ConfigRoot
from etl import maxqdata_manager, gold_data_transform_rules, gold_data_manager
from pipelining import data_flow_registry
from IPython import embed
from pipelining.pipes import pipe_train_AF
from evaluators import evaluator
import main
class TrainDataConfig(ConfigRoot):
gold_data_transform_rule = gold_data_transform_rules.TransformRule24
class EvalConfig(ConfigRoot):
gold_data_json_path = data_flow_registry.gold_data["g10"]["path"]
gold_data_transform_rule = gold_data_transform_rules.TransformRule25
evaluation_scores_path = data_flow_registry.evaluations_scores["es8"]["path"]
evaluation_diffs_path = data_flow_registry.evaluation_diffs["ed8"]["path"]
class TrainConfig(ConfigRoot):
should_load_model = True
model_def_dict = data_flow_registry.models["mo9"]
trainer_class = model_def_dict["trainer_class"]
model_path = model_def_dict["path"]
def run():
trainer = main.init_trainer(TrainConfig)
gdc_train = pipe_train_AF.load_and_transform_data()
gdc_train = main.transform_gold_data(TrainDataConfig, gdc_train)
gdc_eval_not = main.load_gold_data(EvalConfig)
gdc_eval_not = main.transform_gold_data(EvalConfig, gdc_eval_not)
af_sc = "AF: Soziale Medien"
af_sc_not = "AF: NOT Soziale Medien"
gdc_eval = GoldDataContainer(cats_list=[af_sc], gold_data_item_list=[])
for gdi_not in gdc_eval_not.gold_data_item_list:
gdi = GoldDataItem(article_id=gdi_not.article_id, text=gdi_not.text)
if gdi_not.cats[af_sc_not] == 1:
gdi.cats = {af_sc: 0}
elif gdi_not.cats[af_sc_not] == 0:
gdi.cats = {af_sc: 1}
else:
raise Exception
gdc_eval.gold_data_item_list.append(gdi)
evaluator.calc_and_write_all_tables(
gdc_train=gdc_train,
gdc_eval=gdc_eval,
trainer=trainer,
table_path_score=EvalConfig.evaluation_scores_path,
table_path_diff=EvalConfig.evaluation_diffs_path,
cats_are_exclusive=False
)
| 34.606557
| 81
| 0.755566
|
13ceb7802f436344bd1a2181f29c27abdf4343d3
| 10,771
|
py
|
Python
|
view/gui/flags_box.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 2
|
2021-11-07T10:17:16.000Z
|
2021-11-07T10:17:19.000Z
|
view/gui/flags_box.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 5
|
2021-11-03T12:43:03.000Z
|
2021-12-16T10:34:52.000Z
|
view/gui/flags_box.py
|
galizia-lab/pyview
|
07bef637b0c60fae8830c1b3947e4a7bcd14bb2c
|
[
"BSD-3-Clause"
] | 1
|
2021-09-23T15:46:26.000Z
|
2021-09-23T15:46:26.000Z
|
from PyQt5.QtWidgets import QTableWidget, QTabWidget, QLineEdit, QMessageBox, QPushButton, QVBoxLayout, QWidget, \
QSizePolicy, QHeaderView, QMenu, QComboBox, QHBoxLayout, QLabel, QWidget
from PyQt5.QtGui import QGuiApplication, QCursor
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject
from .flags_search import get_flags_index, query
from collections import OrderedDict
from html.parser import HTMLParser
import pandas as pd
class ButtonCopyableLabel(QPushButton):
def __init__(self, label):
super().__init__(label)
def contextMenuEvent(self, QContextMenuEvent):
qmenu = QMenu(self)
copy_action = qmenu.addAction("Copy name")
copy_action.triggered.connect(self.copy_name_to_clipboard)
qmenu.exec(QCursor().pos())
def copy_name_to_clipboard(self):
clipboard = QGuiApplication.clipboard()
clipboard.setText(self.text())
class FlagSubgroupPage(QTableWidget):
return_flag_signal = pyqtSignal(str, str, name="return_flag_signal")
def __init__(self, parent, flags_default_values_descriptions_df):
super().__init__(parent=parent)
self.setRowCount(flags_default_values_descriptions_df.shape[0])
self.setColumnCount(2)
self.setHorizontalHeaderLabels(["Flag Name", "Flag Value"])
self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)
self.flag_values_descriptions_df = flags_default_values_descriptions_df.set_index("Flag Name")
self.flag_values_descriptions_df.rename(columns={"Flag Default Value": "Flag Value"}, inplace=True)
for index, (flag_name, flag_value, flags_description, selectable_options_str, flag_value_type) in \
flags_default_values_descriptions_df.iterrows():
if flag_value_type.find("bool") >= 0:
to_update = "True:\nFalse:\n1: same as True\n0: same as False"
if not pd.isnull(selectable_options_str):
to_update = f"{to_update}\n{selectable_options_str}"
selectable_options_str = to_update
self.flag_values_descriptions_df.loc[flag_name, "Selectable Options"] = selectable_options_str
name_button = ButtonCopyableLabel(flag_name)
name_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
name_button.setToolTip("Click here for a description of flag function and possible choices")
name_button.clicked.connect(self.display_description)
self.setCellWidget(index, 0, name_button)
combobox = QComboBox(self)
combobox.setToolTip(
"Click on flag name (button just to the left) "
"for a description of flag function and possible choices")
combobox.setInsertPolicy(QComboBox.InsertAtBottom)
combobox.setAccessibleName(flag_name)
combobox.setEditable(True)
if not pd.isnull(selectable_options_str):
selectable_options_dict = self.parse_selectable_options_str(selectable_options_str)
combobox.addItems(selectable_options_dict.keys())
combobox.lineEdit().editingFinished.connect(self.return_flag)
combobox.currentIndexChanged.connect(self.return_flag)
self.flag_values_descriptions_df.loc[flag_name, "combobox"] = combobox
# set_flag accesses the column "combobox", so it needs to be set beforehand
self.set_flag(flag_name, flag_value)
self.setCellWidget(index, 1, combobox)
self.flag_values_descriptions_df.loc[flag_name, "button"] = name_button
def parse_selectable_options_str(self, selectable_options_str):
selectable_options_dict = {}
for line in selectable_options_str.splitlines():
if ":" in line:
splits = line.split(":")
k, d = splits[0], ":".join(splits[1:])
selectable_options_dict[k.rstrip().lstrip()] = d
return selectable_options_dict
def set_flag(self, flag_name, flag_value):
self.flag_values_descriptions_df.loc[flag_name, "combobox"].setCurrentText(str(flag_value))
self.flag_values_descriptions_df.loc[flag_name, "Flag Value"] = str(flag_value)
def reset_flag(self, flag_name, flag_value):
self.flag_values_descriptions_df.loc[flag_name, "combobox"].setCurrentText(str(flag_value))
def display_description(self):
sender = QObject.sender(self)
flag_name = sender.text()
flag_descr = self.flag_values_descriptions_df.loc[flag_name, "Flag Description"]
flag_selectable_values = self.flag_values_descriptions_df.loc[flag_name, "Selectable Options"]
descr = flag_descr[:] # make a copy
if not pd.isnull(flag_selectable_values):
descr = f"{descr}\n\nValid values:\n\n{flag_selectable_values}"
QMessageBox.information(self, f"Description of flag '{flag_name}'", descr)
def return_flag(self):
sender_le = QObject.sender(self)
if sender_le is not None:
flag_name = sender_le.accessibleName()
if flag_name in self.flag_values_descriptions_df.index.values: # not sure why this check is needed
self.return_flag_signal.emit(flag_name, sender_le.currentText())
def jump_to_flag(self, flag_name):
flag_index = self.flag_values_descriptions_df.index.values.tolist().index(flag_name)
# this programmatic change will otherwise send currentChanged signal
self.blockSignals(True)
self.setCurrentCell(flag_index, 1)
self.blockSignals(False)
class FlagsDisplayChoiceTabs(QTabWidget):
def __init__(self, parent, flags):
super().__init__(parent=parent)
self.subgroup_pages = OrderedDict()
self.setMovable(True)
self.search_widget = FlagsSearchWidget(self, flags)
self.search_widget.raise_jump_to_flag_signal.connect(self.jump_to_flag)
self.addTab(self.search_widget, "Search")
self.flag_name_subgroup_mapping = {}
for subgroup in flags.get_subgroups():
subgroup_flag_def_subset_df = flags.get_subgroup_definition(subgroup)
subgroup_page = FlagSubgroupPage(parent=None,
flags_default_values_descriptions_df=subgroup_flag_def_subset_df
)
self.flag_name_subgroup_mapping.update({flag_name: subgroup
for flag_name in subgroup_flag_def_subset_df["Flag Name"]})
self.subgroup_pages[subgroup] = subgroup_page
widget = QWidget(self)
vbox = QVBoxLayout()
vbox.addWidget(subgroup_page)
widget.setLayout(vbox)
self.addTab(widget, subgroup)
def block_flags_update_signals(self, b):
for subgroup_page in self.subgroup_pages.values():
subgroup_page.blockSignals(b)
def set_flags(self, flags):
for flag_name, flag_value in flags.items():
# this can happen when a request is raised for updating a deprecated or an unknown flag
if flag_name in self.flag_name_subgroup_mapping:
subgroup = self.flag_name_subgroup_mapping[flag_name]
self.subgroup_pages[subgroup].set_flag(flag_name, flag_value)
def reset_flag(self, flag_name, flag_value):
subgroup = self.flag_name_subgroup_mapping[flag_name]
self.subgroup_pages[subgroup].reset_flag(flag_name, flag_value)
@pyqtSlot(str, name="jump to flag")
def jump_to_flag(self, flag_name):
target_subgroup_name = self.flag_name_subgroup_mapping[flag_name]
target_subgroup_page = self.subgroup_pages[target_subgroup_name]
subgroup_index = list(self.subgroup_pages.keys()).index(target_subgroup_name)
# this programmatic change will otherwise send currentChanged signal
self.blockSignals(True)
self.setCurrentIndex(subgroup_index + 1) # index 0 is search page
self.blockSignals(False)
target_subgroup_page.jump_to_flag(flag_name)
class FlagNameParser(HTMLParser):
def __init__(self, line):
super().__init__()
self.flag_name = None
self.feed(line)
def handle_data(self, data):
self.flag_name = data
class FlagsSearchWidget(QWidget):
raise_jump_to_flag_signal = pyqtSignal(str)
def __init__(self, parent, flags):
super().__init__(parent)
self.search_index = get_flags_index(flags)
vbox = QVBoxLayout(self)
self.query_le = QLineEdit()
self.query_le.setPlaceholderText("--- Search for flags here ---")
self.query_le.textEdited.connect(self.query)
vbox.addWidget(self.query_le)
self.search_results_table = QTableWidget(self)
self.search_results_table.setColumnCount(3)
self.search_results_table.setHorizontalHeaderLabels(["Flag Name", "Flag Subgroup", "Flag Description"])
self.search_results_table.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
vbox.addWidget(self.search_results_table)
self.flag_name_push_button_mapping_2way = {}
@pyqtSlot(str, name="query and refresh")
def query(self, text):
self.flag_name_push_button_mapping_2way = {}
highlights = query(index=self.search_index, query_str=text, max_results=20)
self.search_results_table.clearContents()
self.search_results_table.setRowCount(len(highlights))
for ind, highlight in enumerate(highlights):
self.search_results_table.setCellWidget(ind, 0, QLabel(highlight["flag_name"]))
flag_name_parser = FlagNameParser(highlight["flag_name"])
widget = QWidget()
layout = QHBoxLayout(widget)
layout.addWidget(QLabel(highlight["flag_subgroup"]))
push_button = QPushButton("Go to flag")
self.flag_name_push_button_mapping_2way[flag_name_parser.flag_name] = push_button
self.flag_name_push_button_mapping_2way[push_button] = flag_name_parser.flag_name
push_button.clicked.connect(self.raise_jump_to_flag)
layout.addWidget(push_button)
self.search_results_table.setCellWidget(ind, 1, widget)
self.search_results_table.setCellWidget(ind, 2, QLabel(highlight["flag_description"]))
self.search_results_table.resizeColumnsToContents()
self.search_results_table.resizeRowsToContents()
@pyqtSlot(name="raise jump to flag")
def raise_jump_to_flag(self):
sender = QObject.sender(self)
self.raise_jump_to_flag_signal.emit(self.flag_name_push_button_mapping_2way[sender])
| 40.190299
| 114
| 0.68963
|
2adc86cf7198998e26a7c43dc49885820f9d8806
| 1,143
|
py
|
Python
|
main.py
|
Neira1991/opendata
|
950697707ac9f0abf438634101812c4803195885
|
[
"MIT"
] | null | null | null |
main.py
|
Neira1991/opendata
|
950697707ac9f0abf438634101812c4803195885
|
[
"MIT"
] | null | null | null |
main.py
|
Neira1991/opendata
|
950697707ac9f0abf438634101812c4803195885
|
[
"MIT"
] | null | null | null |
import json
import matplotlib.pyplot as plt
from makegraph.generate_graph import GenerateGraph
import networkx as nx
def run():
# Opening JSON file
f = open('data/matches/2068/structured_data.json', 'r')
p = open('data/matches/2068/match_data.json', 'r')
data = json.load(f)
data_players = json.load(p)
f.close()
p.close()
match_iterator = GenerateGraph(data, data_players['players'])
graph_match = None
for element in match_iterator:
graph_match = element.graph
enlarge = [(u, v) for (u, v, d) in graph_match.edges(data=True) if d["weight"] > 2]
small = [(u, v) for (u, v, d) in graph_match.edges(data=True) if d["weight"] <= 1]
node_pos = nx.circular_layout(graph_match)
nx.draw_networkx_edges(graph_match, node_pos, edgelist=enlarge)
nx.draw_networkx_edges(graph_match, node_pos, edgelist=small, alpha=0.5, edge_color="b", style="dashed")
nx.draw_networkx_labels(graph_match, node_pos, font_size=20, font_family="sans-serif")
ax = plt.gca()
ax.margins(0.08)
plt.axis("off")
plt.tight_layout()
plt.show()
if __name__ == '__main__':
run()
| 31.75
| 108
| 0.67804
|
d92f9f6589f0568a1ea6d3701bf5d8cab246d462
| 5,020
|
py
|
Python
|
venv/lib/python3.8/site-packages/ansible_test/_internal/payload.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_test/_internal/payload.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/ansible_test/_internal/payload.py
|
saeedya/docker-ansible
|
6fb0cfc6bc4a5925b21380952a5a4502ec02119a
|
[
"Apache-2.0"
] | null | null | null |
"""Payload management for sending Ansible files and test content to other systems (VMs, containers)."""
from __future__ import annotations
import atexit
import os
import stat
import tarfile
import tempfile
import time
import typing as t
from .constants import (
ANSIBLE_BIN_SYMLINK_MAP,
)
from .config import (
IntegrationConfig,
ShellConfig,
)
from .util import (
display,
ANSIBLE_SOURCE_ROOT,
remove_tree,
is_subdir,
)
from .data import (
data_context,
)
from .util_common import (
CommonConfig,
)
# improve performance by disabling uid/gid lookups
tarfile.pwd = None
tarfile.grp = None
def create_payload(args, dst_path): # type: (CommonConfig, str) -> None
"""Create a payload for delegation."""
if args.explain:
return
files = list(data_context().ansible_source)
filters = {}
def make_executable(tar_info): # type: (tarfile.TarInfo) -> t.Optional[tarfile.TarInfo]
"""Make the given file executable."""
tar_info.mode |= stat.S_IXUSR | stat.S_IXOTH | stat.S_IXGRP
return tar_info
if not ANSIBLE_SOURCE_ROOT:
# reconstruct the bin directory which is not available when running from an ansible install
files.extend(create_temporary_bin_files(args))
filters.update(dict((os.path.join('ansible', path[3:]), make_executable) for path in ANSIBLE_BIN_SYMLINK_MAP.values() if path.startswith('../')))
if not data_context().content.is_ansible:
# exclude unnecessary files when not testing ansible itself
files = [f for f in files if
is_subdir(f[1], 'bin/') or
is_subdir(f[1], 'lib/ansible/') or
is_subdir(f[1], 'test/lib/ansible_test/')]
if not isinstance(args, (ShellConfig, IntegrationConfig)):
# exclude built-in ansible modules when they are not needed
files = [f for f in files if not is_subdir(f[1], 'lib/ansible/modules/') or f[1] == 'lib/ansible/modules/__init__.py']
collection_layouts = data_context().create_collection_layouts()
content_files = []
extra_files = []
for layout in collection_layouts:
if layout == data_context().content:
# include files from the current collection (layout.collection.directory will be added later)
content_files.extend((os.path.join(layout.root, path), path) for path in data_context().content.all_files())
else:
# include files from each collection in the same collection root as the content being tested
extra_files.extend((os.path.join(layout.root, path), os.path.join(layout.collection.directory, path)) for path in layout.all_files())
else:
# when testing ansible itself the ansible source is the content
content_files = files
# there are no extra files when testing ansible itself
extra_files = []
for callback in data_context().payload_callbacks:
# execute callbacks only on the content paths
# this is done before placing them in the appropriate subdirectory (see below)
callback(content_files)
# place ansible source files under the 'ansible' directory on the delegated host
files = [(src, os.path.join('ansible', dst)) for src, dst in files]
if data_context().content.collection:
# place collection files under the 'ansible_collections/{namespace}/{collection}' directory on the delegated host
files.extend((src, os.path.join(data_context().content.collection.directory, dst)) for src, dst in content_files)
# extra files already have the correct destination path
files.extend(extra_files)
# maintain predictable file order
files = sorted(set(files))
display.info('Creating a payload archive containing %d files...' % len(files), verbosity=1)
start = time.time()
with tarfile.open(dst_path, mode='w:gz', compresslevel=4, format=tarfile.GNU_FORMAT) as tar:
for src, dst in files:
display.info('%s -> %s' % (src, dst), verbosity=4)
tar.add(src, dst, filter=filters.get(dst))
duration = time.time() - start
payload_size_bytes = os.path.getsize(dst_path)
display.info('Created a %d byte payload archive containing %d files in %d seconds.' % (payload_size_bytes, len(files), duration), verbosity=1)
def create_temporary_bin_files(args): # type: (CommonConfig) -> t.Tuple[t.Tuple[str, str], ...]
"""Create a temporary ansible bin directory populated using the symlink map."""
if args.explain:
temp_path = '/tmp/ansible-tmp-bin'
else:
temp_path = tempfile.mkdtemp(prefix='ansible', suffix='bin')
atexit.register(remove_tree, temp_path)
for name, dest in ANSIBLE_BIN_SYMLINK_MAP.items():
path = os.path.join(temp_path, name)
os.symlink(dest, path)
return tuple((os.path.join(temp_path, name), os.path.join('bin', name)) for name in sorted(ANSIBLE_BIN_SYMLINK_MAP))
| 37.744361
| 153
| 0.6749
|
f2bb97f77659dbc5094d9d784a3a609d312cc868
| 299
|
py
|
Python
|
kaybee/plugins/resources/container.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | 2
|
2017-11-08T19:55:57.000Z
|
2018-12-21T12:41:41.000Z
|
kaybee/plugins/resources/container.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | null | null | null |
kaybee/plugins/resources/container.py
|
pauleveritt/kaybee
|
a00a718aaaa23b2d12db30dfacb6b2b6ec84459c
|
[
"Apache-2.0"
] | 1
|
2018-10-13T08:59:29.000Z
|
2018-10-13T08:59:29.000Z
|
"""
A UserDict container for resources.
This allows us to validate schemas on add plus provide some query-like
methods on the app.resources instance
"""
from collections import UserDict
class ResourcesContainer(UserDict):
def __setitem__(self, name, value):
self.data[name] = value
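# A minimal usage sketch (the resource value below is a stand-in only; real
# resources would come from kaybee's schema-validated registration flow):
#
#     resources = ResourcesContainer()
#     resources['index'] = {'title': 'Home'}
#     assert 'index' in resources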
| 19.933333
| 70
| 0.749164
|
40fc786cbdac618c3d699ce1f9c3e970040d58dd
| 31,068
|
py
|
Python
|
thermo/gpumd/data.py
|
hityingph/thermo
|
08faef51ad4333e816cadf838a224773152421fc
|
[
"MIT"
] | 3
|
2022-01-03T06:30:36.000Z
|
2022-01-06T00:36:57.000Z
|
thermo/gpumd/data.py
|
hityingph/thermo
|
08faef51ad4333e816cadf838a224773152421fc
|
[
"MIT"
] | null | null | null |
thermo/gpumd/data.py
|
hityingph/thermo
|
08faef51ad4333e816cadf838a224773152421fc
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
import copy
import multiprocessing as mp
from functools import partial
from collections import deque
from .common import __get_path, __get_direction, __check_list, __check_range
__author__ = "Alexander Gabourie"
__email__ = "gabourie@stanford.edu"
#########################################
# Helper Functions
#########################################
def __process_sample(nbins, i):
"""
A helper function for the multiprocessing of kappamode.out files
Args:
nbins (int):
Number of bins used in the GPUMD simulation
i (int):
The current sample from a run to analyze
Returns:
np.ndarray: A 2D array of each bin and output for a sample
"""
out = list()
for j in range(nbins):
out += [float(x) for x in malines[j + i * nbins].split()]
return np.array(out).reshape((nbins,5))
def tail(f, nlines, BLOCK_SIZE=32768):
"""
Reads the last nlines of a file.
Args:
f (filehandle):
File handle of file to be read
nlines (int):
Number of lines to be read from end of file
BLOCK_SIZE (int):
Size of block (in bytes) to be read per read operation.
Performance depends on this parameter and file size.
Returns:
list: List of ordered final nlines of file
Additional Information:
Since GPUMD output files are mostly append-only, this becomes
useful when a simulation prematurely ends (i.e. cluster preempts
run, but simulation restarts elsewhere). In this case, it is not
necessary to clean the directory before re-running. File outputs
will be too long (so there still is a storage concern), but the
proper data can be extracted from the end of file.
This may also be useful if you want to only grab data from the
final m number of runs of the simulation
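Example:
A minimal sketch; the file must be opened in binary mode because the
returned lines are ``bytes`` objects:
>>> with open('thermo.out', 'rb') as f:  # doctest: +SKIP
...     last_ten = tail(f, 10)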
"""
# BLOCK_SIZE is in bytes (must decode to string)
f.seek(0, 2)
bytes_remaining = f.tell()
idx = -BLOCK_SIZE
blocks = list()
# Make no assumptions about line length
lines_left = nlines
eof = False
first = True
num_lines = 0
# BLOCK_size is smaller than file
if BLOCK_SIZE <= bytes_remaining:
while lines_left > 0 and not eof:
if bytes_remaining > BLOCK_SIZE:
f.seek(idx, 2)
blocks.append(f.read(BLOCK_SIZE))
else: # if reached end of file
f.seek(0, 0)
blocks.append(f.read(bytes_remaining))
eof = True
idx -= BLOCK_SIZE
bytes_remaining -= BLOCK_SIZE
num_lines = blocks[-1].count(b'\n')
if first:
lines_left -= num_lines - 1
first = False
else:
lines_left -= num_lines
# since whitespace removed from eof, must compare to 1 here
if eof and lines_left > 1:
raise ValueError("More lines requested than exist.")
# Corrects for reading too many lines with large buffer
if bytes_remaining > 0:
skip = 1 + abs(lines_left)
blocks[-1] = blocks[-1].split(b'\n', skip)[skip]
text = b''.join(reversed(blocks)).strip()
else: # BLOCK_SIZE is bigger than file
f.seek(0, 0)
block = f.read()
num_lines = block.count(b'\n')
if num_lines < nlines:
raise ValueError("More lines requested than exist.")
skip = num_lines - nlines
text = block.split(b'\n', skip)[skip].strip()
return text.split(b'\n')
def __modal_analysis_read(nbins, nsamples, datapath,
ndiv, multiprocessing, ncore, block_size):
global malines
# Get full set of results
datalines = nbins * nsamples
with open(datapath, 'rb') as f:
if multiprocessing:
malines = tail(f, datalines, BLOCK_SIZE=block_size)
else:
malines = deque(tail(f, datalines, BLOCK_SIZE=block_size))
if multiprocessing: # TODO Improve memory efficiency of multiprocessing
if not ncore:
ncore = mp.cpu_count()
func = partial(__process_sample, nbins)
pool = mp.Pool(ncore)
data = np.array(pool.map(func, range(nsamples)), dtype='float32').transpose((1, 0, 2))
pool.close()
else: # Faster if single thread
data = np.zeros((nbins, nsamples, 5), dtype='float32')
for j in range(nsamples):
for i in range(nbins):
measurements = malines.popleft().split()
data[i, j, 0] = float(measurements[0])
data[i, j, 1] = float(measurements[1])
data[i, j, 2] = float(measurements[2])
data[i, j, 3] = float(measurements[3])
data[i, j, 4] = float(measurements[4])
del malines
if ndiv:
nbins = int(np.ceil(data.shape[0] / ndiv)) # overwrite nbins
npad = nbins * ndiv - data.shape[0]
data = np.pad(data, [(0, npad), (0, 0), (0, 0)])
data = np.sum(data.reshape((-1, ndiv, data.shape[1], data.shape[2])), axis=1)
return data
def __basic_reader(points, data, labels):
start = 0
out = dict()
for i, npoints in enumerate(points):
end = start + npoints
run = dict()
for j, key in enumerate(labels):
run[key] = data[j][start:end].to_numpy(dtype='float')
start = end
out['run{}'.format(i)] = run
return out
def __basic_frame_loader(n, directory, filename):
path = __get_path(directory, filename)
data = pd.read_csv(path, delim_whitespace=True, header=None).to_numpy(dtype='float')
if not (data.shape[0] / n).is_integer():
raise ValueError("An integer number of frames cannot be created. Please check n.")
return data.reshape(-1, n, 3)
#########################################
# Data-loading Related
#########################################
def load_omega2(directory=None, filename='omega2.out'):
"""
Loads data from omega2.out GPUMD output file.\n
Args:
directory (str):
Directory to load force file from
filename (str):
Name of force data file
Returns:
Numpy array of shape (N_kpoints,3*N_basis) in units of THz. N_kpoints is number of k points in kpoint.in and
N_basis is the number of basis atoms defined in basis.in
"""
path = __get_path(directory, filename)
data = pd.read_csv(path, delim_whitespace=True, header=None).to_numpy(dtype='float')
data = np.sqrt(data)/(2*np.pi)
return data
def load_force(n, directory=None, filename='force.out'):
"""
Loads data from force.out GPUMD output file.\n
Currently supports loading a single run.
Args:
n (int):
Number of atoms force is output for
directory (str):
Directory to load force file from
filename (str):
Name of force data file
Returns:
Numpy array of shape (-1,n,3) containing all forces (eV/A) from filename
"""
return __basic_frame_loader(n, directory, filename)
def load_velocity(n, directory=None, filename='velocity.out'):
"""
Loads data from velocity.out GPUMD output file.\n
Currently supports loading a single run.
Args:
n (int):
Number of atoms velocity is output for
directory (str):
Directory to load velocity file from
filename (str):
Name of velocity data file
Returns:
Numpy array of shape (-1,n,3) containing all velocities (A/ps) from filename
"""
return __basic_frame_loader(n, directory, filename)
def load_compute(quantities=None, directory=None, filename='compute.out'):
"""
Loads data from compute.out GPUMD output file.\n
Currently supports loading a single run.
Args:
quantities (str or list(str)):
Quantities to extract from compute.out. Accepted quantities are:\n
['T', 'U', 'F', 'W', 'jp', 'jk']. \n
Other quantities will be ignored.\n
T=temperature, U=potential, F=force, W=virial, jp=heat current (potential), jk=heat current (kinetic)
directory (str):
Directory to load compute file from
filename (str):
file to load compute from
Returns:
Dictionary containing the data from compute.out
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,T,U,F,W,jp,jk,Ein,Eout
**units**,K,eV,|c1|,eV,|c2|,|c2|,eV,eV
.. |c1| replace:: eVA\ :sup:`-1`
.. |c2| replace:: eV\ :sup:`3/2` amu\ :sup:`-1/2`
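Example:
A minimal sketch (the directory name is illustrative only):
>>> out = load_compute(quantities=['T', 'U'], directory='run1')  # doctest: +SKIP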
"""
# TODO Add input checking
if not quantities:
return None
compute_path = __get_path(directory, filename)
data = pd.read_csv(compute_path, delim_whitespace=True, header=None)
num_col = len(data.columns)
q_count = {'T': 1, 'U': 1, 'F': 3, 'W': 3, 'jp': 3, 'jk': 3}
out = dict()
count = 0
for value in quantities:
count += q_count[value]
m = int(num_col / count)
if 'T' in quantities:
m = int((num_col - 2) / count)
out['Ein'] = np.array(data.iloc[:, -2])
out['Eout'] = np.array(data.iloc[:, -1])
out['m'] = m
start = 0
for quantity in q_count.keys():
if quantity in quantities:
end = start + q_count[quantity]*m
out[quantity] = data.iloc[:, start:end].to_numpy(dtype='float')
start = end
return out
def load_thermo(directory=None, filename='thermo.out'):
"""
Loads data from thermo.out GPUMD output file.
Args:
directory (str):
Directory to load thermal data file from
filename (str):
Name of thermal data file
Returns:
'output' dictionary containing the data from thermo.out
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,T,K,U,Px,Py,Pz,Lx,Ly,Lz,ax,ay,az,bx,by,bz,cx,cy,cz
**units**,K,eV,eV,GPa,GPa,GPa,A,A,A,A,A,A,A,A,A,A,A,A
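Example:
A minimal sketch (the directory name is illustrative only):
>>> thermo = load_thermo(directory='run1')  # doctest: +SKIP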
"""
thermo_path = __get_path(directory, filename)
data = pd.read_csv(thermo_path, delim_whitespace=True, header=None)
labels = ['T', 'K', 'U', 'Px', 'Py', 'Pz', 'Pyz', 'Pxz', 'Pxy']
# Orthogonal
if data.shape[1] == 12:
labels += ['Lx', 'Ly', 'Lz']
elif data.shape[1] == 18:
labels += ['ax', 'ay', 'az', 'bx', 'by', 'bz', 'cx', 'cy', 'cz']
out = dict()
for i in range(data.shape[1]):
out[labels[i]] = data[i].to_numpy(dtype='float')
return out
def load_heatmode(nbins, nsamples, directory=None,
inputfile='heatmode.out', directions='xyz',
outputfile='heatmode.npy', ndiv=None, save=False,
multiprocessing=False, ncore=None, block_size=65536, return_data=True):
"""
Loads data from heatmode.out GPUMD file. Option to save as binary file for fast re-load later.
WARNING: If using multiprocessing, memory usage may be significantly larger than file size
Args:
nbins (int):
Number of bins used during the GPUMD simulation
nsamples (int):
Number of times heat flux was sampled with GKMA during GPUMD simulation
directory (str):
Name of directory storing the input file to read
inputfile (str):
Modal heat flux file output by GPUMD
directions (str):
Directions to gather data from. Any order of 'xyz' is accepted. Excluding directions also allowed (i.e. 'xz'
is accepted)
outputfile (str):
File name to save read data to. Output file is a binary dictionary. Loading from a binary file is much
faster than re-reading data files and saving is recommended
ndiv (int):
Integer used to shrink the number of bins output. If you originally have 10 bins but want 5, use ndiv=2. nbins/ndiv
need not be an integer
save (bool):
Toggle saving data to binary dictionary. Loading from save file is much faster and recommended
multiprocessing (bool):
Toggle using multi-core processing for conversion of text file
        ncore (int):
            Number of cores to use for multiprocessing. Ignored if multiprocessing is False
        block_size (int):
            Size of block (in bytes) to be read per read operation. File reading performance depends on this parameter
            and file size
return_data (bool):
Toggle returning the loaded modal heat flux data. If this is False, the user should ensure that
save is True
Returns:
dict: Dictionary with all modal heat fluxes requested
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,nbins, nsamples, jmxi, jmxo, jmyi, jmyo, jmz
**units**,N/A, N/A,|jm1|,|jm1|,|jm1|,|jm1|,|jm1|
.. |jm1| replace:: eV\ :sup:`3/2` amu\ :sup:`-1/2` *x*\ :sup:`-1`
Here *x* is the size of the bins in THz. For example, if there are 4 bins per THz, *x* = 0.25 THz.
"""
jm_path = __get_path(directory, inputfile)
out_path = __get_path(directory, outputfile)
data = __modal_analysis_read(nbins, nsamples, jm_path, ndiv, multiprocessing, ncore, block_size)
out = dict()
directions = __get_direction(directions)
if 'x' in directions:
out['jmxi'] = data[:, :, 0]
out['jmxo'] = data[:, :, 1]
if 'y' in directions:
out['jmyi'] = data[:, :, 2]
out['jmyo'] = data[:, :, 3]
if 'z' in directions:
out['jmz'] = data[:, :, 4]
out['nbins'] = nbins
out['nsamples'] = nsamples
if save:
np.save(out_path, out)
if return_data:
return out
return
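# --- Illustrative usage sketch (not part of the original module) ---
# Example of reading a GKMA heatmode.out file and saving the parsed result for
# faster re-loading later. The nbins/nsamples values, directory name and
# direction selection are placeholders that must match the actual GPUMD run.
def _example_load_heatmode():
    jm = load_heatmode(nbins=400, nsamples=1000, directory='run0',
                       directions='xz', save=True, outputfile='heatmode.npy')
    jmxi = jm['jmxi']  # in-plane x component of the modal heat flux
    jmz = jm['jmz']    # z component of the modal heat flux
    return jmxi, jmz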
def load_kappamode(nbins, nsamples, directory=None,
inputfile='kappamode.out', directions='xyz',
outputfile='kappamode.npy', ndiv=None, save=False,
multiprocessing=False, ncore=None, block_size=65536, return_data=True):
"""
Loads data from kappamode.out GPUMD file. Option to save as binary file for fast re-load later.
    WARNING: If using multiprocessing, memory usage may be significantly larger than file size
Args:
nbins (int):
Number of bins used during the GPUMD simulation
nsamples (int):
Number of times thermal conductivity was sampled with HNEMA during GPUMD simulation
directory (str):
Name of directory storing the input file to read
inputfile (str):
Modal thermal conductivity file output by GPUMD
directions (str):
Directions to gather data from. Any order of 'xyz' is accepted. Excluding directions also allowed (i.e. 'xz'
is accepted)
outputfile (str):
File name to save read data to. Output file is a binary dictionary. Loading from a binary file is much
faster than re-reading data files and saving is recommended
ndiv (int):
Integer used to shrink number of bins output. If originally have 10 bins, but want 5, ndiv=2. nbins/ndiv
need not be an integer
save (bool):
Toggle saving data to binary dictionary. Loading from save file is much faster and recommended
multiprocessing (bool):
Toggle using multi-core processing for conversion of text file
        ncore (int):
            Number of cores to use for multiprocessing. Ignored if multiprocessing is False
        block_size (int):
            Size of block (in bytes) to be read per read operation. File reading performance depends on this parameter
            and file size
return_data (bool):
Toggle returning the loaded modal thermal conductivity data. If this is False, the user should ensure that
save is True
Returns:
dict: Dictionary with all modal thermal conductivities requested
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,nbins,nsamples,kmxi,kmxo,kmyi,kmyo,kmz
**units**,N/A,N/A,|hn1|,|hn1|,|hn1|,|hn1|,|hn1|
.. |hn1| replace:: Wm\ :sup:`-1` K\ :sup:`-1` *x*\ :sup:`-1`
Here *x* is the size of the bins in THz. For example, if there are 4 bins per THz, *x* = 0.25 THz.
"""
km_path = __get_path(directory, inputfile)
out_path = __get_path(directory, outputfile)
data = __modal_analysis_read(nbins, nsamples, km_path, ndiv, multiprocessing, ncore, block_size)
out = dict()
directions = __get_direction(directions)
if 'x' in directions:
out['kmxi'] = data[:, :, 0]
out['kmxo'] = data[:, :, 1]
if 'y' in directions:
out['kmyi'] = data[:, :, 2]
out['kmyo'] = data[:, :, 3]
if 'z' in directions:
out['kmz'] = data[:, :, 4]
out['nbins'] = nbins
out['nsamples'] = nsamples
if save:
np.save(out_path, out)
if return_data:
return out
return
def load_saved_kappamode(filename='kappamode.npy', directory=None):
"""
Loads data saved by the 'load_kappamode' function and returns the original dictionary.
Args:
filename (str):
Name of the file to load
directory (str):
Directory the data file is located in
Returns:
dict: Dictionary with all modal thermal conductivities previously requested
"""
path = __get_path(directory, filename)
return np.load(path, allow_pickle=True).item()
def load_saved_heatmode(filename='heatmode.npy', directory=None):
"""
Loads data saved by the 'load_heatmode' or 'get_gkma_kappa' function and returns the original dictionary.
Args:
filename (str):
Name of the file to load
directory (str):
Directory the data file is located in
Returns:
dict: Dictionary with all modal heat flux previously requested
"""
path = __get_path(directory, filename)
return np.load(path, allow_pickle=True).item()
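# --- Illustrative usage sketch (not part of the original module) ---
# Typical HNEMA workflow: parse kappamode.out once with save=True, then use
# the much faster binary re-load in later sessions. All values and names here
# are placeholders for an actual GPUMD run.
def _example_kappamode_workflow():
    # First pass: convert the text output and store a binary copy on disk.
    load_kappamode(nbins=400, nsamples=1000, directory='run0',
                   save=True, return_data=False)
    # Later sessions: re-load the binary dictionary directly.
    km = load_saved_kappamode(filename='kappamode.npy', directory='run0')
    return km['kmxi'], km['kmz']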
def load_sdc(Nc, directory=None, filename='sdc.out'):
"""
Loads data from sdc.out GPUMD output file.
Args:
Nc (int or list(int)):
Number of time correlation points the VAC/SDC is computed for
directory (str):
Directory to load 'sdc.out' file from (dir. of simulation)
filename (str):
File to load SDC from
Returns:
dict(dict):
            Dictionary with SDC/VAC data. The outermost dictionary stores each individual run
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,t,VACx,VACy,VACz,SDCx,SDCy,SDCz
**units**,ps,|sd1|,|sd1|,|sd1|,|sd2|,|sd2|,|sd2|
.. |sd1| replace:: A\ :sup:`2` ps\ :sup:`-2`
.. |sd2| replace:: A\ :sup:`2` ps\ :sup:`-1`
"""
Nc = __check_list(Nc, varname='Nc', dtype=int)
sdc_path = __get_path(directory, filename)
data = pd.read_csv(sdc_path, delim_whitespace=True, header=None)
__check_range(Nc, data.shape[0])
labels = ['t', 'VACx', 'VACy', 'VACz', 'SDCx', 'SDCy', 'SDCz']
return __basic_reader(Nc, data, labels)
def load_vac(Nc, directory=None, filename='mvac.out'):
"""
Loads data from mvac.out GPUMD output file.
Args:
Nc (int or list(int)):
Number of time correlation points the VAC is computed for
directory (str):
Directory to load 'mvac.out' file from
filename (str):
File to load VAC from
Returns:
dict(dict):
            Dictionary with VAC data. The outermost dictionary stores each individual run
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,t,VACx,VACy,VACz
**units**,ps,|v1|,|v1|,|v1|
.. |v1| replace:: A\ :sup:`2` ps\ :sup:`-2`
"""
Nc = __check_list(Nc, varname='Nc', dtype=int)
sdc_path = __get_path(directory, filename)
data = pd.read_csv(sdc_path, delim_whitespace=True, header=None)
__check_range(Nc, data.shape[0])
labels = ['t', 'VACx', 'VACy', 'VACz']
return __basic_reader(Nc, data, labels)
def load_dos(num_dos_points, directory=None, filename='dos.out'):
"""
Loads data from dos.out GPUMD output file.
Args:
num_dos_points (int or list(int)):
Number of frequency points the DOS is computed for.
directory (str):
Directory to load 'dos.out' file from (dir. of simulation)
filename (str):
File to load DOS from.
Returns:
        dict(dict): Dictionary with DOS data. The outermost dictionary stores
            each individual run.
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,nu,DOSx,DOSy,DOSz
**units**,THz,|d1|,|d1|,|d1|
.. |d1| replace:: THz\ :sup:`-1`
"""
num_dos_points = __check_list(num_dos_points, varname='num_dos_points', dtype=int)
dos_path = __get_path(directory, filename)
data = pd.read_csv(dos_path, delim_whitespace=True, header=None)
__check_range(num_dos_points, data.shape[0])
labels = ['nu', 'DOSx', 'DOSy', 'DOSz']
out = __basic_reader(num_dos_points, data, labels)
for key in out.keys():
out[key]['nu'] /= (2 * np.pi)
return out
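# --- Illustrative usage sketch (not part of the original module) ---
# Reads dos.out for two consecutive compute_dos runs with 1000 frequency
# points each (placeholder values). Note that load_dos already converts 'nu'
# from angular frequency to THz.
def _example_load_dos():
    dos = load_dos(num_dos_points=[1000, 1000], directory='run0')
    # Each entry of the returned dictionary corresponds to one DOS run.
    return [(run['nu'], run['DOSx']) for run in dos.values()]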
def load_shc(Nc, num_omega, directory=None, filename='shc.out'):
"""
Loads the data from shc.out GPUMD output file.
Args:
Nc (int or list(int)):
Maximum number of correlation steps. If multiple shc runs, can provide a list of Nc.
num_omega (int or list(int)):
Number of frequency points. If multiple shc runs, can provide a list of num_omega.
directory (str):
Directory to load 'shc.out' file from (dir. of simulation)
filename (str):
File to load SHC from.
Returns:
dict: Dictionary of in- and out-of-plane shc results (average)
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,t, Ki, Ko, nu, jwi, jwo
**units**,ps, |sh1|,|sh1|, THz, |sh2|, |sh2|
.. |sh1| replace:: A eV ps\ :sup:`-1`
.. |sh2| replace:: A eV ps\ :sup:`-1` THz\ :sup:`-1`
"""
Nc = __check_list(Nc, varname='Nc', dtype=int)
num_omega = __check_list(num_omega, varname='num_omega', dtype=int)
if not len(Nc) == len(num_omega):
raise ValueError('Nc and num_omega must be the same length.')
shc_path = __get_path(directory, filename)
data = pd.read_csv(shc_path, delim_whitespace=True, header=None)
__check_range(np.array(Nc) * 2 - 1 + np.array(num_omega), data.shape[0])
if not all([i>0 for i in Nc]) or not all([i>0 for i in num_omega]):
raise ValueError('Only strictly positive numbers are allowed.')
labels_corr = ['t', 'Ki', 'Ko']
labels_omega = ['nu', 'jwi', 'jwo']
out = dict()
start = 0
for i, varlen in enumerate(zip(Nc, num_omega)):
run = dict()
Nc_i = varlen[0] * 2 - 1
num_omega_i = varlen[1]
end = start + Nc_i
for j, key in enumerate(labels_corr):
run[key] = data[j][start:end].to_numpy(dtype='float')
start = end
end += num_omega_i
for j, key in enumerate(labels_omega):
run[key] = data[j][start:end].to_numpy(dtype='float')
run['nu'] /= (2 * np.pi)
start = end
out['run{}'.format(i)] = run
return out
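# --- Illustrative usage sketch (not part of the original module) ---
# Reads shc.out for a single SHC run with 250 correlation steps and 400
# frequency points (placeholder values) and builds the total spectral current.
def _example_load_shc():
    shc = load_shc(Nc=250, num_omega=400, directory='run0')
    run = shc['run0']
    total_jw = run['jwi'] + run['jwo']  # in-plane plus out-of-plane spectral current
    return run['nu'], total_jw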
def load_kappa(directory=None, filename='kappa.out'):
"""
Loads data from kappa.out GPUMD output file which contains HNEMD kappa.
Args:
directory (str):
Directory containing kappa data file
filename (str):
The kappa data file
Returns:
dict: A dictionary with keys corresponding to the columns in 'kappa.out'
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,kxi, kxo, kyi, kyo, kz
**units**,|k1|,|k1|,|k1|,|k1|,|k1|
.. |k1| replace:: Wm\ :sup:`-1` K\ :sup:`-1`
"""
kappa_path = __get_path(directory, filename)
data = pd.read_csv(kappa_path, delim_whitespace=True, header=None)
labels = ['kxi', 'kxo', 'kyi', 'kyo', 'kz']
out = dict()
for i, key in enumerate(labels):
out[key] = data[i].to_numpy(dtype='float')
return out
def load_hac(Nc, output_interval, directory=None,filename='hac.out'):
"""
Loads data from hac.out GPUMD output file.
Args:
Nc (int or list(int)):
Number of correlation steps
output_interval (int or list(int)):
Output interval for HAC and RTC data
directory (str):
Directory containing hac data file
filename (str):
The hac data file
Returns:
dict: A dictionary containing the data from hac runs
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,t, kxi, kxo, kyi, kyo, kz, jxijx, jxojx, jyijy, jyojy, jzjz
**units**,ps,|h1|,|h1|,|h1|,|h1|,|h1|,|h2|,|h2|,|h2|,|h2|,|h2|
.. |h1| replace:: Wm\ :sup:`-1` K\ :sup:`-1`
.. |h2| replace:: eV\ :sup:`3` amu\ :sup:`-1`
"""
Nc = __check_list(Nc, varname='Nc', dtype=int)
output_interval = __check_list(output_interval, varname='output_interval', dtype=int)
if not len(Nc) == len(output_interval):
raise ValueError('Nc and output_interval must be the same length.')
npoints = [int(x / y) for x, y in zip(Nc, output_interval)]
hac_path = __get_path(directory, filename)
data = pd.read_csv(hac_path, delim_whitespace=True, header=None)
__check_range(npoints, data.shape[0])
labels = ['t', 'jxijx', 'jxojx', 'jyijy', 'jyojy', 'jzjz',
'kxi', 'kxo', 'kyi', 'kyo', 'kz']
start = 0
out = dict()
for i, varlen in enumerate(npoints):
end = start + varlen
run = dict()
for j, key in enumerate(labels):
run[key] = data[j][start:end].to_numpy(dtype='float')
start = end
out['run{}'.format(i)] = run
return out
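# --- Illustrative usage sketch (not part of the original module) ---
# Reads hac.out for two EMD runs with the same correlation settings
# (placeholder values) and extracts the in-plane running thermal conductivity
# of the first run.
def _example_load_hac():
    hac = load_hac(Nc=[50000, 50000], output_interval=[10, 10], directory='run0')
    run = hac['run0']
    kappa_in_plane = run['kxi'] + run['kyi']  # in-plane (in) contributions
    return run['t'], kappa_in_plane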
def get_frequency_info(bin_f_size, eigfile='eigenvector.out', directory=None):
"""
Gathers eigen-frequency information from the eigenvector file and sorts
it appropriately based on the selected frequency bins (identical to
internal GPUMD representation).
Args:
bin_f_size (float):
The frequency-based bin size (in THz)
eigfile (str):
The filename of the eigenvector output/input file created by GPUMD
phonon package
directory (str):
Directory eigfile is stored
Returns:
        dict: Dictionary with the system eigen-frequency information along
with binning information
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,fq,fmax,fmin,shift,nbins,bin_count,bin_f_size
**units**,THz,THz,THz,N/A,N/A,N/A,THz
"""
if not directory:
eigpath = os.path.join(os.getcwd(), eigfile)
else:
eigpath = os.path.join(directory, eigfile)
with open(eigpath, 'r') as f:
om2 = [float(x) for x in f.readline().split()]
epsilon = 1.e-6 # tolerance for float errors
fq = np.sign(om2) * np.sqrt(abs(np.array(om2))) / (2 * np.pi)
fmax = (np.floor(np.abs(fq[-1]) / bin_f_size) + 1) * bin_f_size
fmin = np.floor(np.abs(fq[0]) / bin_f_size) * bin_f_size
shift = int(np.floor(np.abs(fmin) / bin_f_size + epsilon))
nbins = int(np.floor((fmax - fmin) / bin_f_size + epsilon))
bin_count = np.zeros(nbins)
for freq in fq:
bin_count[int(np.floor(np.abs(freq) / bin_f_size) - shift)] += 1
return {'fq': fq, 'fmax': fmax, 'fmin': fmin, 'shift': shift,
'nbins': nbins, 'bin_count': bin_count, 'bin_f_size': bin_f_size}
def reduce_frequency_info(freq, ndiv=1):
"""
Recalculates frequency binning information based on how many times larger bins are wanted.
Args:
freq (dict): Dictionary with frequency binning information from the get_frequency_info function output
ndiv (int):
Integer used to shrink number of bins output. If originally have 10 bins, but want 5, ndiv=2. nbins/ndiv
need not be an integer
Returns:
        dict: Dictionary with the system eigen-frequency information along with binning information
"""
epsilon = 1.e-6 # tolerance for float errors
freq = copy.deepcopy(freq)
freq['bin_f_size'] = freq['bin_f_size'] * ndiv
freq['fmax'] = (np.floor(np.abs(freq['fq'][-1]) / freq['bin_f_size']) + 1) * freq['bin_f_size']
nbins_new = int(np.ceil(freq['nbins'] / ndiv - epsilon))
npad = nbins_new * ndiv - freq['nbins']
freq['nbins'] = nbins_new
freq['bin_count'] = np.pad(freq['bin_count'], [(0, npad)])
freq['bin_count'] = np.sum(freq['bin_count'].reshape(-1, ndiv), axis=1)
freq['ndiv'] = ndiv
return freq
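# --- Illustrative usage sketch (not part of the original module) ---
# Computes the frequency binning used by GPUMD's modal analysis from the
# eigenvector file and then coarsens it by a factor of 2 with
# reduce_frequency_info. The 1 THz bin size and directory are placeholders.
def _example_frequency_binning():
    freq = get_frequency_info(bin_f_size=1.0, eigfile='eigenvector.out',
                              directory='run0')
    coarse = reduce_frequency_info(freq, ndiv=2)
    return freq['nbins'], coarse['nbins'], coarse['bin_count']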
def load_loss(directory=None, filename='loss.out'):
"""
    Loads the loss data from the loss.out NEP output file.
    Args:
        directory (str):
            Directory to load the loss data file from
        filename (str):
            Name of the loss data file
    Returns:
        'output' dictionary containing the data from loss.out
.. csv-table:: Output dictionary
:stub-columns: 1
**key**,Step,Total,L1,L2,E_train,F_train,V_train,E_test,F_test,V_test
**units**,1, 1, 1, 1,eV/atom, eV/A,eV/atom,eV/atom, eV/A,eV/atom
"""
loss_path = __get_path(directory, filename)
data = pd.read_csv(loss_path, delim_whitespace=True, header=None)
labels = ['Step', 'Total', 'L1', 'L2',
'E_train', 'F_train', 'V_train',
'E_test', 'F_test', 'V_test']
out = dict()
for i in range(data.shape[1]):
out[labels[i]] = data[i].to_numpy(dtype='float')
return out
def load_train(directory=None, filename=None):
"""
    Loads data from the NEP energy, force, or virial output files for the
    training or test dataset (e.g. energy_train.out or force_test.out).
    Args:
        directory (str):
            Directory to load the NEP output file from
        filename (str):
            Name of the NEP output file (energy/force/virial, train or test)
    Returns:
        'output' dictionary containing the data from the selected NEP output file
.. csv-table:: Output dictionary
:stub-columns: 1
**key**, Energy, Force, Virial
**units**,eV/atom, eV/A, eV/atom
"""
train_path = __get_path(directory, filename)
data = pd.read_csv(train_path, delim_whitespace=True, header=None)
if filename in ("energy_train.out", "energy_test.out"):
labels = ['E_nep', 'E_reference']
elif filename in ("virial_train.out", "virial_test.out"):
labels = ['V_nep', 'V_reference']
elif filename in ("force_train.out", "force_test.out"):
labels = ['F_nep_x', 'F_nep_y', 'F_nep_z',
'F_ref_x', 'F_ref_y', 'F_ref_z',]
else:
raise ValueError("The out filename is not correct, please check it again !")
out = dict()
for i in range(data.shape[1]):
out[labels[i]] = data[i].to_numpy(dtype='float')
return out
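# --- Illustrative usage sketch (not part of the original module) ---
# Loads NEP predicted vs. reference energies for the training set and computes
# a simple RMSE; the directory name is a placeholder.
def _example_load_train():
    energies = load_train(directory='nep_run', filename='energy_train.out')
    rmse = np.sqrt(np.mean((energies['E_nep'] - energies['E_reference']) ** 2))
    return rmse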
| 32.028866
| 120
| 0.601262
|
f1170f1f784e4369b6867e5901bb595c35bcb6a7
| 4,950
|
py
|
Python
|
schema.py
|
ines/spacy-graphql
|
a71b51c668fa434a645fdbed90bf538e59f3cb4a
|
[
"MIT"
] | 87
|
2018-08-01T15:24:47.000Z
|
2022-02-01T18:10:35.000Z
|
schema.py
|
ines/spacy-graphql
|
a71b51c668fa434a645fdbed90bf538e59f3cb4a
|
[
"MIT"
] | null | null | null |
schema.py
|
ines/spacy-graphql
|
a71b51c668fa434a645fdbed90bf538e59f3cb4a
|
[
"MIT"
] | 10
|
2018-08-01T23:33:27.000Z
|
2020-01-07T23:15:15.000Z
|
from graphene import ObjectType, Field, List, String, Boolean, Int, Float
class Token(ObjectType):
"""An individual token — a word, punctuation symbol, whitespace, etc."""
text = String(description="Verbatim text")
text_with_ws = String(description="Text with trailing space, if present")
orth = Int(description="ID of the verbatim text content")
i = Int(description="Index of the token within the parent Doc")
idx = Int(description="Character offset of the token within parent Doc")
head_i = Int(description="Index of the token's head")
lower = Int(description="Lowercase form")
lower_ = String(description="Lowercase form")
shape = Int(description="Transform of token text, to show orthographic features")
shape_ = String(description="Transform of token text, to show orthographic features")
lemma = Int(description="Base form of the token")
lemma_ = String(description="Base form of the token")
norm = Int(description="Normalized form of the token")
norm_ = String(description="Normalized form of the token")
pos = Int(description="Coarse-grained part-of-speech tag")
pos_ = String(description="Coarse-grained part-of-speech tag")
tag = Int(description="Fine-grained part-of-speech tag")
tag_ = String(description="Fine-grained part-of-speech tag")
dep = Int(description="Dependency label")
dep_ = String(description="Dependency label")
ent_type = Int(description="Named entity type")
ent_type_ = String(description="Named entity type")
ent_iob = Int(description="IOB code of named entity tag")
ent_iob_ = String(description="IOB code of named entity tag")
is_alpha = Boolean(description="Does the token consist of alphabetic characters?")
is_ascii = Boolean(description="Does the token consist of ASCII characters?")
is_digit = Boolean(description="Does the token consist of digits?")
is_lower = Boolean(description="Is the token lowercase?")
is_upper = Boolean(description="Is the token uppercase?")
is_title = Boolean(description="Is the token titlecase?")
is_punct = Boolean(description="Is the token punctuation?")
is_left_punct = Boolean(description="Is the token left punctuation?")
is_right_punct = Boolean(description="Is the token right punctuation?")
is_space = Boolean(description="Does the token consist of whitespace characters?")
is_bracket = Boolean(description="Is the token a bracket?")
is_quote = Boolean(description="Is the token a quotation mark?")
is_stop = Boolean(description="Is the token a stop word?")
like_num = Boolean(description="Does the token resemble a number?")
like_url = Boolean(description="Does the token resemble a URL?")
like_email = Boolean(description="Does the token resemble an email address?")
class Span(ObjectType):
"""A slice from a Doc object"""
text = String(description="Verbatim text")
text_with_ws = String(description="Text with trailing space, if present")
start = Int(description="The token offset for the start of the span")
end = Int(description="The token offset for the end of the span")
start_char = Int(description="The character offset for the start of the span")
end_char = Int(description="The character offset for the end of the span.")
label = Int(description="The span's label")
label_ = String(description="The span's label")
class Cat(ObjectType):
"""A text category predicted by the text classifier"""
label = String(description="The name of the category")
score = Float(description="The score predicted for the category")
class Doc(ObjectType):
"""A sequence of Token objects and a container for accessing linguistic
annotations."""
text = String(description="Verbatim text")
text_with_ws = String(description="Text with trailing space, if present")
tokens = List(Token, description="The tokens in the document")
ents = List(Span, description="The named entities in the document")
sents = List(Span, description="The sentences in the document")
cats = List(Cat, description="The text classification categories, if available")
class Meta(ObjectType):
"""The current model's meta information."""
lang = String(description="Model language")
name = String(description="Model name")
license = String(description="Model license")
author = String(description="Model author")
url = String(description="Model author URL")
email = String(description="Model author email")
description = String(description="Model description")
pipeline = List(String, description="Names of model pipeline components")
sources = List(String, description="Training data sources")
class NLP(ObjectType):
"""Container for processing results and meta information."""
doc = Field(Doc, description="The processed document")
meta = Field(Meta, description="The current model's meta information")
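# --- Illustrative sketch (not part of the original schema.py) ---
# Shows one way the ObjectTypes above could be exposed through an executable
# graphene schema. The Query type, its 'text' argument and the resolver body
# are assumptions for illustration only; the real application wires its own
# resolver around a loaded spaCy pipeline.
from graphene import Schema
class Query(ObjectType):
    """Hypothetical root query exposing the NLP container."""
    nlp = Field(NLP, text=String(required=True),
                description="Process a text and return its annotations")
    def resolve_nlp(self, info, text):
        # A real resolver would run a spaCy pipeline here (e.g. a model passed
        # in via the execution context) and map its Doc/Token/Span objects
        # onto the ObjectTypes defined above.
        return NLP(doc=Doc(text=text, text_with_ws=text), meta=Meta(lang="en"))
# auto_camelcase=False keeps the snake_case field names and avoids collisions
# between fields such as 'lower' and 'lower_'.
schema = Schema(query=Query, auto_camelcase=False)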
| 49.5
| 89
| 0.724242
|
6be0d720a8cb7e0f129f6c27a1253c2ba04603a8
| 914
|
py
|
Python
|
DJCore.py
|
ammasyaa/bot-presence
|
1111f2159973d0abd3eec1862728a6a898ab0a1b
|
[
"MIT"
] | null | null | null |
DJCore.py
|
ammasyaa/bot-presence
|
1111f2159973d0abd3eec1862728a6a898ab0a1b
|
[
"MIT"
] | null | null | null |
DJCore.py
|
ammasyaa/bot-presence
|
1111f2159973d0abd3eec1862728a6a898ab0a1b
|
[
"MIT"
] | null | null | null |
import discord
import os
from discord.ext import commands
bot = commands.AutoShardedBot(case_insensitive=True, command_prefix=commands.when_mentioned_or(os.getenv('PREFIX')))
bot.remove_command('help')
bot.initials = ('modules.misc', 'modules.music', 'modules.handler', 'modules.owner')
bot.owner = int(os.getenv('OWNER'))
bot.color = int(os.getenv('COLOR'), 16)
@bot.check
async def _bot_protection(ctx):
return not ctx.author.bot
@bot.event
async def on_ready():
print(f'Bot is ready! Logged as in: {bot.user}')
await bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.playing, name=f"Red Dead Online"))
if __name__ == "__main__":
for extension in bot.initials:
try:
bot.load_extension(extension)
except Exception as e:
print(f'Failed to load extension {extension}: {e}')
bot.run(os.getenv('TOKEN'))
| 32.642857
| 146
| 0.71663
|
932eeb4269b05020b8251453bfc5a400c85cd3e1
| 33,612
|
py
|
Python
|
reviewboard/diffviewer/parser.py
|
b1pb1p/reviewboard
|
b13aca3b88bc16d3c4258adce5df79cd1da577d3
|
[
"MIT"
] | null | null | null |
reviewboard/diffviewer/parser.py
|
b1pb1p/reviewboard
|
b13aca3b88bc16d3c4258adce5df79cd1da577d3
|
[
"MIT"
] | null | null | null |
reviewboard/diffviewer/parser.py
|
b1pb1p/reviewboard
|
b13aca3b88bc16d3c4258adce5df79cd1da577d3
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
import io
import logging
import re
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.translation import ugettext as _
from djblets.util.properties import AliasProperty, TypedProperty
from reviewboard.deprecation import RemovedInReviewBoard50Warning
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.scmtools.core import Revision
logger = logging.getLogger(__name__)
class ParsedDiffFile(object):
"""A parsed file from a diff.
This stores information on a single file represented in a diff, including
the contents of that file's diff, as parsed by :py:class:`DiffParser` or
one of its subclasses.
Parsers should set the attributes on this based on the contents of the
diff, and should add any data found in the diff.
This class is meant to be used internally and by subclasses of
:py:class:`DiffParser`.
Attributes:
        binary (bool):
Whether this represents a binary file.
copied (bool):
Whether this represents a file that has been copied. The file
may or may not be modified in the process.
deleted (bool):
Whether this represents a file that has been deleted.
delete_count (int):
The number of delete (``-``) lines found in the file.
insert_count (int):
The number of insert (``+``) lines found in the file.
is_symlink (bool):
Whether this represents a file that is a symbolic link to another
file.
moved (bool):
Whether this represents a file that has been moved/renamed. The
file may or may not be modified in the process.
parser (DiffParser):
The diff parser that parsed this file.
skip (bool):
Whether this file should be skipped by the parser. If any of the
parser methods set this, the file will stop parsing and will be
excluded from results.
"""
#: The parsed original name of the file.
#:
#: Type:
#: bytes
orig_filename = TypedProperty(bytes)
#: The parsed file details of the original file.
#:
#: This will usually be a revision.
#:
#: Type:
#: bytes or reviewboard.scmtools.core.Revision
orig_file_details = TypedProperty((bytes, Revision))
#: The parsed modified name of the file.
#:
#: This may be the same as :py:attr:`orig_filename`.
#:
#: Type:
#: bytes
modified_filename = TypedProperty(bytes)
#: The parsed file details of the modified file.
#:
#: This will usually be a revision.
#:
#: Type:
#: bytes or reviewboard.scmtools.core.Revision
modified_file_details = TypedProperty((bytes, Revision))
#: The parsed value for an Index header.
#:
#: If present in the diff, this usually contains a filename, but may
#: contain other content as well, depending on the variation of the diff
#: format.
#:
#: Type:
#: bytes
index_header_value = TypedProperty(bytes)
#: The parsed original name of the file.
#:
#: Deprecated:
#: 4.0:
#: Use :py:attr:`orig_filename` instead.
origFile = AliasProperty('orig_filename',
convert_to_func=force_bytes,
deprecated=True,
deprecation_warning=RemovedInReviewBoard50Warning)
#: The parsed file details of the original file.
#:
#: Deprecated:
#: 4.0:
#: Use :py:attr:`orig_file_details` instead.
origInfo = AliasProperty('orig_file_details',
convert_to_func=force_bytes,
deprecated=True,
deprecation_warning=RemovedInReviewBoard50Warning)
#: The parsed original name of the file.
#:
#: Deprecated:
#: 4.0:
#: Use :py:attr:`modified_filename` instead.
newFile = AliasProperty('modified_filename',
convert_to_func=force_bytes,
deprecated=True,
deprecation_warning=RemovedInReviewBoard50Warning)
#: The parsed file details of the modified file.
#:
#: Deprecated:
#: 4.0:
#: Use :py:attr:`modified_file_details` instead.
newInfo = AliasProperty('modified_file_details',
convert_to_func=force_bytes,
deprecated=True,
deprecation_warning=RemovedInReviewBoard50Warning)
#: The parsed value for an Index header.
#:
#: Deprecated:
#: 4.0:
#: Use :py:attr:`index_header_value` instead.
index = AliasProperty('index_header_value',
convert_to_func=force_bytes,
deprecated=True,
deprecation_warning=RemovedInReviewBoard50Warning)
def __init__(self, parser=None):
"""Initialize the parsed file information.
Args:
parser (reviewboard.diffviewer.parser.DiffParser, optional):
The diff parser that parsed this file.
"""
if parser is None:
RemovedInReviewBoard50Warning.warn(
                'Diff parsers must pass themselves as a parameter when '
'creating a ParsedDiffFile. This will be mandatory in '
'Review Board 5.0.')
self.parser = parser
self.binary = False
self.deleted = False
self.moved = False
self.copied = False
self.is_symlink = False
self.insert_count = 0
self.delete_count = 0
self.skip = False
self.extra_data = {}
self._data_io = io.BytesIO()
self._data = None
self._deprecated_info = {}
def __setitem__(self, key, value):
"""Set information on the parsed file from a diff.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to set attributes instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
key (str):
The key to set.
value (object):
The value to set.
"""
self._warn_old_usage_deprecation()
self._deprecated_info[key] = value
setattr(self, key, value)
def __getitem__(self, key):
"""Return information on the parsed file from a diff.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to access attributes
instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
key (str):
The key to retrieve.
Returns:
object:
The resulting value.
Raises:
KeyError:
The key is invalid.
"""
self._warn_old_usage_deprecation()
return self._deprecated_info[key]
def __contains__(self, key):
"""Return whether an old parsed file key has been explicitly set.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to check attribute values
instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
key (str):
The key to check.
Returns:
bool:
``True`` if the key has been explicitly set by a diff parser.
``False`` if it has not.
"""
self._warn_old_usage_deprecation()
return key in self._deprecated_info
def set(self, key, value):
"""Set information on the parsed file from a diff.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to set attributes instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
key (str):
The key to set.
value (object):
The value to set.
"""
self._warn_old_usage_deprecation()
self._deprecated_info[key] = value
setattr(self, key, value)
def get(self, key, default=None):
"""Return information on the parsed file from a diff.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to access attributes
instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
key (str):
The key to retrieve.
default (object, optional):
The default value to return.
Returns:
object:
The resulting value.
"""
self._warn_old_usage_deprecation()
return self._deprecated_info.get(key, default)
def update(self, items):
"""Update information on the parsed file from a diff.
This is a legacy implementation used to help diff parsers retain
compatibility with the old dictionary-based ways of setting parsed
file information. Callers should be updated to set individual
attributes instead.
Deprecated:
4.0:
This will be removed in Review Board 5.0.
Args:
items (dict):
The keys and values to set.
"""
self._warn_old_usage_deprecation()
for key, value in six.iteritems(items):
self._deprecated_info[key] = value
setattr(self, key, value)
@property
def data(self):
"""The data for this diff.
This must be accessed after :py:meth:`finalize` has been called.
"""
if self._data is None:
raise ValueError('ParsedDiffFile.data cannot be accessed until '
'finalize() is called.')
return self._data
def finalize(self):
"""Finalize the parsed diff.
This makes the diff data available to consumers and closes the buffer
for writing.
"""
self._data = self._data_io.getvalue()
self._data_io.close()
def prepend_data(self, data):
"""Prepend data to the buffer.
Args:
data (bytes):
The data to prepend.
"""
if data:
new_data_io = io.BytesIO()
new_data_io.write(data)
new_data_io.write(self._data_io.getvalue())
self._data_io.close()
self._data_io = new_data_io
def append_data(self, data):
"""Append data to the buffer.
Args:
data (bytes):
The data to append.
"""
if data:
self._data_io.write(data)
def _warn_old_usage_deprecation(self):
"""Warn that a DiffParser is populating information in an old way."""
if self.parser is None:
message = (
'Diff parsers must be updated to populate attributes on a '
'ParsedDiffFile, instead of setting the information in a '
'dictionary. This will be required in Review Board 5.0.'
)
else:
message = (
'%r must be updated to populate attributes on a '
'ParsedDiffFile, instead of setting the information in a '
'dictionary. This will be required in Review Board 5.0.'
% type(self.parser)
)
RemovedInReviewBoard50Warning.warn(message, stacklevel=3)
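# --- Illustrative sketch (not part of the original parser.py) ---
# Shows the intended lifecycle of a ParsedDiffFile from a parser's point of
# view: populate the attributes, append the raw diff lines, then call
# finalize() before reading .data. The filenames, revisions and diff lines are
# placeholder values.
def _example_parsed_diff_file(parser):
    parsed_file = ParsedDiffFile(parser=parser)
    parsed_file.orig_filename = b'README'
    parsed_file.orig_file_details = b'abc123'
    parsed_file.modified_filename = b'README'
    parsed_file.modified_file_details = b'def456'
    for line in (b'--- README\tabc123', b'+++ README\tdef456', b'+New line'):
        parsed_file.append_data(line)
        parsed_file.append_data(b'\n')
        if line.startswith(b'+') and not line.startswith(b'+++'):
            parsed_file.insert_count += 1
    parsed_file.finalize()
    return parsed_file.data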
class DiffParser(object):
"""Parses diff files, allowing subclasses to specialize parsing behavior.
This class provides the base functionality for parsing Unified Diff files.
It looks for common information present in many variations of diffs,
such as ``Index:`` lines, in order to extract files and their modified
content from a diff.
Subclasses can extend the parsing behavior to extract additional metadata
or handle special representations of changes. They may want to override the
following methods:
* :py:meth:`parse_special_header`
* :py:meth:`parse_diff_header`
* :py:meth:`parse_filename_header`
* :py:meth:`parse_after_headers`
* :py:meth:`get_orig_commit_id`
* :py:meth:`normalize_diff_filename`
"""
#: A separator string below an Index header.
#:
#: This is commonly found immediately below an ``Index:`` header, meant
#: to help locate the beginning of the metadata or changes made to a file.
#:
#: Its presence and location is not guaranteed.
INDEX_SEP = b'=' * 67
def __init__(self, data):
"""Initialize the parser.
Args:
data (bytes):
The diff content to parse.
Raises:
TypeError:
The provided ``data`` argument was not a ``bytes`` type.
"""
from reviewboard.diffviewer.diffutils import split_line_endings
if not isinstance(data, bytes):
raise TypeError(
_('%s expects bytes values for "data", not %s')
% (type(self).__name__, type(data)))
self.base_commit_id = None
self.new_commit_id = None
self.data = data
self.lines = split_line_endings(data)
def parse(self):
"""Parse the diff.
This will parse the content of the file, returning any files that
were found.
Returns:
list of ParsedDiffFile:
The resulting list of files.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing part of the diff. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
logger.debug('%s.parse: Beginning parse of diff, size = %s',
type(self).__name__, len(self.data))
preamble = io.BytesIO()
self.files = []
parsed_file = None
i = 0
# Go through each line in the diff, looking for diff headers.
while i < len(self.lines):
next_linenum, new_file = self.parse_change_header(i)
if new_file:
# This line is the start of a new file diff.
#
# First, finalize the last one.
if self.files:
self.files[-1].finalize()
parsed_file = new_file
# We need to prepend the preamble, if we have one.
parsed_file.prepend_data(preamble.getvalue())
preamble.close()
preamble = io.BytesIO()
self.files.append(parsed_file)
i = next_linenum
else:
if parsed_file:
i = self.parse_diff_line(i, parsed_file)
else:
preamble.write(self.lines[i])
preamble.write(b'\n')
i += 1
if self.files:
self.files[-1].finalize()
preamble.close()
logger.debug('%s.parse: Finished parsing diff.', type(self).__name__)
return self.files
def parse_diff_line(self, linenum, parsed_file):
"""Parse a line of data in a diff.
This will append the line to the parsed file's data, and if the
content represents active changes to a file, its insert/delete counts
will be updated to reflect them.
Args:
linenum (int):
The 0-based line number.
parsed_file (ParsedDiffFile):
The current parsed diff file info.
Returns:
int:
The next line number to parse.
"""
line = self.lines[linenum]
if (parsed_file.orig_filename is not None and
parsed_file.modified_filename is not None):
if line.startswith(b'-'):
parsed_file.delete_count += 1
elif line.startswith(b'+'):
parsed_file.insert_count += 1
parsed_file.append_data(line)
parsed_file.append_data(b'\n')
return linenum + 1
def parse_change_header(self, linenum):
"""Parse a header before a change to a file.
This will attempt to parse the following information, starting at the
specified line in the diff:
1. Any special file headers (such as ``Index:`` lines) through
:py:meth:`parse_special_header`
2. A standard Unified Diff file header (through
:py:meth:`parse_diff_header`)
3. Any content after the header (through
:py:meth:`parse_after_headers`)
If the special or diff headers are able to populate the original and
modified filenames and revisions/file details, and none of the methods
above mark the file as skipped (by setting
:py:attr:`ParsedDiffFile.skip`), then this will finish by appending
all parsed data and returning a parsed file entry.
Subclasses that need to control parsing logic should override one or
more of the above methods.
Args:
linenum (int):
The line number to begin parsing.
Returns:
tuple:
A tuple containing the following:
1. The next line number to parse
2. The populated :py:class:`ParsedDiffFile` instance for this file
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the change header. This may be
a corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
parsed_file = ParsedDiffFile(parser=self)
start = linenum
linenum = self.parse_special_header(linenum, parsed_file)
linenum = self.parse_diff_header(linenum, parsed_file)
skip = (
parsed_file.skip or
parsed_file.orig_filename is None or
parsed_file.orig_file_details is None or
parsed_file.modified_filename is None or
parsed_file.modified_file_details is None
)
if skip:
return linenum, None
# If we have enough information to represent a header, build the
# file to return.
if linenum < len(self.lines):
linenum = self.parse_after_headers(linenum, parsed_file)
if parsed_file.skip:
return linenum, None
# The header is part of the diff, so make sure it gets in the
# diff content.
for line in self.lines[start:linenum]:
parsed_file.append_data(line)
parsed_file.append_data(b'\n')
return linenum, parsed_file
def parse_special_header(self, linenum, parsed_file):
"""Parse a special diff header marking the start of a new file's info.
This attempts to locate an ``Index:`` line at the specified line
number, which usually indicates the beginning of file's information in
a diff (for Unified Diff variants that support it). By default, this
method expects the line to be found at ``linenum``.
If present, the value found immediately after the ``Index:`` will be
stored in :py:attr:`ParsedDiffFile.index_header_value`, allowing
subclasses to make a determination based on its contents (which may
        vary between types of diffs, but should include at least a filename).
If the ``Index:`` line is not present, this won't do anything by
default.
Subclasses can override this to parse additional information before the
        standard diff header. They may also set :py:attr:`ParsedDiffFile.skip`
to skip the rest of this file and begin parsing a new entry at the
returned line number.
Args:
linenum (int):
The line number to begin parsing.
parsed_file (ParsedDiffFile):
The file currently being parsed.
Returns:
int:
The next line number to parse.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the special header. This may be
a corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
try:
index_line = self.lines[linenum]
is_index = index_line.startswith(b'Index: ')
except IndexError:
is_index = False
if is_index:
# Try to find the "====" line.
temp_linenum = linenum + 1
while temp_linenum + 1 < len(self.lines):
line = self.lines[temp_linenum]
if line == self.INDEX_SEP:
# We found the line. This is looking like a valid diff
# for CVS, Subversion, and other systems. Try to parse
# the data from the line.
try:
parsed_file.index_header_value = \
index_line.split(None, 1)[1]
# Set these for backwards-compatibility.
#
# This should be removed in Review Board 5.0.
parsed_file._deprecated_info['index'] = \
parsed_file.index_header_value
except ValueError:
raise DiffParserError('Malformed Index line', linenum)
linenum = temp_linenum + 1
break
elif line.startswith((b'---', b'+++')):
# We never found that line, but we did hit the start of
# a diff file. We can't treat the "Index:" line as special
# in this case.
break
temp_linenum += 1
return linenum
def parse_diff_header(self, linenum, parsed_file):
"""Parse a standard header before changes made to a file.
This attempts to parse the ``---`` (original) and ``+++`` (modified)
file lines, which are usually present right before any changes to the
file. By default, this method expects the ``---`` line to be found at
``linenum``.
If found, this will populate :py:attr:`ParsedDiffFile.orig_filename`,
:py:attr:`ParsedDiffFile.orig_file_details`,
:py:attr:`ParsedDiffFile.modified_filename`, and
:py:attr:`ParsedDiffFile.modified_file_details`.
This calls out to :py:meth:`parse_filename_header` to help parse
the contents immediately after the ``---`` or ``+++``.
        Subclasses can override this to parse these lines differently, or to
        process the results of these lines (such as converting special
        filenames to states like "deleted" or "new file"). They may also set
        :py:attr:`ParsedDiffFile.skip` to skip the rest of this file and begin
parsing a new entry at the returned line number.
Args:
linenum (int):
The line number to begin parsing.
parsed_file (ParsedDiffFile):
The file currently being parsed.
Returns:
int:
The next line number to parse.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the diff header. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
try:
line1 = self.lines[linenum]
line2 = self.lines[linenum + 1]
is_diff_header = (
# Unified diff headers
(line1.startswith(b'--- ') and line2.startswith(b'+++ ')) or
# Context diff headers
(line1.startswith(b'*** ') and line2.startswith(b'--- ') and
not line1.endswith(b' ****'))
)
except IndexError:
is_diff_header = False
if is_diff_header:
# This is a unified or context diff header. Parse the
# file and extra info.
try:
(parsed_file.orig_filename,
parsed_file.orig_file_details) = \
self.parse_filename_header(self.lines[linenum][4:],
linenum)
linenum += 1
(parsed_file.modified_filename,
parsed_file.modified_file_details) = \
self.parse_filename_header(self.lines[linenum][4:],
linenum)
# Set these for backwards-compatibility.
#
# This should be removed in Review Board 5.0.
parsed_file._deprecated_info['origFile'] = \
parsed_file.orig_filename
parsed_file._deprecated_info['origInfo'] = \
parsed_file.orig_file_details
parsed_file._deprecated_info['newFile'] = \
parsed_file.modified_filename
parsed_file._deprecated_info['newInfo'] = \
parsed_file.modified_file_details
linenum += 1
except ValueError:
raise DiffParserError(
'The diff file is missing revision information',
linenum)
return linenum
def parse_after_headers(self, linenum, parsed_file):
"""Parse information after a diff header but before diff data.
This attempts to parse the information found after
:py:meth:`parse_diff_headers` is called, but before gathering any lines
that are part of the diff contents. It's intended for the few diff
formats that may place content at this location.
By default, this does nothing.
Subclasses can override this to provide custom parsing of any lines
        that may exist here. They may also set :py:attr:`ParsedDiffFile.skip`
to skip the rest of this file and begin parsing a new entry at the
returned line number.
Args:
linenum (int):
The line number to begin parsing.
parsed_file (ParsedDiffFile):
The file currently being parsed.
Returns:
int:
The next line number to parse.
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the diff header. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
return linenum
def parse_filename_header(self, s, linenum):
"""Parse the filename found in a diff filename line.
This parses the value after a ``---`` or ``+++`` indicator (or a
special variant handled by a subclass), normalizing the filename and
any following file details, and returning both for processing and
storage.
Often times, the file details will be a revision for the original
file, but this is not guaranteed, and is up to the variation of the
diff format.
By default, this will assume that a filename and file details are
separated by either a single tab, or two or more spaces. If neither
are found, this will fail to parse.
This must parse only the provided value, and cannot parse subsequent
lines.
Subclasses can override this behavior to parse these lines another
way, or to normalize filenames (handling escaping or filenames with
spaces as needed by that particular diff variation).
Args:
s (bytes):
The value to parse.
linenum (int):
The line number containing the value to parse.
Returns:
tuple:
A tuple containing:
1. The filename (as bytes)
2. The additional file information (as bytes)
Raises:
reviewboard.diffviewer.errors.DiffParserError:
There was an error parsing the diff header. This may be a
corrupted diff, or an error in the parsing implementation.
Details are in the error message.
"""
if b'\t' in s:
# There's a \t separating the filename and info. This is the
# best case scenario, since it allows for filenames with spaces
# without much work.
return s.split(b'\t', 1)
# There's spaces being used to separate the filename and info.
# This is technically wrong, so all we can do is assume that
# 1) the filename won't have multiple consecutive spaces, and
# 2) there's at least 2 spaces separating the filename and info.
if b' ' in s:
return re.split(br' +', s, 1)
raise DiffParserError('No valid separator after the filename was '
'found in the diff header',
linenum)
def raw_diff(self, diffset_or_commit):
"""Return a raw diff as a string.
This takes a DiffSet or DiffCommit and generates a new, single diff
file that represents all the changes made. It's used to regenerate
a diff and serve it up for other tools or processes to use.
Subclasses can override this to provide any special logic for building
the diff.
Args:
diffset_or_commit (reviewboard.diffviewer.models.diffset.DiffSet or
reviewboard.diffviewer.models.diffcommit
.DiffCommit):
The DiffSet or DiffCommit to render.
If passing in a DiffSet, only the cumulative diff's file
contents will be returned.
If passing in a DiffCommit, only that commit's file contents
will be returned.
Returns:
bytes:
The diff composed of all the component FileDiffs.
Raises:
TypeError:
The provided ``diffset_or_commit`` wasn't of a supported type.
"""
if hasattr(diffset_or_commit, 'cumulative_files'):
filediffs = diffset_or_commit.cumulative_files
elif hasattr(diffset_or_commit, 'files'):
filediffs = diffset_or_commit.files.all()
else:
raise TypeError('%r is not a valid value. Please pass a DiffSet '
'or DiffCommit.'
% diffset_or_commit)
return b''.join(
filediff.diff
for filediff in filediffs
)
def get_orig_commit_id(self):
"""Return the commit ID of the original revision for the diff.
By default, this returns ``None``. Subclasses would override this if
they work with repositories that always look up changes to a file by
the ID of the commit that made the changes instead of a per-file
revision or ID.
Non-``None`` values returned by this method will override the values
being stored in :py:attr:`FileDiff.source_revision
<reviewboard.diffviewer.models.filediff.FileDiff.source_revision>`.
Implementations would likely want to parse out the commit ID from
some prior header and return it here. By the time this is called, all
files will have been parsed already.
Returns:
bytes:
The commit ID used to override the source revision of any created
:py:class:`~reviewboard.diffviewer.models.filediff.FileDiff`
instances.
"""
return None
def normalize_diff_filename(self, filename):
"""Normalize filenames in diffs.
This returns a normalized filename suitable for populating in
:py:attr:`FileDiff.source_file
<reviewboard.diffviewer.models.filediff.FileDiff.source_file>` or
:py:attr:`FileDiff.dest_file
<reviewboard.diffviewer.models.filediff.FileDiff.dest_file>`, or
for when presenting a filename to the UI.
By default, this strips off any leading slashes, which might occur due
to differences in various diffing methods or APIs.
Subclasses can override this to provide additional methods of
normalization.
Args:
filename (unicode):
The filename to normalize.
Returns:
unicode:
The normalized filename.
"""
if filename.startswith('/'):
return filename[1:]
else:
return filename
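# --- Illustrative sketch (not part of the original parser.py) ---
# A minimal example of the subclassing pattern described in the DiffParser
# docstring. The class name and the 'a/'/'b/' prefix handling are assumptions
# chosen for illustration; this is not Review Board's actual Git parser.
class ExamplePrefixStrippingDiffParser(DiffParser):
    """Hypothetical parser that also strips 'a/' and 'b/' path prefixes."""
    def normalize_diff_filename(self, filename):
        filename = super(ExamplePrefixStrippingDiffParser,
                         self).normalize_diff_filename(filename)
        # Strip the conventional prefixes that some diff variants add to paths.
        if filename.startswith(('a/', 'b/')):
            filename = filename[2:]
        return filename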
| 34.50924
| 79
| 0.587885
|
3767af49cd6a236c8ca1b59cc3456bfaebe6a1f5
| 1,624
|
py
|
Python
|
mycloud/drive/filesystem/metadata_manager.py
|
ThomasGassmann/swisscom-my-cloud-backup
|
97e222c45a54197c82c8f3a5d59aa20bf3382ed8
|
[
"MIT"
] | 4
|
2019-11-28T22:10:43.000Z
|
2022-01-23T15:18:26.000Z
|
mycloud/drive/filesystem/metadata_manager.py
|
ThomasGassmann/swisscom-my-cloud-backup
|
97e222c45a54197c82c8f3a5d59aa20bf3382ed8
|
[
"MIT"
] | 18
|
2019-01-20T22:30:48.000Z
|
2020-06-09T21:16:07.000Z
|
mycloud/drive/filesystem/metadata_manager.py
|
thomasgassmann/mycloud-cli
|
97e222c45a54197c82c8f3a5d59aa20bf3382ed8
|
[
"MIT"
] | null | null | null |
from mycloud.common import get_string_generator
from mycloud.constants import METADATA_FILE_NAME
from mycloud.drive.filesystem.file_metadata import FileMetadata
from mycloud.drive.filesystem.translatable_path import TranslatablePath
from mycloud.mycloudapi import MyCloudRequestExecutor, ObjectResourceBuilder
from mycloud.mycloudapi.requests.drive import (GetObjectRequest,
PutObjectRequest)
class MetadataManager:
def __init__(self, request_executor: MyCloudRequestExecutor):
self._request_executor = request_executor
async def get_metadata(self, path: TranslatablePath):
metadata_path = MetadataManager._get_metadata_path(path)
get_request = GetObjectRequest(metadata_path)
response = await self._request_executor.execute(get_request)
text = await response.result.text()
return None if response.result.status == 404 else FileMetadata.from_json(text)
async def update_metadata(self, path: TranslatablePath, metadata: FileMetadata):
metadata_path = MetadataManager._get_metadata_path(path)
json_representation = FileMetadata.to_json(metadata)
byte_generator = get_string_generator(json_representation)
put_request = PutObjectRequest(metadata_path, byte_generator)
_ = await self._request_executor.execute(put_request)
@staticmethod
def _get_metadata_path(path: TranslatablePath):
full_path = path.calculate_remote()
metadata_path = ObjectResourceBuilder.combine_cloud_path(
full_path, METADATA_FILE_NAME)
return metadata_path
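# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an already-configured MyCloudRequestExecutor and a concrete
# TranslatablePath instance are available; both are placeholders here.
async def _example_metadata_roundtrip(request_executor, remote_path):
    manager = MetadataManager(request_executor)
    metadata = await manager.get_metadata(remote_path)  # None if no metadata file exists
    if metadata is not None:
        # Write the (possibly modified) metadata back to the same location.
        await manager.update_metadata(remote_path, metadata)
    return metadata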
| 46.4
| 86
| 0.759236
|
37d5f9daa8e6b4d0194745f765c55b0f973da231
| 2,940
|
py
|
Python
|
tests/test_compositions/test_composition.py
|
rddaz2013/pysketcher
|
9d4079baf0aa04f8fa80dc6edcf03bf1c14f70a4
|
[
"MIT"
] | 27
|
2020-09-03T16:59:32.000Z
|
2022-03-11T08:21:25.000Z
|
tests/test_compositions/test_composition.py
|
ReblochonMasque/pysketcher-1
|
9e804f4855edc6962b68e92091f35c2e960df813
|
[
"MIT"
] | 395
|
2020-09-05T06:32:54.000Z
|
2022-03-31T12:06:55.000Z
|
tests/test_compositions/test_composition.py
|
ReblochonMasque/pysketcher-1
|
9e804f4855edc6962b68e92091f35c2e960df813
|
[
"MIT"
] | 4
|
2021-04-19T09:23:06.000Z
|
2021-11-12T20:21:30.000Z
|
from hypothesis import given
from hypothesis.strategies import floats, sampled_from
import pytest
from pysketcher import Line, Point, Shape, Style, Text
from pysketcher.composition import Composition
class TestCompositionStyle:
@pytest.fixture(scope="module")
def composition(self):
shape1 = Line(Point(0, 1), Point(1, 1))
shape2 = Line(Point(1, 1), Point(0, 2))
text = Text("This is a test.", Point(2, 2))
composition = Composition(
{
"shape1": shape1,
"shape2": shape2,
"test": text,
}
)
return composition
@given(sampled_from(Style.LineStyle))
def test_line_style(self, composition: Composition, line_style: Style.LineStyle):
composition.style.line_style = line_style
assert composition["shape1"].style.line_style == line_style
assert composition["shape2"].style.line_style == line_style
@given(floats(allow_nan=False, allow_infinity=False))
def test_line_width(self, composition: Composition, line_width: float):
composition.style.line_width = line_width
assert composition["shape1"].style.line_width == line_width
assert composition["shape2"].style.line_width == line_width
@given(sampled_from(Style.Color))
def test_line_color(self, composition: Composition, line_color: Style.Color):
composition.style.line_color = line_color
assert composition["shape1"].style.line_color == line_color
assert composition["shape2"].style.line_color == line_color
@given(sampled_from(Style.Color))
def test_fill_color(self, composition: Composition, fill_color: Style.Color):
composition.style.fill_color = fill_color
assert composition["shape1"].style.fill_color == fill_color
assert composition["shape2"].style.fill_color == fill_color
@given(sampled_from(Style.FillPattern))
def test_fill_pattern(
self, composition: Composition, fill_pattern: Style.FillPattern
):
composition.style.fill_pattern = fill_pattern
assert composition["shape1"].style.fill_pattern == fill_pattern
assert composition["shape2"].style.fill_pattern == fill_pattern
@given(sampled_from(Style.ArrowStyle))
def test_arrow(self, composition: Composition, arrow: Style.ArrowStyle):
composition.style.arrow = arrow
assert composition["shape1"].style.arrow == arrow
assert composition["shape2"].style.arrow == arrow
@given(floats(allow_nan=False, allow_infinity=False))
def test_shadow(self, composition: Composition, shadow: float):
composition.style.shadow = shadow
assert composition["shape1"].style.shadow == shadow
assert composition["shape2"].style.shadow == shadow
def test_iteration(self, composition: Composition):
for shape in composition:
assert isinstance(shape, Shape)
| 41.408451
| 85
| 0.690816
|
2e469afc59ca855e8c6bc6a237f7491b3038e42d
| 1,878
|
py
|
Python
|
a1fbox/calllist.py
|
mvo5/a1fbox
|
7d1d648717d58538a2f14d711cff9e584e901d09
|
[
"MIT"
] | null | null | null |
a1fbox/calllist.py
|
mvo5/a1fbox
|
7d1d648717d58538a2f14d711cff9e584e901d09
|
[
"MIT"
] | null | null | null |
a1fbox/calllist.py
|
mvo5/a1fbox
|
7d1d648717d58538a2f14d711cff9e584e901d09
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from collections import Counter
from time import sleep
from callinfo import CallInfo
from callprefix import CallPrefix
from fritzconn import FritzConn
from fritzconnection.lib.fritzcall import FritzCall
from phonebook import Phonebook
if __name__ == "__main__":
# Quick example how to use only
# Initialize by using parameters from config file
fritzconn = FritzConn()
fc = FritzCall(fc=fritzconn)
cp = CallPrefix(fc=fritzconn)
pb = Phonebook(fc=fritzconn)
calls = fc.get_missed_calls(update=True)
missed_list = []
for call in calls:
number = call.Called if call.type == 3 else call.Caller
missed_list.append(number)
counts = Counter(missed_list)
print("\nMissed calls, ordered by count:")
print(counts)
calls = fc.get_calls(update=True)
numbers = set()
for call in calls:
number = call.Called if call.type == 3 else call.Caller
if number: # If CLIR / Anon, there is no number
if not number.startswith('0'):
number = cp.area_code + number
numbers.add(number)
print(f'\nAll {len(calls)} calls, uniqued {len(numbers)}:')
print(numbers)
anylist = pb.get_all_numbers_for_pb_ids([0, 1, 2]) # White- or blacklist
print('\nWhite- or blacklisted:')
unknowns = set()
for number in numbers:
name = pb.get_name_for_number_in_dict(number, anylist, area_code=cp.area_code)
if name:
print(f'{number} {name}')
else:
unknowns.add(number)
# Idea: rate & info ... auto-block .. or add good names to whitelist?
print('\nResolving Unknowns:')
for unknown in unknowns:
ci = CallInfo(unknown)
ci.get_cascade_score()
print(ci)
sleep(10) # Anti-DDOS needed for tellows and wemgehoert, otherwise you get blocked or captcha
| 30.786885
| 102
| 0.657082
|
fca0e7dcdc96233c4aba182b784b4ddcf64c734c
| 507
|
py
|
Python
|
plotly/validators/scatter3d/marker/colorbar/_tickvalssrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 12
|
2020-04-18T18:10:22.000Z
|
2021-12-06T10:11:15.000Z
|
plotly/validators/scatter3d/marker/colorbar/_tickvalssrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 27
|
2020-04-28T21:23:12.000Z
|
2021-06-25T15:36:38.000Z
|
plotly/validators/scatter3d/marker/colorbar/_tickvalssrc.py
|
gnestor/plotly.py
|
a8ae062795ddbf9867b8578fe6d9e244948c15ff
|
[
"MIT"
] | 6
|
2020-04-18T23:07:08.000Z
|
2021-11-18T07:53:06.000Z
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='tickvalssrc',
parent_name='scatter3d.marker.colorbar',
**kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
| 26.684211 | 70 | 0.617357 |
70f8134cc8f4a42b7d9133d5a22461fe393337c1 | 3,097 | py | Python | python/dllib/examples/autograd/custom.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | 3 | 2021-07-14T01:28:47.000Z | 2022-03-02T01:16:32.000Z | python/dllib/examples/autograd/custom.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | null | null | null | python/dllib/examples/autograd/custom.py | DirkFi/BigDL | 7493209165c046116470b9a1e1c8f527915d6f1e | ["Apache-2.0"] | null | null | null |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.dllib.nncontext import *
from bigdl.dllib.keras.autograd import *
from bigdl.dllib.keras.layers import *
from bigdl.dllib.keras.models import *
from optparse import OptionParser
def mean_absolute_error(y_true, y_pred):
result = mean(abs(y_true - y_pred), axis=1)
return result
def add_one_func(x):
return x + 1.0
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--nb_epoch", dest="nb_epoch", default="5")
parser.add_option("--batch_size", type=int,dest="batch_size", default=512)
parser.add_option("--cluster-mode", dest="clusterMode", default="local")
(options, args) = parser.parse_args(sys.argv)
conf = {}
if options.clusterMode.startswith("yarn"):
hadoop_conf = os.environ.get("HADOOP_CONF_DIR")
assert hadoop_conf, "Directory path to hadoop conf not found for yarn-client mode. Please " \
"set the environment variable HADOOP_CONF_DIR"
spark_conf = create_spark_conf().set("spark.executor.memory", "5g") \
.set("spark.executor.cores", 2) \
.set("spark.executor.instances", 2) \
.set("spark.driver.memory", "4g")
spark_conf.setAll(conf)
if options.clusterMode == "yarn-client":
sc = init_nncontext(spark_conf, cluster_mode="yarn-client", hadoop_conf=hadoop_conf)
else:
sc = init_nncontext(spark_conf, cluster_mode="yarn-cluster", hadoop_conf=hadoop_conf)
elif options.clusterMode == "local":
spark_conf = SparkConf().set("spark.driver.memory", "10g")\
.set("spark.driver.cores", 4)
sc = init_nncontext(spark_conf, cluster_mode="local")
elif options.clusterMode == "spark-submit":
sc = init_nncontext(cluster_mode="spark-submit")
data_len = 1000
X_ = np.random.uniform(0, 1, (1000, 2))
Y_ = ((2 * X_).sum(1) + 0.4).reshape([data_len, 1])
a = Input(shape=(2,))
b = Dense(1)(a)
c = Lambda(function=add_one_func)(b)
model = Model(input=a, output=c)
model.compile(optimizer=SGD(learningrate=1e-2),
loss=mean_absolute_error)
model.set_tensorboard('./log', 'customized layer and loss')
model.fit(x=X_,
y=Y_,
batch_size=options.batch_size,
nb_epoch=int(options.nb_epoch),
distributed=True)
model.save_graph_topology('./log')
w = model.get_weights()
print(w)
pred = model.predict_local(X_)
print("finished...")
sc.stop()
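# Hedged invocation sketch (values are illustrative, not from the original example):
#     python custom.py --nb_epoch 5 --batch_size 512 --cluster-mode local
# --cluster-mode also accepts yarn-client / yarn-cluster (which require HADOOP_CONF_DIR)
# and spark-submit, matching the branches above.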
| 34.411111 | 101 | 0.661608 |
43d410ed1629317c0ed17df2fcac9119300bb4df | 4,614 | py | Python | code_v1.2/prepare_peakCalling.py | Xinfeng-Yao/APEC | 0596e2c8947fd9eb3b4cb5acee8d298ebbe8408a | ["BSD-2-Clause"] | 24 | 2019-03-08T03:06:14.000Z | 2021-12-13T06:38:59.000Z | code_v1.2/prepare_peakCalling.py | Xinfeng-Yao/APEC | 0596e2c8947fd9eb3b4cb5acee8d298ebbe8408a | ["BSD-2-Clause"] | 1 | 2020-06-30T04:53:34.000Z | 2020-07-14T01:25:32.000Z | code_v1.2/prepare_peakCalling.py | Xinfeng-Yao/APEC | 0596e2c8947fd9eb3b4cb5acee8d298ebbe8408a | ["BSD-2-Clause"] | 6 | 2019-06-14T03:17:00.000Z | 2021-12-18T05:49:48.000Z |
#!/usr/bin/python
#
import warnings
warnings.filterwarnings("ignore")
#
import os,numpy,sys,subprocess
from optparse import OptionParser
import subroutines
#
#
opts = OptionParser()
usage = "Call peaks\nusage: %prog -s project --blist blacklist.bed --fa genome_chr.fa --tss tssFile --logq 3"
opts = OptionParser(usage=usage, version="%prog 1.0")
opts.add_option("-s", help="The project folder.")
opts.add_option("--picard", default="../reference/picard.jar",
help="The picard.jar file path, default=../reference/picard.jar")
opts.add_option("--blist", default='../reference/hg19_blacklist.JDB.bed',
help="Blacklist.bed, default=../reference/hg19_blacklist.JDB.bed")
opts.add_option("--fa", default='../reference/hg19_chr.fa',
help="Genome_chr.fa, default=../reference/hg19_chr.fa")
opts.add_option('--tss', default='../reference/hg19_refseq_genes_TSS.txt',
help='TSS file, default=../reference/hg19_refseq_genes_TSS.txt')
opts.add_option('--ref', default='hg19', help='Name of genome reference, default=hg19')
opts.add_option('--logq', default='3',
help='Threshold of -log(p-value) for top peaks, default=3.')
options, arguments = opts.parse_args()
#
workspace_folder = options.s + '/work/'
peak_folder = options.s + '/peak/'
genome_fasta = options.fa
tssFile = options.tss
if not os.path.exists(peak_folder): subprocess.check_call('mkdir ' + peak_folder, shell=True)
#
#
print('!!!!!! merge all marked bam files !!!!!!')
bam_folder = [x for x in os.listdir(workspace_folder)]
bam_folder.sort()
print('cells number:', len(bam_folder))
marked_bam = []
#merged_raw = peak_folder + 'mergeAll.raw.bam'
merged_bam = peak_folder + 'mergeAll.bam'
for folder in bam_folder:
path = workspace_folder + folder + '/'
if len(folder.split('.'))<=1:
marked_bam.extend([path + x for x in os.listdir(path) if x[-10:]=='marked.bam'])
if len(marked_bam)<=1000:
marked_bam = ' '.join(marked_bam)
subprocess.check_call('samtools merge -f ' + merged_bam + ' ' + marked_bam, shell=True)
else:
n_batch = len(marked_bam)//1000 + 1
temps = []
for i_batch in range(0, n_batch):
temp_bam = peak_folder+'temp_'+str(i_batch)+'.bam'
temps.append(temp_bam)
start, end = i_batch*1000, min((i_batch+1)*1000, len(marked_bam))
marked = ' '.join(marked_bam[start:end])
subprocess.check_call('samtools merge -f ' + temp_bam + ' ' + marked, shell=True)
subprocess.check_call('samtools index ' + temp_bam, shell=True)
all_temp = ' '.join(temps)
subprocess.check_call('samtools merge -f ' + merged_bam + ' ' + all_temp, shell=True)
#
subprocess.check_call('samtools index ' + merged_bam, shell=True)
print('!!!!!! merge done !!!!!!')
#
hist_log = peak_folder + 'mergeAll.hist.log'
hist_pdf = peak_folder + 'mergeAll.hist.pdf'
subprocess.check_call('java -XX:+UseSerialGC -Xmx1g -jar '+options.picard+' CollectInsertSizeMetrics VALIDATION_STRINGENCY=SILENT I='
+ merged_bam + ' O=' + hist_log + ' H=' + hist_pdf + ' W=1000', shell=True)
#
refSeqTSS = peak_folder + 'mergeAll.RefSeqTSS'
subroutines.draw_TSS_insert(tssFile, merged_bam, refSeqTSS)
#
print('!!!!!! call peak by macs2 !!!!!!')
peak_file = peak_folder + 'peaks'
subprocess.check_call('macs2 callpeak --nomodel -t ' + merged_bam + ' -n '
+ peak_file + ' --nolambda --keep-dup all --call-summits', shell=True)
print('!!!!!! call peak done !!!!!!')
#
summit = peak_folder + 'peaks_summits.bed'
filtered_peak = peak_folder + 'filtered_peaks.bed'
if options.blist:
print('!!!!!! filter peaks !!!!!!')
subprocess.check_call('bedtools intersect -v -a ' + summit + ' -b ' + options.blist
+ " | sort -k5 -nr > " + filtered_peak, shell=True)
print('!!!!!! filter peaks done !!!!!!')
else:
subprocess.check_call('sort -k5 -nr ' + summit + ' > ' + filtered_peak, shell=True)
print('!!!!!! get top peaks by q-value !!!!!!')
fold_rank = numpy.loadtxt(filtered_peak, 'str', delimiter='\t')
fold_rank[:, 1] = numpy.array(list(map(int, fold_rank[:, 1]))) - 249 # 250
fold_rank[:, 2] = numpy.array(list(map(int, fold_rank[:, 2]))) + 250
toppeaks = peak_folder + 'temp01.bed'
top_peaks = peak_folder + 'top_peaks.bed'
with open(toppeaks, 'w') as output:
for peak in fold_rank:
if float(peak[-1])>=float(options.logq):
# print >> output, peak[0]+'\t'+peak[1]+'\t'+peak[2]
output.write(peak[0]+'\t'+peak[1]+'\t'+peak[2]+'\n')
subprocess.check_call('bedtools sort -i ' + toppeaks + ' > ' + top_peaks, shell=True)
print('!!!!!! get top peaks done !!!!!!')
#
#
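# Hedged invocation sketch, mirroring the usage string above (the project name is a placeholder;
# the reference paths are the script defaults):
#     python prepare_peakCalling.py -s my_project --blist ../reference/hg19_blacklist.JDB.bed \
#         --fa ../reference/hg19_chr.fa --tss ../reference/hg19_refseq_genes_TSS.txt --logq 3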
| 44.365385 | 133 | 0.657997 |
0cef925bd8246224363aee57d548faff6bfc591e | 547 | py | Python | pyscripts/knn.py | mathewjhan/ivyhacks | e966754170e2638f5ac6714df33692e6ce294e88 | ["MIT"] | null | null | null | pyscripts/knn.py | mathewjhan/ivyhacks | e966754170e2638f5ac6714df33692e6ce294e88 | ["MIT"] | null | null | null | pyscripts/knn.py | mathewjhan/ivyhacks | e966754170e2638f5ac6714df33692e6ce294e88 | ["MIT"] | 2 | 2020-11-02T07:33:52.000Z | 2021-01-02T07:11:02.000Z |
from sklearn.neighbors import NearestNeighbors
import numpy as np
import pandas as pd
def knn():
userdata = pd.read_csv('userdata.csv')
ids = userdata['user_id'].to_numpy()
X = userdata.iloc[:, 4:].to_numpy()
nbrs = NearestNeighbors(n_neighbors=len(X), algorithm='auto').fit(X)
_, indices = nbrs.kneighbors(X)
similarity = {}
for i in range(indices.shape[0]):
similarity[ids[i]] = []
for j in range(1, indices.shape[1]):
similarity[ids[i]].append(ids[indices[i][j]])
return similarity
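# Hedged usage sketch (not part of the original module): knn() above needs a local
# 'userdata.csv', so the toy block below only demonstrates the same NearestNeighbors
# ranking pattern on made-up data.
if __name__ == "__main__":
    demo_X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
    demo_ids = ["a", "b", "c"]
    _, demo_idx = NearestNeighbors(n_neighbors=len(demo_X)).fit(demo_X).kneighbors(demo_X)
    # Column 0 is each point itself, so it is skipped, just like in knn() above.
    print({demo_ids[i]: [demo_ids[j] for j in demo_idx[i][1:]] for i in range(len(demo_ids))})
    # -> {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['b', 'a']}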
| 30.388889 | 72 | 0.64351 |
5f7a6ac5169917c134354511aaff0b1c903ffdd9 | 2,547 | py | Python | analysis/api/utils/custom.py | truemrwalker/mads-app | 79481293af2c0ce5533ab9ebd24868965c3c0031 | ["MIT"] | null | null | null | analysis/api/utils/custom.py | truemrwalker/mads-app | 79481293af2c0ce5533ab9ebd24868965c3c0031 | ["MIT"] | 2 | 2021-04-22T06:57:27.000Z | 2021-08-06T03:19:42.000Z | analysis/api/utils/custom.py | truemrwalker/mads-app | 79481293af2c0ce5533ab9ebd24868965c3c0031 | ["MIT"] | 2 | 2021-02-12T01:19:44.000Z | 2021-05-14T06:54:34.000Z |
import logging
import numpy as np
logger = logging.getLogger(__name__)
def get_custom(data):
# includeNoneVals = False
# if "undefinedIsIncluded" in data['view']['settings']:
# includeNoneVals = data['view']['settings']['undefinedIsIncluded']
# bins = data['view']['settings']['bins']
result = {}
# if type(data['data'][0]) is str:
# if(None in data['data']):
# if includeNoneVals:
# data['data'] = ['Undefined' if v is None else v for v in data['data']]
# else:
# data['data'] = [x for x in data['data'] if x is not None]
# unique_elements, counts_elements = np.unique(data['data'], return_counts=True)
# result['values'] = counts_elements
# result['dimensions'] = [str(ue) for ue in unique_elements]
# else:
# if(None in data['data']):
# if includeNoneVals:
# data['data'] = ['Undefined' if v is None else v for v in data['data']]
# else:
# data['data'] = [x for x in data['data'] if x is not None]
# unique_elements, counts_elements = np.unique(data['data'], return_counts=True)
# if bins == 0 or bins == len(unique_elements):
# result['values'] = counts_elements
# result['dimensions'] = [str(ue) for ue in unique_elements]
# else:
# hist, bin_edges = np.histogram(data['data'], bins=bins)
# result['values'] = hist
# floatsExists = False
# for x in bin_edges:
# if not (x.is_integer()):
# floatsExists = True
# break
# if floatsExists:
# result['dimensions'] = ["{:.2f}".format(x) + " - " + "{:.2f}".format(bin_edges[idx+1]) for idx, x in enumerate(bin_edges) if (idx+1) < (len(bin_edges))]
# else:
# result['dimensions'] = ["{:0.0f}".format(x) for x in bin_edges]
# x_array = np.array(data['data'])
# indices = []
# l = len(bin_edges)
# for i in range(l - 1):
# left = bin_edges[i]
# right = bin_edges[i + 1]
# ids = []
# if i == l - 2:
# ids = list(np.where((left <= x_array) & (x_array <= right))[0])
# else:
# ids = list(np.where((left <= x_array) & (x_array < right))[0])
# indices.append(ids)
# result['indices'] = indices
return result
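# Hedged sketch (not part of the original module): a standalone rendition of the histogram
# binning that the commented-out block above describes, run on made-up data.
if __name__ == "__main__":
    demo_data = [1, 2, 2, 3, 7, 8, 9, 9, 9, 10]
    hist, bin_edges = np.histogram(demo_data, bins=3)
    dimensions = ["{:0.0f} - {:0.0f}".format(bin_edges[i], bin_edges[i + 1])
                  for i in range(len(bin_edges) - 1)]
    print(dict(zip(dimensions, hist.tolist())))
    # -> {'1 - 4': 4, '4 - 7': 0, '7 - 10': 6}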
| 41.754098 | 171 | 0.498233 |
020f833081f1bda0738d4a20fc85b27d4295e932 | 3,694 | py | Python | apps/category/tests/test_mappers.py | tuanquanghpvn/rest_exam | 134f5e7bf44f8e5b0c601e04410693067aef6c4e | ["MIT"] | null | null | null | apps/category/tests/test_mappers.py | tuanquanghpvn/rest_exam | 134f5e7bf44f8e5b0c601e04410693067aef6c4e | ["MIT"] | null | null | null | apps/category/tests/test_mappers.py | tuanquanghpvn/rest_exam | 134f5e7bf44f8e5b0c601e04410693067aef6c4e | ["MIT"] | null | null | null |
from apps.base.mappers import (PagingRequestSchema, PagingResponseSchema)
from apps.category.mappers import (CategorySchema, GetCategoryRequestSchema, PostCategoryRequestSchema,
PutCategoryRequestSchema, DeleteCategoryRequestSchema)
from contracts.base import (LIMIT_NAME, OFFSET_NAME, SORT_NAME, COUNT_NAME, CURRENT_NAME, PREV_NAME, NEXT_NAME)
from contracts.category import (CATEGORY_ID, NAME, SLUG, GetCategoryRequest, PostCategoryRequest, PutCategoryRequest,
DeleteCategoryRequest)
TEST_COUNT = 10
TEST_CURRENT = "http://sample"
TEST_PREV = "http://sample/prev"
TEST_NEXT = "http://sample/next"
TEST_LIMIT = 10
TEST_OFFSET = 10
TEST_SORT = "sort"
TEST_PATH = "http://sample/path"
PAGING_REQUEST = {
LIMIT_NAME: TEST_LIMIT,
OFFSET_NAME: TEST_OFFSET,
SORT_NAME: TEST_SORT
}
PAGING_RESPONSE = {
COUNT_NAME: TEST_COUNT,
CURRENT_NAME: TEST_CURRENT,
PREV_NAME: TEST_PREV,
NEXT_NAME: TEST_NEXT
}
TEST_CATEGORY_ID = 1
TEST_CATEGORY_NAME = 'Category name 1'
TEST_CATEGORY_SLUG = 'category-name-1'
TEST_CATEGORY = {
CATEGORY_ID: TEST_CATEGORY_ID,
NAME: TEST_CATEGORY_NAME,
SLUG: TEST_CATEGORY_SLUG
}
TEST_CATEGORY_POST = {
NAME: TEST_CATEGORY_NAME,
SLUG: TEST_CATEGORY_SLUG
}
TEST_CATEGORY_PUT = {
NAME: TEST_CATEGORY_NAME,
SLUG: TEST_CATEGORY_SLUG
}
def _test_category(category, has_id=False, has_name=False, has_slug=False):
assert category.id == (TEST_CATEGORY_ID if has_id else None)
assert category.name == (TEST_CATEGORY_NAME if has_name else None)
assert category.slug == (TEST_CATEGORY_SLUG if has_slug else None)
class TestPagingRequestSchema(object):
def test_load(self):
actual, errors = PagingRequestSchema().load(PAGING_REQUEST)
assert actual == PAGING_REQUEST
assert errors == {}
class TestPagingResponseSchema(object):
def test_load(self):
actual, errors = PagingResponseSchema().load(PAGING_RESPONSE)
assert actual == PAGING_RESPONSE
assert errors == {}
class TestCategorySchema(object):
def test_load(self):
actual, errors = CategorySchema().load(TEST_CATEGORY)
assert actual == TEST_CATEGORY
assert errors == {}
class TestGetCategorySchema(object):
def test_load(self):
request = dict(PAGING_REQUEST)
actual, errors = GetCategoryRequestSchema().load(request)
assert isinstance(actual, GetCategoryRequest)
assert errors == {}
class TestPostCategorySchema(object):
def test_load(self):
expected = {
NAME: TEST_CATEGORY_NAME,
SLUG: TEST_CATEGORY_SLUG
}
actual, errors = PostCategoryRequestSchema().load(expected)
assert isinstance(actual, PostCategoryRequest)
assert errors == {}
_test_category(category=actual, has_name=True, has_slug=True)
class TestPutCategorySchema(object):
def test_load(self):
expected = {
CATEGORY_ID: TEST_CATEGORY_ID,
NAME: TEST_CATEGORY_NAME,
SLUG: TEST_CATEGORY_SLUG
}
actual, errors = PutCategoryRequestSchema().load(expected)
assert isinstance(actual, PutCategoryRequest)
assert errors == {}
_test_category(category=actual, has_id=True, has_name=True, has_slug=True)
class TestDeleteCategorySchema(object):
def test_load(self):
expected = {
CATEGORY_ID: TEST_CATEGORY_ID,
}
actual, errors = DeleteCategoryRequestSchema().load(expected)
assert isinstance(actual, DeleteCategoryRequest)
assert errors == {}
_test_category(category=actual, has_id=True)
| 31.042017 | 117 | 0.699783 |
12bc67d0dd1b92c28735f9f657acb1701550c160 | 2,567 | py | Python | test_kitti_depth.py | xuyufan936831611/vo_imu | 8a5753384b4a5c08dc83edf718d76a2ac308a298 | ["MIT"] | null | null | null | test_kitti_depth.py | xuyufan936831611/vo_imu | 8a5753384b4a5c08dc83edf718d76a2ac308a298 | ["MIT"] | null | null | null | test_kitti_depth.py | xuyufan936831611/vo_imu | 8a5753384b4a5c08dc83edf718d76a2ac308a298 | ["MIT"] | null | null | null |
from __future__ import division
import tensorflow as tf
import numpy as np
import os
# import scipy.misc
import PIL.Image as pil
from SfMLearner import SfMLearner
flags = tf.app.flags
flags.DEFINE_integer("batch_size", 4, "The size of of a sample batch")
flags.DEFINE_integer("img_height", 128, "Image height")
flags.DEFINE_integer("img_width", 416, "Image width")
flags.DEFINE_string("dataset_dir", None, "Dataset directory")
flags.DEFINE_string("output_dir", None, "Output directory")
flags.DEFINE_string("ckpt_file", None, "checkpoint file")
FLAGS = flags.FLAGS
def main(_):
with open('data/kitti/test_files_eigen.txt', 'r') as f:
test_files = f.readlines()
test_files = [FLAGS.dataset_dir + t[:-1] for t in test_files]
if not os.path.exists(FLAGS.output_dir):
os.makedirs(FLAGS.output_dir)
basename = os.path.basename(FLAGS.ckpt_file)
output_file = FLAGS.output_dir + '/' + basename
sfm = SfMLearner()
sfm.setup_inference(img_height=FLAGS.img_height,
img_width=FLAGS.img_width,
batch_size=FLAGS.batch_size,
mode='depth')
saver = tf.train.Saver([var for var in tf.model_variables()])
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
saver.restore(sess, FLAGS.ckpt_file)
pred_all = []
for t in range(0, len(test_files), FLAGS.batch_size):
if t % 100 == 0:
print('processing %s: %d/%d' % (basename, t, len(test_files)))
inputs = np.zeros(
(FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, 3),
dtype=np.uint8)
for b in range(FLAGS.batch_size):
idx = t + b
if idx >= len(test_files):
break
fh = open(test_files[idx], 'r')
raw_im = pil.open(fh)
scaled_im = raw_im.resize((FLAGS.img_width, FLAGS.img_height), pil.ANTIALIAS)
inputs[b] = np.array(scaled_im)
# im = scipy.misc.imread(test_files[idx])
# inputs[b] = scipy.misc.imresize(im, (FLAGS.img_height, FLAGS.img_width))
pred = sfm.inference(inputs, sess, mode='depth')
for b in range(FLAGS.batch_size):
idx = t + b
if idx >= len(test_files):
break
pred_all.append(pred['depth'][b,:,:])
np.save(output_file, pred_all)
if __name__ == '__main__':
tf.app.run()
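# Hedged invocation sketch (paths and checkpoint name are placeholders; the flags are the
# ones defined above):
#     python test_kitti_depth.py --dataset_dir /path/to/kitti_raw/ --output_dir ./predictions/ \
#         --ckpt_file ./checkpoints/model.ckpt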
| 41.403226 | 93 | 0.600312 |
7d5cd0c1cd9c42c9060933211ff9482c58989605 | 21,585 | py | Python | chia/util/keychain.py | Water-Networks/chia-blockchain | 58d2c427397dd45f36aa9e964fd6a11422b4eda4 | ["Apache-2.0"] | null | null | null | chia/util/keychain.py | Water-Networks/chia-blockchain | 58d2c427397dd45f36aa9e964fd6a11422b4eda4 | ["Apache-2.0"] | null | null | null | chia/util/keychain.py | Water-Networks/chia-blockchain | 58d2c427397dd45f36aa9e964fd6a11422b4eda4 | ["Apache-2.0"] | null | null | null |
import colorama
import os
import pkg_resources
import sys
import unicodedata
from bitstring import BitArray # pyright: reportMissingImports=false
from blspy import AugSchemeMPL, G1Element, PrivateKey # pyright: reportMissingImports=false
from chia.util.hash import std_hash
from chia.util.keyring_wrapper import KeyringWrapper
from hashlib import pbkdf2_hmac
from pathlib import Path
from secrets import token_bytes
from time import sleep
from typing import Any, Dict, List, Optional, Tuple
CURRENT_KEY_VERSION = "1.8"
DEFAULT_USER = f"user-chia-{CURRENT_KEY_VERSION}" # e.g. user-chia-1.8
DEFAULT_SERVICE = f"chia-{DEFAULT_USER}" # e.g. chia-user-chia-1.8
DEFAULT_PASSPHRASE_PROMPT = (
colorama.Fore.YELLOW + colorama.Style.BRIGHT + "(Unlock Keyring)" + colorama.Style.RESET_ALL + " Passphrase: "
) # noqa: E501
FAILED_ATTEMPT_DELAY = 0.5
MAX_KEYS = 100
MAX_RETRIES = 3
MIN_PASSPHRASE_LEN = 8
class KeyringIsLocked(Exception):
pass
class KeyringRequiresMigration(Exception):
pass
class KeyringCurrentPassphraseIsInvalid(Exception):
pass
class KeyringMaxUnlockAttempts(Exception):
pass
def supports_keyring_passphrase() -> bool:
# TODO: Enable once all platforms are supported and GUI work is finalized (including migration)
return False or os.environ.get("CHIA_PASSPHRASE_SUPPORT", "").lower() in ["1", "true"]
# from sys import platform
# return platform == "linux"
def supports_os_passphrase_storage() -> bool:
return sys.platform in ["darwin", "win32", "cygwin"]
def passphrase_requirements() -> Dict[str, Any]:
"""
Returns a dictionary specifying current passphrase requirements
"""
    if not supports_keyring_passphrase():
return {}
return {"is_optional": True, "min_length": MIN_PASSPHRASE_LEN} # lgtm [py/clear-text-logging-sensitive-data]
def set_keys_root_path(keys_root_path: Path) -> None:
"""
Used to set the keys_root_path prior to instantiating the KeyringWrapper shared instance.
"""
KeyringWrapper.set_keys_root_path(keys_root_path)
def obtain_current_passphrase(prompt: str = DEFAULT_PASSPHRASE_PROMPT, use_passphrase_cache: bool = False) -> str:
"""
Obtains the master passphrase for the keyring, optionally using the cached
value (if previously set). If the passphrase isn't already cached, the user is
prompted interactively to enter their passphrase a max of MAX_RETRIES times
before failing.
"""
from chia.cmds.passphrase_funcs import prompt_for_passphrase
if use_passphrase_cache:
passphrase, validated = KeyringWrapper.get_shared_instance().get_cached_master_passphrase()
if passphrase:
# If the cached passphrase was previously validated, we assume it's... valid
if validated:
return passphrase
# Cached passphrase needs to be validated
if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase):
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True)
return passphrase
else:
# Cached passphrase is bad, clear the cache
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(None)
# Prompt interactively with up to MAX_RETRIES attempts
for i in range(MAX_RETRIES):
colorama.init()
passphrase = prompt_for_passphrase(prompt)
if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase):
# If using the passphrase cache, and the user inputted a passphrase, update the cache
if use_passphrase_cache:
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True)
return passphrase
sleep(FAILED_ATTEMPT_DELAY)
print("Incorrect passphrase\n")
raise KeyringMaxUnlockAttempts("maximum passphrase attempts reached")
def unlocks_keyring(use_passphrase_cache=False):
"""
Decorator used to unlock the keyring interactively, if necessary
"""
def inner(func):
def wrapper(*args, **kwargs):
try:
if KeyringWrapper.get_shared_instance().has_master_passphrase():
obtain_current_passphrase(use_passphrase_cache=use_passphrase_cache)
except Exception as e:
print(f"Unable to unlock the keyring: {e}")
sys.exit(1)
return func(*args, **kwargs)
return wrapper
return inner
def bip39_word_list() -> str:
return pkg_resources.resource_string(__name__, "english.txt").decode()
def generate_mnemonic() -> str:
mnemonic_bytes = token_bytes(32)
mnemonic = bytes_to_mnemonic(mnemonic_bytes)
return mnemonic
def bytes_to_mnemonic(mnemonic_bytes: bytes) -> str:
if len(mnemonic_bytes) not in [16, 20, 24, 28, 32]:
raise ValueError(
f"Data length should be one of the following: [16, 20, 24, 28, 32], but it is {len(mnemonic_bytes)}."
)
word_list = bip39_word_list().splitlines()
CS = len(mnemonic_bytes) // 4
checksum = BitArray(bytes(std_hash(mnemonic_bytes)))[:CS]
bitarray = BitArray(mnemonic_bytes) + checksum
mnemonics = []
assert len(bitarray) % 11 == 0
for i in range(0, len(bitarray) // 11):
start = i * 11
end = start + 11
bits = bitarray[start:end]
m_word_position = bits.uint
m_word = word_list[m_word_position]
mnemonics.append(m_word)
return " ".join(mnemonics)
def bytes_from_mnemonic(mnemonic_str: str) -> bytes:
mnemonic: List[str] = mnemonic_str.split(" ")
if len(mnemonic) not in [12, 15, 18, 21, 24]:
raise ValueError("Invalid mnemonic length")
word_list = {word: i for i, word in enumerate(bip39_word_list().splitlines())}
bit_array = BitArray()
for i in range(0, len(mnemonic)):
word = mnemonic[i]
if word not in word_list:
raise ValueError(f"'{word}' is not in the mnemonic dictionary; may be misspelled")
value = word_list[word]
bit_array.append(BitArray(uint=value, length=11))
CS: int = len(mnemonic) // 3
ENT: int = len(mnemonic) * 11 - CS
assert len(bit_array) == len(mnemonic) * 11
assert ENT % 32 == 0
entropy_bytes = bit_array[:ENT].bytes
checksum_bytes = bit_array[ENT:]
checksum = BitArray(std_hash(entropy_bytes))[:CS]
assert len(checksum_bytes) == CS
if checksum != checksum_bytes:
raise ValueError("Invalid order of mnemonic words")
return entropy_bytes
def mnemonic_to_seed(mnemonic: str, passphrase: str) -> bytes:
"""
Uses BIP39 standard to derive a seed from entropy bytes.
"""
salt_str: str = "mnemonic" + passphrase
salt = unicodedata.normalize("NFKD", salt_str).encode("utf-8")
mnemonic_normalized = unicodedata.normalize("NFKD", mnemonic).encode("utf-8")
seed = pbkdf2_hmac("sha512", mnemonic_normalized, salt, 2048)
assert len(seed) == 64
return seed
def default_keychain_user() -> str:
return DEFAULT_USER
def default_keychain_service() -> str:
return DEFAULT_SERVICE
def get_private_key_user(user: str, index: int) -> str:
"""
Returns the keychain user string for a key index.
"""
return f"wallet-{user}-{index}"
class Keychain:
"""
The keychain stores two types of keys: private keys, which are PrivateKeys from blspy,
and private key seeds, which are bytes objects that are used as a seed to construct
PrivateKeys. Private key seeds are converted to mnemonics when shown to users.
Both types of keys are stored as hex strings in the python keyring, and the implementation of
the keyring depends on OS. Both types of keys can be added, and get_private_keys returns a
list of all keys.
"""
def __init__(self, user: Optional[str] = None, service: Optional[str] = None):
self.user = user if user is not None else default_keychain_user()
self.service = service if service is not None else default_keychain_service()
self.keyring_wrapper = KeyringWrapper.get_shared_instance()
@unlocks_keyring(use_passphrase_cache=True)
def _get_pk_and_entropy(self, user: str) -> Optional[Tuple[G1Element, bytes]]:
"""
Returns the keychain contents for a specific 'user' (key index). The contents
        include a G1Element and the entropy required to generate the private key.
Note that generating the actual private key also requires the passphrase.
"""
read_str = self.keyring_wrapper.get_passphrase(self.service, user)
if read_str is None or len(read_str) == 0:
return None
str_bytes = bytes.fromhex(read_str)
return (
G1Element.from_bytes(str_bytes[: G1Element.SIZE]),
str_bytes[G1Element.SIZE :], # flake8: noqa
)
def _get_free_private_key_index(self) -> int:
"""
Get the index of the first free spot in the keychain.
"""
index = 0
while True:
pk = get_private_key_user(self.user, index)
pkent = self._get_pk_and_entropy(pk)
if pkent is None:
return index
index += 1
@unlocks_keyring(use_passphrase_cache=True)
def add_private_key(self, mnemonic: str, passphrase: str) -> PrivateKey:
"""
Adds a private key to the keychain, with the given entropy and passphrase. The
keychain itself will store the public key, and the entropy bytes,
but not the passphrase.
"""
seed = mnemonic_to_seed(mnemonic, passphrase)
entropy = bytes_from_mnemonic(mnemonic)
index = self._get_free_private_key_index()
key = AugSchemeMPL.key_gen(seed)
fingerprint = key.get_g1().get_fingerprint()
if fingerprint in [pk.get_fingerprint() for pk in self.get_all_public_keys()]:
# Prevents duplicate add
return key
self.keyring_wrapper.set_passphrase(
self.service,
get_private_key_user(self.user, index),
bytes(key.get_g1()).hex() + entropy.hex(),
)
return key
def get_first_private_key(self, passphrases: List[str] = [""]) -> Optional[Tuple[PrivateKey, bytes]]:
"""
Returns the first key in the keychain that has one of the passed in passphrases.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if key.get_g1() == pk:
return (key, ent)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def get_private_key_by_fingerprint(
self, fingerprint: int, passphrases: List[str] = [""]
) -> Optional[Tuple[PrivateKey, bytes]]:
"""
        Return the first private key which has the given public key fingerprint.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if pk.get_fingerprint() == fingerprint:
return (key, ent)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def get_all_private_keys(self, passphrases: List[str] = [""]) -> List[Tuple[PrivateKey, bytes]]:
"""
Returns all private keys which can be retrieved, with the given passphrases.
A tuple of key, and entropy bytes (i.e. mnemonic) is returned for each key.
"""
all_keys: List[Tuple[PrivateKey, bytes]] = []
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if key.get_g1() == pk:
all_keys.append((key, ent))
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return all_keys
def get_all_public_keys(self) -> List[G1Element]:
"""
Returns all public keys.
"""
all_keys: List[Tuple[G1Element, bytes]] = []
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
all_keys.append(pk)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return all_keys
def get_first_public_key(self) -> Optional[G1Element]:
"""
Returns the first public key.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
return pk
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def delete_key_by_fingerprint(self, fingerprint: int):
"""
Deletes all keys which have the given public key fingerprint.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
if pk.get_fingerprint() == fingerprint:
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
def delete_all_keys(self):
"""
Deletes all keys from the keychain.
"""
index = 0
delete_exception = False
pkent = None
while True:
try:
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
except Exception:
# Some platforms might throw on no existing key
delete_exception = True
# Stop when there are no more keys to delete
if (pkent is None or delete_exception) and index > MAX_KEYS:
break
index += 1
index = 0
delete_exception = True
pkent = None
while True:
try:
pkent = self._get_pk_and_entropy(
get_private_key_user(self.user, index)
) # changed from _get_fingerprint_and_entropy to _get_pk_and_entropy - GH
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
except Exception:
# Some platforms might throw on no existing key
delete_exception = True
# Stop when there are no more keys to delete
if (pkent is None or delete_exception) and index > MAX_KEYS:
break
index += 1
@staticmethod
def is_keyring_locked() -> bool:
"""
Returns whether the keyring is in a locked state. If the keyring doesn't have a master passphrase set,
or if a master passphrase is set and the cached passphrase is valid, the keyring is "unlocked"
"""
# Unlocked: If a master passphrase isn't set, or if the cached passphrase is valid
if not Keychain.has_master_passphrase() or (
Keychain.has_cached_passphrase()
and Keychain.master_passphrase_is_valid(Keychain.get_cached_master_passphrase())
):
return False
# Locked: Everything else
return True
@staticmethod
def needs_migration() -> bool:
"""
Returns a bool indicating whether the underlying keyring needs to be migrated to the new
format for passphrase support.
"""
return KeyringWrapper.get_shared_instance().using_legacy_keyring()
@staticmethod
def handle_migration_completed():
"""
When migration completes outside of the current process, we rely on a notification to inform
the current process that it needs to reset/refresh its keyring. This allows us to stop using
the legacy keyring in an already-running daemon if migration is completed using the CLI.
"""
KeyringWrapper.get_shared_instance().refresh_keyrings()
@staticmethod
def migrate_legacy_keyring(passphrase: Optional[str] = None, cleanup_legacy_keyring: bool = False) -> None:
"""
Begins legacy keyring migration in a non-interactive manner
"""
if passphrase is not None and passphrase != "":
KeyringWrapper.get_shared_instance().set_master_passphrase(
current_passphrase=None, new_passphrase=passphrase, write_to_keyring=False, allow_migration=False
)
KeyringWrapper.get_shared_instance().migrate_legacy_keyring(cleanup_legacy_keyring=cleanup_legacy_keyring)
@staticmethod
def passphrase_is_optional() -> bool:
"""
Returns whether a user-supplied passphrase is optional, as specified by the passphrase requirements.
"""
return passphrase_requirements().get("is_optional", False)
@staticmethod
def minimum_passphrase_length() -> int:
"""
Returns the minimum passphrase length, as specified by the passphrase requirements.
"""
return passphrase_requirements().get("min_length", 0)
@staticmethod
def passphrase_meets_requirements(passphrase: Optional[str]) -> bool:
"""
Returns whether the provided passphrase satisfies the passphrase requirements.
"""
# Passphrase is not required and None was provided
if (passphrase is None or passphrase == "") and Keychain.passphrase_is_optional():
return True
# Passphrase meets the minimum length requirement
if passphrase is not None and len(passphrase) >= Keychain.minimum_passphrase_length():
return True
return False
@staticmethod
def has_master_passphrase() -> bool:
"""
Returns a bool indicating whether the underlying keyring data
is secured by a passphrase.
"""
return KeyringWrapper.get_shared_instance().has_master_passphrase()
@staticmethod
def master_passphrase_is_valid(passphrase: str, force_reload: bool = False) -> bool:
"""
Checks whether the provided passphrase can unlock the keyring. If force_reload
is true, the keyring payload will be re-read from the backing file. If false,
the passphrase will be checked against the in-memory payload.
"""
return KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase, force_reload=force_reload)
@staticmethod
def has_cached_passphrase() -> bool:
"""
Returns whether the master passphrase has been cached (it may need to be validated)
"""
return KeyringWrapper.get_shared_instance().has_cached_master_passphrase()
@staticmethod
def get_cached_master_passphrase() -> str:
"""
Returns the cached master passphrase
"""
passphrase, _ = KeyringWrapper.get_shared_instance().get_cached_master_passphrase()
return passphrase
@staticmethod
def set_cached_master_passphrase(passphrase: Optional[str]) -> None:
"""
Caches the provided master passphrase
"""
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase)
@staticmethod
def set_master_passphrase(
current_passphrase: Optional[str],
new_passphrase: str,
*,
allow_migration: bool = True,
save_passphrase: bool = False,
) -> None:
"""
Encrypts the keyring contents to new passphrase, provided that the current
passphrase can decrypt the contents
"""
KeyringWrapper.get_shared_instance().set_master_passphrase(
current_passphrase, new_passphrase, allow_migration=allow_migration, save_passphrase=save_passphrase
)
@staticmethod
def remove_master_passphrase(current_passphrase: Optional[str]) -> None:
"""
Removes the user-provided master passphrase, and replaces it with the default
master passphrase. The keyring contents will remain encrypted, but to the
default passphrase.
"""
KeyringWrapper.get_shared_instance().remove_master_passphrase(current_passphrase)
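# Hedged usage sketch (not part of the original module; it still needs the module's own
# imports such as blspy and bitstring to be importable). mnemonic_to_seed() above is plain
# PBKDF2-HMAC-SHA512 over the normalized mnemonic, so it can be exercised directly; the
# phrase below is an arbitrary placeholder, not a valid BIP39 mnemonic.
if __name__ == "__main__":
    demo_seed = mnemonic_to_seed("placeholder words only for the seed derivation demo", "")
    print(len(demo_seed), demo_seed.hex())  # 64-byte seed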
| 37.151463 | 117 | 0.649571 |
83ef2dbc3031bc08f0ac190551b9d5d9eb51289d | 40,694 | py | Python | examples/flax/language-modeling/run_t5_mlm_flax.py | bhavika/transformers | 65cf33e7e53cd46313f3655f274b3f6ca0fd679d | ["Apache-2.0"] | 1 | 2022-03-16T13:02:15.000Z | 2022-03-16T13:02:15.000Z | examples/flax/language-modeling/run_t5_mlm_flax.py | bhavika/transformers | 65cf33e7e53cd46313f3655f274b3f6ca0fd679d | ["Apache-2.0"] | 2 | 2022-03-14T10:13:16.000Z | 2022-03-14T11:50:27.000Z | examples/flax/language-modeling/run_t5_mlm_flax.py | bhavika/transformers | 65cf33e7e53cd46313f3655f274b3f6ca0fd679d | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pretraining the library models for T5-like span-masked language modeling on a text file or a dataset.
Here is the full list of checkpoints on the hub that can be pretrained by this script:
https://huggingface.co/models?filter=t5
"""
import json
import logging
import os
import sys
import time
from dataclasses import asdict, dataclass, field
# You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments.
from enum import Enum
from itertools import chain
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
from datasets import load_dataset
from tqdm import tqdm
import flax
import jax
import jax.numpy as jnp
import optax
from flax import jax_utils, traverse_util
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard
from huggingface_hub import Repository
from transformers import (
CONFIG_MAPPING,
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
AutoTokenizer,
BatchEncoding,
FlaxT5ForConditionalGeneration,
HfArgumentParser,
PreTrainedTokenizerBase,
T5Config,
is_tensorboard_available,
set_seed,
)
from transformers.file_utils import get_full_repo_name
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class TrainingArguments:
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
push_to_hub: bool = field(
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
)
hub_model_id: str = field(
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
)
hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
def __post_init__(self):
if self.output_dir is not None:
self.output_dir = os.path.expanduser(self.output_dir)
def to_dict(self):
"""
        Serializes this instance while replacing `Enum` members with their values (for JSON serialization support).
        It obfuscates token values by replacing them with a placeholder.
"""
d = asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
d[k] = [x.value for x in v]
if k.endswith("_token"):
d[k] = f"<{k.upper()}>"
return d
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: Optional[str] = field(
default=None,
metadata={
"help": "The model checkpoint for weights initialization."
"Don't set if you want to train a model from scratch."
},
)
model_type: Optional[str] = field(
default=None,
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": "Floating-point format in which the model weights should be initialized and trained. Choose one of `[float32, float16, bfloat16]`."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
train_ref_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
)
validation_ref_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
validation_split_percentage: Optional[int] = field(
default=5,
metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total input sequence length after tokenization and masking. Sequences longer than this will be truncated. Default to the max input length of the model."
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
mlm_probability: float = field(
default=0.15, metadata={"help": "Ratio of tokens to mask for span masked language modeling loss"}
)
mean_noise_span_length: float = field(
default=3.0,
metadata={"help": "Mean span length of masked tokens"},
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def compute_input_and_target_lengths(inputs_length, noise_density, mean_noise_span_length):
"""This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2466>`__ .
Training parameters to avoid padding with random_spans_noise_mask.
When training a model with random_spans_noise_mask, we would like to set the other
    training hyperparameters in a way that avoids padding.
This function helps us compute these hyperparameters.
We assume that each noise span in the input is replaced by extra_tokens_per_span_inputs sentinel tokens,
and each non-noise span in the targets is replaced by extra_tokens_per_span_targets sentinel tokens.
This function tells us the required number of tokens in the raw example (for split_tokens())
as well as the length of the encoded targets. Note that this function assumes
the inputs and targets will have EOS appended and includes that in the reported length.
Args:
inputs_length: an integer - desired length of the tokenized inputs sequence
noise_density: a float
mean_noise_span_length: a float
Returns:
tokens_length: length of original text in tokens
targets_length: an integer - length in tokens of encoded targets sequence
"""
def _tokens_length_to_inputs_length_targets_length(tokens_length):
num_noise_tokens = int(round(tokens_length * noise_density))
num_nonnoise_tokens = tokens_length - num_noise_tokens
num_noise_spans = int(round(num_noise_tokens / mean_noise_span_length))
# inputs contain all nonnoise tokens, sentinels for all noise spans
# and one EOS token.
_input_length = num_nonnoise_tokens + num_noise_spans + 1
_output_length = num_noise_tokens + num_noise_spans + 1
return _input_length, _output_length
tokens_length = inputs_length
while _tokens_length_to_inputs_length_targets_length(tokens_length + 1)[0] <= inputs_length:
tokens_length += 1
inputs_length, targets_length = _tokens_length_to_inputs_length_targets_length(tokens_length)
# minor hack to get the targets length to be equal to inputs length
# which is more likely to have been set to a nice round number.
if noise_density == 0.5 and targets_length > inputs_length:
tokens_length -= 1
targets_length -= 1
return tokens_length, targets_length
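# Worked example (sketch): with inputs_length=512, noise_density=0.15 and
# mean_noise_span_length=3.0 this returns (568, 114): 568 raw tokens give
# 483 non-noise tokens + 28 sentinels + EOS = 512 inputs, and 85 noise tokens
# + 28 sentinels + EOS = 114 targets.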
@flax.struct.dataclass
class FlaxDataCollatorForT5MLM:
"""
Data collator used for T5 span-masked language modeling.
    The collator ensures that, after masking, the inputs have length `data_args.max_seq_length` and the targets also have a fixed length.
For more information on how T5 span-masked language modeling works, one can take a look
at the `official paper <https://arxiv.org/pdf/1910.10683.pdf>`__
or the `official code for preprocessing <https://github.com/google-research/text-to-text-transfer-transformer/blob/master/t5/data/preprocessors.py>`__ .
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
noise_density (:obj:`float`):
The probability with which to (randomly) mask tokens in the input.
mean_noise_span_length (:obj:`float`):
The average span length of the masked tokens.
input_length (:obj:`int`):
The expected input length after masking.
target_length (:obj:`int`):
The expected target length after masking.
pad_token_id: (:obj:`int`):
The pad token id of the model
        decoder_start_token_id: (:obj:`int`):
The decoder start token id of the model
"""
tokenizer: PreTrainedTokenizerBase
noise_density: float
mean_noise_span_length: float
input_length: int
target_length: int
pad_token_id: int
decoder_start_token_id: int
def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]:
# convert list to dict and tensorize input
batch = BatchEncoding(
{k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()}
)
input_ids = batch["input_ids"]
batch_size, expandend_input_length = input_ids.shape
mask_indices = np.asarray([self.random_spans_noise_mask(expandend_input_length) for i in range(batch_size)])
labels_mask = ~mask_indices
input_ids_sentinel = self.create_sentinel_ids(mask_indices.astype(np.int8))
labels_sentinel = self.create_sentinel_ids(labels_mask.astype(np.int8))
batch["input_ids"] = self.filter_input_ids(input_ids, input_ids_sentinel)
batch["labels"] = self.filter_input_ids(input_ids, labels_sentinel)
if batch["input_ids"].shape[-1] != self.input_length:
raise ValueError(
f"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but should be {self.target_length}."
)
if batch["labels"].shape[-1] != self.target_length:
raise ValueError(
f"`labels` are incorrectly preprocessed. `labels` length is {batch['labels'].shape[-1]}, but should be {self.target_length}."
)
        # to check that tokens are correctly preprocessed, one can run `self.tokenizer.batch_decode(input_ids)` and `self.tokenizer.batch_decode(labels)` here...
batch["decoder_input_ids"] = shift_tokens_right(
batch["labels"], self.pad_token_id, self.decoder_start_token_id
)
return batch
def create_sentinel_ids(self, mask_indices):
"""
Sentinel ids creation given the indices that should be masked.
The start indices of each mask are replaced by the sentinel ids in increasing
order. Consecutive mask indices to be deleted are replaced with `-1`.
"""
start_indices = mask_indices - np.roll(mask_indices, 1, axis=-1) * mask_indices
start_indices[:, 0] = mask_indices[:, 0]
sentinel_ids = np.where(start_indices != 0, np.cumsum(start_indices, axis=-1), start_indices)
sentinel_ids = np.where(sentinel_ids != 0, (len(self.tokenizer) - sentinel_ids), 0)
sentinel_ids -= mask_indices - start_indices
return sentinel_ids
def filter_input_ids(self, input_ids, sentinel_ids):
"""
Puts sentinel mask on `input_ids` and fuse consecutive mask tokens into a single mask token by deleting.
This will reduce the sequence length from `expanded_inputs_length` to `input_length`.
"""
batch_size = input_ids.shape[0]
input_ids_full = np.where(sentinel_ids != 0, sentinel_ids, input_ids)
# input_ids tokens and sentinel tokens are >= 0, tokens < 0 are
# masked tokens coming after sentinel tokens and should be removed
input_ids = input_ids_full[input_ids_full >= 0].reshape((batch_size, -1))
input_ids = np.concatenate(
[input_ids, np.full((batch_size, 1), self.tokenizer.eos_token_id, dtype=np.int32)], axis=-1
)
return input_ids
def random_spans_noise_mask(self, length):
"""This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ .
Noise mask consisting of random spans of noise tokens.
The number of noise tokens and the number of noise spans and non-noise spans
are determined deterministically as follows:
num_noise_tokens = round(length * noise_density)
num_nonnoise_spans = num_noise_spans = round(num_noise_tokens / mean_noise_span_length)
Spans alternate between non-noise and noise, beginning with non-noise.
Subject to the above restrictions, all masks are equally likely.
Args:
length: an int32 scalar (length of the incoming token sequence)
noise_density: a float - approximate density of output mask
mean_noise_span_length: a number
Returns:
a boolean tensor with shape [length]
"""
orig_length = length
num_noise_tokens = int(np.round(length * self.noise_density))
# avoid degeneracy by ensuring positive numbers of noise and nonnoise tokens.
num_noise_tokens = min(max(num_noise_tokens, 1), length - 1)
num_noise_spans = int(np.round(num_noise_tokens / self.mean_noise_span_length))
# avoid degeneracy by ensuring positive number of noise spans
num_noise_spans = max(num_noise_spans, 1)
num_nonnoise_tokens = length - num_noise_tokens
# pick the lengths of the noise spans and the non-noise spans
def _random_segmentation(num_items, num_segments):
"""Partition a sequence of items randomly into non-empty segments.
Args:
num_items: an integer scalar > 0
num_segments: an integer scalar in [1, num_items]
Returns:
a Tensor with shape [num_segments] containing positive integers that add
up to num_items
"""
mask_indices = np.arange(num_items - 1) < (num_segments - 1)
np.random.shuffle(mask_indices)
first_in_segment = np.pad(mask_indices, [[1, 0]])
segment_id = np.cumsum(first_in_segment)
# count length of sub segments assuming that list is sorted
_, segment_length = np.unique(segment_id, return_counts=True)
return segment_length
noise_span_lengths = _random_segmentation(num_noise_tokens, num_noise_spans)
nonnoise_span_lengths = _random_segmentation(num_nonnoise_tokens, num_noise_spans)
interleaved_span_lengths = np.reshape(
np.stack([nonnoise_span_lengths, noise_span_lengths], axis=1), [num_noise_spans * 2]
)
span_starts = np.cumsum(interleaved_span_lengths)[:-1]
span_start_indicator = np.zeros((length,), dtype=np.int8)
span_start_indicator[span_starts] = True
span_num = np.cumsum(span_start_indicator)
is_noise = np.equal(span_num % 2, 1)
return is_noise[:orig_length]
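    # Worked example (sketch): with length=100, noise_density=0.15 and mean_noise_span_length=3.0,
    # 15 noise tokens are spread over 5 randomly sized noise spans, interleaved with 5 non-noise
    # spans covering the remaining 85 tokens; the sequence always starts with a non-noise span.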
def generate_batch_splits(samples_idx: jnp.ndarray, batch_size: int) -> jnp.ndarray:
num_samples = len(samples_idx)
samples_to_remove = num_samples % batch_size
if samples_to_remove != 0:
samples_idx = samples_idx[:-samples_to_remove]
sections_split = num_samples // batch_size
batch_idx = np.split(samples_idx, sections_split)
return batch_idx
def write_train_metric(summary_writer, train_metrics, train_time, step):
summary_writer.scalar("train_time", train_time, step)
train_metrics = get_metrics(train_metrics)
for key, vals in train_metrics.items():
tag = f"train_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
def write_eval_metric(summary_writer, eval_metrics, step):
for metric_name, value in eval_metrics.items():
summary_writer.scalar(f"eval_{metric_name}", value, step)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
level=logging.INFO,
datefmt="[%X]",
)
# Log on each process the small summary:
logger = logging.getLogger(__name__)
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Handle the repository creation
if training_args.push_to_hub:
if training_args.hub_model_id is None:
repo_name = get_full_repo_name(
Path(training_args.output_dir).absolute().name, token=training_args.hub_token
)
else:
repo_name = training_args.hub_model_id
repo = Repository(training_args.output_dir, clone_from=repo_name)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{data_args.validation_split_percentage}%]",
cache_dir=model_args.cache_dir,
)
datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{data_args.validation_split_percentage}%:]",
cache_dir=model_args.cache_dir,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
if model_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
elif model_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if model_args.config_name:
config = T5Config.from_pretrained(
model_args.config_name, cache_dir=model_args.cache_dir, vocab_size=len(tokenizer)
)
elif model_args.model_name_or_path:
config = T5Config.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
config = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
column_names = datasets["train"].column_names
else:
column_names = datasets["validation"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# We tokenize every text, then concatenate them together before splitting them into smaller parts.
# Since we make sure that all sequences are of the same length, no attention_mask is needed.
def tokenize_function(examples):
return tokenizer(examples[text_column_name], return_attention_mask=False)
tokenized_datasets = datasets.map(
tokenize_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
)
# T5-like span masked language modeling will fuse consecutively masked tokens to a single sentinel token.
# To ensure that the input length is `max_seq_length`, we need to increase the maximum length
# according to `mlm_probability` and `mean_noise_span_length`. We can also define the label length accordingly.
expanded_inputs_length, targets_length = compute_input_and_target_lengths(
inputs_length=max_seq_length,
noise_density=data_args.mlm_probability,
mean_noise_span_length=data_args.mean_noise_span_length,
)
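# Added note: for the common default setting (max_seq_length=512, mlm_probability=0.15,
# mean_noise_span_length=3.0) this works out to an expanded input length of 568 raw
# tokens and a target length of 114 tokens, since each masked span collapses to a
# single sentinel token on the input side.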
# Main data processing function that will concatenate all texts from our dataset and generate chunks of expanded_inputs_length.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= expanded_inputs_length:
total_length = (total_length // expanded_inputs_length) * expanded_inputs_length
# Split by chunks of max_len.
result = {
k: [t[i : i + expanded_inputs_length] for i in range(0, total_length, expanded_inputs_length)]
for k, t in concatenated_examples.items()
}
return result
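# Added note: as a toy example, with expanded_inputs_length=4 a concatenated stream
# of 10 token ids becomes the chunks ids[0:4] and ids[4:8]; the trailing 2 tokens are
# dropped by the truncation above.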
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
# might be slower to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
tokenized_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Enable tensorboard only on the master node
has_tensorboard = is_tensorboard_available()
if has_tensorboard and jax.process_index() == 0:
try:
from flax.metrics.tensorboard import SummaryWriter
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
except ImportError as ie:
has_tensorboard = False
logger.warning(
f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
)
else:
logger.warning(
"Unable to display metrics through TensorBoard because the package is not installed: "
"Please run pip install tensorboard to enable."
)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed)
dropout_rngs = jax.random.split(rng, jax.local_device_count())
if model_args.model_name_or_path:
model = FlaxT5ForConditionalGeneration.from_pretrained(
model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)
)
else:
config.vocab_size = len(tokenizer)
model = FlaxT5ForConditionalGeneration(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype))
# Data collator
# This one will take care of randomly masking the tokens.
data_collator = FlaxDataCollatorForT5MLM(
tokenizer=tokenizer,
noise_density=data_args.mlm_probability,
mean_noise_span_length=data_args.mean_noise_span_length,
input_length=max_seq_length,
target_length=targets_length,
pad_token_id=model.config.pad_token_id,
decoder_start_token_id=model.config.decoder_start_token_id,
)
# Store some constants
num_epochs = int(training_args.num_train_epochs)
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
# Create learning rate schedule
warmup_fn = optax.linear_schedule(
init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
)
decay_fn = optax.linear_schedule(
init_value=training_args.learning_rate,
end_value=0,
transition_steps=num_train_steps - training_args.warmup_steps,
)
linear_decay_lr_schedule_fn = optax.join_schedules(
schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
)
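# Added note: the joined schedule rises linearly from 0 to the peak learning rate over
# `warmup_steps`, then decays linearly back to 0 over the remaining
# `num_train_steps - warmup_steps` steps.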
# We use Optax's "masking" functionality to not apply weight decay
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
# mask boolean with the same structure as the parameters.
# The mask is True for parameters that should be decayed.
def decay_mask_fn(params):
flat_params = traverse_util.flatten_dict(params)
flat_mask = {
path: (path[-1] != "bias" and path[-2:] not in [("layer_norm", "scale"), ("final_layer_norm", "scale")])
for path in flat_params
}
return traverse_util.unflatten_dict(flat_mask)
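# Added note: e.g. a (hypothetical) parameter path ending in ("layer_norm", "scale") or
# in "bias" gets mask value False (no weight decay), while a path ending in "kernel"
# gets True and is decayed.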
# Create the optimizer (Adafactor or AdamW, depending on --adafactor)
if training_args.adafactor:
# We use the default parameters here to initialize Adafactor.
# For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
optimizer = optax.adafactor(
learning_rate=linear_decay_lr_schedule_fn,
)
else:
optimizer = optax.adamw(
learning_rate=linear_decay_lr_schedule_fn,
b1=training_args.adam_beta1,
b2=training_args.adam_beta2,
weight_decay=training_args.weight_decay,
mask=decay_mask_fn,
)
# Setup train state
state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)
# Define gradient update step fn
def train_step(state, batch, dropout_rng):
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
def loss_fn(params):
labels = batch.pop("labels")
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
# compute loss
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
return loss
grad_fn = jax.value_and_grad(loss_fn)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
metrics = jax.lax.pmean(
{"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
)
return new_state, metrics, new_dropout_rng
# Create parallel version of the train step
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
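# Added note: jax.pmap runs train_step once per local device over a leading device axis
# (added later by shard()), and jax.lax.pmean inside train_step averages loss and
# gradients across that "batch" axis; donate_argnums=(0,) lets XLA reuse the replicated
# state buffers when producing the updated state.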
# Define eval fn
def eval_step(params, batch):
labels = batch.pop("labels")
logits = model(**batch, params=params, train=False)[0]
# compute loss
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1]))
# compute accuracy
accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels)
# summarize metrics
metrics = {"loss": loss.mean(), "accuracy": accuracy.mean()}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return metrics
p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))
# Replicate the train state on each device
state = jax_utils.replicate(state)
train_time = 0
epochs = tqdm(range(num_epochs), desc="Epoch ... ", position=0)
for epoch in epochs:
# ======================== Training ================================
train_start = time.time()
train_metrics = []
# Create sampling rng
rng, input_rng = jax.random.split(rng)
# Generate an epoch by shuffling sampling indices from the train dataset
num_train_samples = len(tokenized_datasets["train"])
train_samples_idx = np.random.permutation(np.arange(num_train_samples))
train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)
# Gather the indexes for creating the batch and do a training step
for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
model_inputs = data_collator(samples)
# Model forward
model_inputs = shard(model_inputs.data)
state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
train_metrics.append(train_metric)
cur_step = epoch * (num_train_samples // train_batch_size) + step
if cur_step % training_args.logging_steps == 0 and cur_step > 0:
# Save metrics
train_metric = jax_utils.unreplicate(train_metric)
train_time += time.time() - train_start
if has_tensorboard and jax.process_index() == 0:
write_train_metric(summary_writer, train_metrics, train_time, cur_step)
epochs.write(
f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate: {train_metric['learning_rate'].mean()})"
)
train_metrics = []
if cur_step % training_args.eval_steps == 0 and cur_step > 0:
# ======================== Evaluating ==============================
num_eval_samples = len(tokenized_datasets["validation"])
eval_samples_idx = jnp.arange(num_eval_samples)
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
eval_metrics = []
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
model_inputs = data_collator(samples)
# Model forward
model_inputs = shard(model_inputs.data)
metrics = p_eval_step(state.params, model_inputs)
eval_metrics.append(metrics)
# get eval metrics
eval_metrics = get_metrics(eval_metrics)
eval_metrics = jax.tree_map(jnp.mean, eval_metrics)
# Update progress bar
epochs.write(f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})")
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_eval_metric(summary_writer, eval_metrics, cur_step)
if cur_step % training_args.save_steps == 0 and cur_step > 0:
# Save a checkpoint every `save_steps` steps and optionally push it to the hub
if jax.process_index() == 0:
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
model.save_pretrained(training_args.output_dir, params=params)
tokenizer.save_pretrained(training_args.output_dir)
if training_args.push_to_hub:
repo.push_to_hub(commit_message=f"Saving weights and logs of step {cur_step}", blocking=False)
# Eval after training
if training_args.do_eval:
num_eval_samples = len(tokenized_datasets["validation"])
eval_samples_idx = jnp.arange(num_eval_samples)
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
eval_metrics = []
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
model_inputs = data_collator(samples)
# Model forward
model_inputs = shard(model_inputs.data)
metrics = p_eval_step(state.params, model_inputs)
eval_metrics.append(metrics)
# get eval metrics
eval_metrics = get_metrics(eval_metrics)
eval_metrics = jax.tree_map(lambda metric: jnp.mean(metric).item(), eval_metrics)
if jax.process_index() == 0:
eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
path = os.path.join(training_args.output_dir, "eval_results.json")
with open(path, "w") as f:
json.dump(eval_metrics, f, indent=4, sort_keys=True)
if __name__ == "__main__":
main()
| 44.916115
| 209
| 0.675063
|
427c9369ab053d0e3043bf36625be835def10bff
| 965
|
py
|
Python
|
Task2F.py
|
otss2/1CW-Computing-Project
|
6e19d04ec0ca1f8bcaf047424b74a9bcde96f85b
|
[
"MIT"
] | null | null | null |
Task2F.py
|
otss2/1CW-Computing-Project
|
6e19d04ec0ca1f8bcaf047424b74a9bcde96f85b
|
[
"MIT"
] | null | null | null |
Task2F.py
|
otss2/1CW-Computing-Project
|
6e19d04ec0ca1f8bcaf047424b74a9bcde96f85b
|
[
"MIT"
] | null | null | null |
from floodsystem import plot
from floodsystem.plot import plot_water_level_with_fit, plot_water_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.datafetcher import fetch_measure_levels
import matplotlib.pyplot as plt
import datetime
from datetime import timedelta
from floodsystem.analysis import *
def run():
"""Requirements for Task 2F"""
# Build list of stations to plot
stations = build_station_list()
update_water_levels(stations)
N = 5
stations_to_plot = stations_highest_rel_level(stations, N)
dt = timedelta(days=2)
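# Added note: for each of the N stations with the highest relative water level,
# fetch the last two days of level data and, when data is available, plot it with
# a degree-4 polynomial fit (the 4 passed to plot_water_level_with_fit below).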
for station in stations_to_plot:
dates, levels = fetch_measure_levels(station.measure_id, dt)
if levels:
plot_water_level_with_fit(station, dates, levels, 4)
if __name__ == "__main__":
print("*** Task 2F: CUED Part IA Flood Warning System ***")
run()
| 27.571429
| 75
| 0.752332
|
f7dcd4b15929f7e4d59cb222f357b0b502573463
| 717
|
py
|
Python
|
setup.py
|
ryneches/Shand
|
ec7f8c914d7099b96e755c75b5313cbae1cab3c5
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
ryneches/Shand
|
ec7f8c914d7099b96e755c75b5313cbae1cab3c5
|
[
"BSD-3-Clause"
] | 2
|
2016-02-17T03:16:00.000Z
|
2016-02-17T03:16:42.000Z
|
setup.py
|
ryneches/Shand
|
ec7f8c914d7099b96e755c75b5313cbae1cab3c5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
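# Note (added): cythonize() below compiles shand/quicktree.pyx into a C extension
# module at build time.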
setup(
name='shand',
version='0.1',
description='A pipeline for investigating cospeciation in microbiomes',
scripts=['scripts/shand'],
url='http://github.com/ryneches/Shand',
author='Russell Neches',
author_email='ryneches@ucdavis.edu',
license='BSD',
packages=['shand'],
install_requires=[
'pandas',
'screed',
'hat_trie',
'scikit-bio',
'pyprind',
'psutil',
'cython'
],
zip_safe=False,
ext_modules = cythonize( 'shand/quicktree.pyx' ),
test_suite = 'nose.collector'
)
| 24.724138
| 75
| 0.633194
|
5f9287402641495d5edad30daa2d68e821d8f7c7
| 21,472
|
py
|
Python
|
pymatgen/analysis/tests/test_reaction_calculator.py
|
naik-aakash/pymatgen
|
394e0d71bf1d1025fcf75498cbb16aa3f41ce78c
|
[
"MIT"
] | 1
|
2022-03-24T04:12:16.000Z
|
2022-03-24T04:12:16.000Z
|
pymatgen/analysis/tests/test_reaction_calculator.py
|
naik-aakash/pymatgen
|
394e0d71bf1d1025fcf75498cbb16aa3f41ce78c
|
[
"MIT"
] | null | null | null |
pymatgen/analysis/tests/test_reaction_calculator.py
|
naik-aakash/pymatgen
|
394e0d71bf1d1025fcf75498cbb16aa3f41ce78c
|
[
"MIT"
] | null | null | null |
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import warnings
from collections import defaultdict
from math import isnan
import numpy as np
from pymatgen.analysis.reaction_calculator import (
BalancedReaction,
ComputedReaction,
Reaction,
ReactionError,
)
from pymatgen.core.composition import Composition
from pymatgen.entries.computed_entries import ComputedEntry
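# Added illustration of the API exercised below (mirrors test_init):
# >>> rxn = Reaction([Composition("Fe"), Composition("O2")], [Composition("Fe2O3")])
# >>> str(rxn)
# '2 Fe + 1.5 O2 -> Fe2O3'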
class ReactionTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
reactants = [Composition("Fe"), Composition("O2")]
products = [Composition("Fe2O3")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "2 Fe + 1.5 O2 -> Fe2O3")
self.assertEqual(rxn.normalized_repr, "4 Fe + 3 O2 -> 2 Fe2O3")
d = rxn.as_dict()
rxn = Reaction.from_dict(d)
repr, factor = rxn.normalized_repr_and_factor()
self.assertEqual(repr, "4 Fe + 3 O2 -> 2 Fe2O3")
self.assertAlmostEqual(factor, 2)
reactants = [Composition("FePO4"), Composition("Mn")]
products = [Composition("FePO4"), Composition("Xe")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "FePO4 -> FePO4")
products = [Composition("Ti2 O4"), Composition("O1")]
reactants = [Composition("Ti1 O2")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "2 TiO2 -> 2 TiO2")
reactants = [Composition("FePO4"), Composition("Li")]
products = [Composition("LiFePO4")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "FePO4 + Li -> LiFePO4")
reactants = [Composition("MgO")]
products = [Composition("MgO")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "MgO -> MgO")
reactants = [Composition("Mg")]
products = [Composition("Mg")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "Mg -> Mg")
reactants = [Composition("FePO4"), Composition("LiPO3")]
products = [Composition("LiFeP2O7")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "FePO4 + LiPO3 -> LiFeP2O7")
reactants = [Composition("Na"), Composition("K2O")]
products = [Composition("Na2O"), Composition("K")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "2 Na + K2O -> Na2O + 2 K")
# Test for an old bug that occurred when an excess product is
# defined.
products = [Composition("FePO4"), Composition("O")]
reactants = [Composition("FePO4")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "FePO4 -> FePO4")
products = list(map(Composition, ["LiCrO2", "La8Ti8O12", "O2"]))
reactants = [Composition("LiLa3Ti3CrO12")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "LiLa3Ti3CrO12 -> LiCrO2 + 1.5 La2Ti2O3 + 2.75 O2")
def test_rank(self):
reactants = [Composition("La2Zr2O7"), Composition("LiCoO2")]
products = [
Composition("La2O3"),
Composition("Co2O3"),
Composition("Li2ZrO3"),
Composition("Li2O"),
]
self.assertEqual(
str(Reaction(reactants, products)),
"La2Zr2O7 + 6 LiCoO2 -> La2O3 + 3 Co2O3 + 2 Li2ZrO3 + Li2O",
)
reactants = [Composition("La2O3"), Composition("Co2O3"), Composition("Li2ZrO3")]
products = [
Composition("Li2O"),
Composition("La2Zr2O7"),
Composition("Li3CoO3"),
]
self.assertEqual(
str(Reaction(reactants, products)),
"La2O3 + 0.3333 Co2O3 + 2 Li2ZrO3 -> Li2O + La2Zr2O7 + 0.6667 Li3CoO3",
)
reactants = [Composition("La2O3"), Composition("Co2O3"), Composition("Li2ZrO3")]
products = [
Composition("Xe"),
Composition("Li2O"),
Composition("La2Zr2O7"),
Composition("Li3CoO3"),
]
self.assertEqual(
str(Reaction(reactants, products)),
"La2O3 + 0.3333 Co2O3 + 2 Li2ZrO3 -> Li2O + La2Zr2O7 + 0.6667 Li3CoO3",
)
reactants = [Composition("La2O3"), Composition("Co2O3"), Composition("Li2ZrO3")]
products = [
Composition("Xe"),
Composition("Li2O"),
Composition("La2Zr2O7"),
Composition("Li3CoO3"),
Composition("XeNe"),
]
self.assertEqual(
str(Reaction(reactants, products)),
"La2O3 + 0.3333 Co2O3 + 2 Li2ZrO3 -> Li2O + La2Zr2O7 + 0.6667 Li3CoO3",
)
reactants = [Composition("LiCoO2")]
products = [
Composition("La2O3"),
Composition("Co2O3"),
Composition("Li2O1"),
Composition("Li1F1"),
Composition("Co1F3"),
]
self.assertEqual(
str(Reaction(reactants, products)),
"1.667 LiCoO2 + 0.3333 CoF3 -> Co2O3 + 0.3333 Li2O + LiF",
)
# this test can fail because of numerical rank calculation issues
reactants = [Composition("LiCoO2"), Composition("Li2O1")]
products = [Composition("ZrF4"), Composition("Co2O3")]
self.assertEqual(str(Reaction(reactants, products)), "2 LiCoO2 -> Li2O + Co2O3")
def test_singular_case(self):
rxn = Reaction(
[Composition("XeMn"), Composition("Li")],
[Composition("S"), Composition("LiS2"), Composition("FeCl")],
)
self.assertEqual(str(rxn), "Li + 2 S -> LiS2")
def test_overdetermined(self):
self.assertRaises(ReactionError, Reaction, [Composition("Li")], [Composition("LiO2")])
def test_scientific_notation(self):
products = [Composition("FePO3.9999"), Composition("O2")]
reactants = [Composition("FePO4")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "FePO4 -> Fe1P1O3.9999 + 5e-05 O2")
self.assertEqual(rxn, Reaction.from_string(str(rxn)))
rxn2 = Reaction.from_string("FePO4 + 20 CO -> 1e1 O2 + Fe1P1O4 + 20 C")
self.assertEqual(str(rxn2), "20 CO -> 20 C + 10 O2")
def test_equals(self):
reactants = [Composition("Fe"), Composition("O2")]
products = [Composition("Fe2O3")]
rxn = Reaction(reactants, products)
reactants = [Composition("O2"), Composition("Fe")]
products = [Composition("Fe2O3")]
rxn2 = Reaction(reactants, products)
self.assertTrue(rxn == rxn2)
def test_normalize_to(self):
products = [Composition("Fe"), Composition("O2")]
reactants = [Composition("Fe2O3")]
rxn = Reaction(reactants, products)
rxn.normalize_to(Composition("Fe"), 3)
self.assertEqual(str(rxn), "1.5 Fe2O3 -> 3 Fe + 2.25 O2")
def test_calculate_energy(self):
reactants = [Composition("MgO"), Composition("Al2O3")]
products = [Composition("MgAl2O4")]
energies = {
Composition("MgO"): -0.1,
Composition("Al2O3"): -0.2,
Composition("MgAl2O4"): -0.5,
}
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "MgO + Al2O3 -> MgAl2O4")
self.assertEqual(rxn.normalized_repr, "MgO + Al2O3 -> MgAl2O4")
self.assertAlmostEqual(rxn.calculate_energy(energies), -0.2, 5)
def test_as_entry(self):
reactants = [Composition("MgO"), Composition("Al2O3")]
products = [Composition("MgAl2O4")]
energies = {
Composition("MgO"): -0.1,
Composition("Al2O3"): -0.2,
Composition("MgAl2O4"): -0.5,
}
rxn = Reaction(reactants, products)
entry = rxn.as_entry(energies)
self.assertEqual(entry.name, "MgO + Al2O3 -> MgAl2O4")
self.assertAlmostEqual(entry.energy, -0.2, 5)
products = [Composition("Fe"), Composition("O2")]
reactants = [Composition("Fe2O3")]
rxn = Reaction(reactants, products)
energies = {
Composition("Fe"): 0,
Composition("O2"): 0,
Composition("Fe2O3"): 0.5,
}
entry = rxn.as_entry(energies)
self.assertEqual(entry.composition, Composition("Fe1.0 O1.5"))
self.assertAlmostEqual(entry.energy, -0.25, 5)
def test_products_reactants(self):
reactants = [
Composition("Li3Fe2(PO4)3"),
Composition("Fe2O3"),
Composition("O2"),
]
products = [Composition("LiFePO4")]
energies = {
Composition("Li3Fe2(PO4)3"): -0.1,
Composition("Fe2O3"): -0.2,
Composition("O2"): -0.2,
Composition("LiFePO4"): -0.5,
}
rxn = Reaction(reactants, products)
self.assertIn(Composition("O2"), rxn.products, "O not in products!")
self.assertIn(Composition("Li3Fe2(PO4)3"), rxn.reactants, "Li3Fe2(PO4)4 not in reactants!")
self.assertEqual(str(rxn), "0.3333 Li3Fe2(PO4)3 + 0.1667 Fe2O3 -> 0.25 O2 + LiFePO4")
self.assertEqual(rxn.normalized_repr, "4 Li3Fe2(PO4)3 + 2 Fe2O3 -> 3 O2 + 12 LiFePO4")
self.assertAlmostEqual(rxn.calculate_energy(energies), -0.48333333, 5)
def test_to_from_dict(self):
reactants = [Composition("Fe"), Composition("O2")]
products = [Composition("Fe2O3")]
rxn = Reaction(reactants, products)
d = rxn.as_dict()
rxn = Reaction.from_dict(d)
self.assertEqual(rxn.normalized_repr, "4 Fe + 3 O2 -> 2 Fe2O3")
def test_underdetermined(self):
reactants = [Composition("Fe"), Composition("O2")]
products = [Composition("Fe"), Composition("O2")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "Fe + O2 -> Fe + O2")
reactants = [
Composition("Fe"),
Composition("O2"),
Composition("Na"),
Composition("Li"),
Composition("Cl"),
]
products = [Composition("FeO2"), Composition("NaCl"), Composition("Li2Cl2")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "Fe + O2 + Na + 2 Li + 1.5 Cl2 -> FeO2 + NaCl + 2 LiCl")
reactants = [
Composition("Fe"),
Composition("Na"),
Composition("Li2O"),
Composition("Cl"),
]
products = [
Composition("LiCl"),
Composition("Na2O"),
Composition("Xe"),
Composition("FeCl"),
Composition("Mn"),
]
rxn = Reaction(reactants, products)
# this can't normalize to 1 LiCl + 1 Na2O (not enough O), so chooses LiCl and FeCl
self.assertEqual(str(rxn), "Fe + Na + 0.5 Li2O + Cl2 -> LiCl + 0.5 Na2O + FeCl")
def test_underdetermined_reactants(self):
reactants = [Composition("Li"), Composition("Cl"), Composition("Cl")]
products = [Composition("LiCl")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "Li + 0.25 Cl2 + 0.25 Cl2 -> LiCl")
reactants = [Composition("LiMnCl3"), Composition("LiCl"), Composition("MnCl2")]
products = [Composition("Li2MnCl4")]
rxn = Reaction(reactants, products)
self.assertEqual(str(rxn), "LiMnCl3 + 3 LiCl + MnCl2 -> 2 Li2MnCl4")
class BalancedReactionTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
def tearDown(self):
warnings.simplefilter("default")
def test_init(self):
rct = {Composition("K2SO4"): 3, Composition("Na2S"): 1, Composition("Li"): 24}
prod = {Composition("KNaS"): 2, Composition("K2S"): 2, Composition("Li2O"): 12}
rxn = BalancedReaction(rct, prod)
self.assertIsNotNone(str(rxn))
# Test unbalanced exception
rct = {Composition("K2SO4"): 1, Composition("Na2S"): 1, Composition("Li"): 24}
prod = {Composition("KNaS"): 2, Composition("K2S"): 2, Composition("Li2O"): 12}
self.assertRaises(ReactionError, BalancedReaction, rct, prod)
def test_to_from_dict(self):
rct = {Composition("K2SO4"): 3, Composition("Na2S"): 1, Composition("Li"): 24}
prod = {Composition("KNaS"): 2, Composition("K2S"): 2, Composition("Li2O"): 12}
rxn = BalancedReaction(rct, prod)
d = rxn.as_dict()
new_rxn = BalancedReaction.from_dict(d)
for comp in new_rxn.all_comp:
self.assertEqual(new_rxn.get_coeff(comp), rxn.get_coeff(comp))
def test_from_string(self):
rxn = BalancedReaction({Composition("Li"): 4, Composition("O2"): 1}, {Composition("Li2O"): 2})
self.assertEqual(rxn, BalancedReaction.from_string("4 Li + O2 -> 2Li2O"))
rxn = BalancedReaction(
{Composition("Li(NiO2)3"): 1},
{
Composition("O2"): 0.5,
Composition("Li(NiO2)2"): 1,
Composition("NiO"): 1,
},
)
self.assertEqual(
rxn,
BalancedReaction.from_string("1.000 Li(NiO2)3 -> 0.500 O2 + 1.000 Li(NiO2)2 + 1.000 NiO"),
)
def test_remove_spectator_species(self):
rxn = BalancedReaction(
{Composition("Li"): 4, Composition("O2"): 1, Composition("Na"): 1},
{Composition("Li2O"): 2, Composition("Na"): 1},
)
self.assertTrue(Composition("Na") not in rxn.all_comp)
class ComputedReactionTest(unittest.TestCase):
def setUp(self):
d = [
{
"correction": 0.0,
"data": {},
"energy": -108.56492362,
"parameters": {},
"composition": {"Li": 54},
},
{
"correction": 0.0,
"data": {},
"energy": -577.94689128,
"parameters": {},
"composition": {"O": 32, "Li": 64},
},
{
"correction": 0.0,
"data": {},
"energy": -17.02844794,
"parameters": {},
"composition": {"O": 2},
},
{
"correction": 0.0,
"data": {},
"energy": -959.64693323,
"parameters": {},
"composition": {"O": 72, "Li": 72},
},
]
entries = []
for e in d:
entries.append(ComputedEntry.from_dict(e))
rcts = list(filter(lambda e: e.composition.reduced_formula in ["Li", "O2"], entries))
prods = list(filter(lambda e: e.composition.reduced_formula == "Li2O2", entries))
self.rxn = ComputedReaction(rcts, prods)
def test_calculated_reaction_energy(self):
self.assertAlmostEqual(self.rxn.calculated_reaction_energy, -5.60748821935)
def test_calculated_reaction_energy_uncertainty(self):
d = [
{
"correction": 0.0,
"data": {},
"energy": -108.56492362,
"parameters": {},
"composition": {"Li": 54},
},
{
"correction": 0.0,
"data": {},
"energy": -17.02844794,
"parameters": {},
"composition": {"O": 2},
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -38.76889738,
"composition": defaultdict(float, {"Li": 4.0, "O": 4.0}),
"energy_adjustments": [
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ConstantEnergyAdjustment",
"@version": "2020.6.8",
"value": -1.864,
"uncertainty": 0.0744,
"name": "MP2020 Composition Correction",
"cls": {
"@module": "pymatgen.entries.compatibility",
"@class": "MaterialsProject2020Compatibility",
"@version": "2020.6.8",
"compat_type": "Advanced",
"correct_peroxide": True,
"check_potcar_hash": False,
},
"description": "Constant energy adjustment (-1.864 eV)",
}
],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Li_sv", "O"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Li_sv", "PBE O"],
"oxide_type": "peroxide",
},
"data": {"oxide_type": "peroxide"},
"entry_id": "mp-841",
"correction": -1.864,
},
]
entries = []
for e in d:
entries.append(ComputedEntry.from_dict(e))
rcts = list(filter(lambda e: e.composition.reduced_formula in ["Li", "O2"], entries))
prods = list(filter(lambda e: e.composition.reduced_formula == "Li2O2", entries))
rxn_with_uncertainty = ComputedReaction(rcts, prods)
self.assertAlmostEqual(rxn_with_uncertainty.calculated_reaction_energy_uncertainty, 0.5 * 0.0744)
def test_calculated_reaction_energy_uncertainty_for_no_uncertainty(self):
# test that reaction_energy_uncertainty property doesn't cause errors
# when products/reactants have no uncertainties
self.assertAlmostEqual(self.rxn.calculated_reaction_energy_uncertainty, 0)
def test_calculated_reaction_energy_uncertainty_for_nan(self):
# test that reaction_energy_uncertainty property is nan when the uncertainty
# for any product/reactant is nan
d = [
{
"correction": 0.0,
"data": {},
"energy": -108.56492362,
"parameters": {},
"composition": {"Li": 54},
},
{
"correction": 0.0,
"data": {},
"energy": -17.02844794,
"parameters": {},
"composition": {"O": 2},
},
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ComputedEntry",
"energy": -38.76889738,
"composition": defaultdict(float, {"Li": 4.0, "O": 4.0}),
"energy_adjustments": [
{
"@module": "pymatgen.entries.computed_entries",
"@class": "ConstantEnergyAdjustment",
"@version": "2020.6.8",
"value": -1.864,
"uncertainty": np.nan,
"name": "MP2020 Composition Correction",
"cls": {
"@module": "pymatgen.entries.compatibility",
"@class": "MaterialsProject2020Compatibility",
"@version": "2020.6.8",
"compat_type": "Advanced",
"correct_peroxide": True,
"check_potcar_hash": False,
},
"description": "Constant energy adjustment (-1.864 eV)",
}
],
"parameters": {
"run_type": "GGA",
"is_hubbard": False,
"pseudo_potential": {
"functional": "PBE",
"labels": ["Li_sv", "O"],
"pot_type": "paw",
},
"hubbards": {},
"potcar_symbols": ["PBE Li_sv", "PBE O"],
"oxide_type": "peroxide",
},
"data": {"oxide_type": "peroxide"},
"entry_id": "mp-841",
"correction": -1.864,
},
]
entries = []
for e in d:
entries.append(ComputedEntry.from_dict(e))
rcts = list(filter(lambda e: e.composition.reduced_formula in ["Li", "O2"], entries))
prods = list(filter(lambda e: e.composition.reduced_formula == "Li2O2", entries))
rxn_with_uncertainty = ComputedReaction(rcts, prods)
self.assertTrue(isnan(rxn_with_uncertainty.calculated_reaction_energy_uncertainty))
def test_init(self):
self.assertEqual(str(self.rxn), "2 Li + O2 -> Li2O2")
def test_to_from_dict(self):
d = self.rxn.as_dict()
new_rxn = ComputedReaction.from_dict(d)
self.assertEqual(str(new_rxn), "2 Li + O2 -> Li2O2")
def test_all_entries(self):
for c, e in zip(self.rxn.coeffs, self.rxn.all_entries):
if c > 0:
self.assertEqual(e.composition.reduced_formula, "Li2O2")
self.assertAlmostEqual(e.energy, -959.64693323)
if __name__ == "__main__":
unittest.main()
| 38.138544
| 105
| 0.529061
|
7f8e69b8777a4596adc1e66b704b0aaaef484160
| 6,995
|
py
|
Python
|
api_server/switch_api/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/switch_suncode2019
|
cad8fcca50a4848ba946f39aeaa624a230af679d
|
[
"MIT"
] | null | null | null |
api_server/switch_api/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/switch_suncode2019
|
cad8fcca50a4848ba946f39aeaa624a230af679d
|
[
"MIT"
] | null | null | null |
api_server/switch_api/utilities/DataInterfaces/SqlInterface.py
|
cliftbar/switch_suncode2019
|
cad8fcca50a4848ba946f39aeaa624a230af679d
|
[
"MIT"
] | 1
|
2019-08-31T01:10:10.000Z
|
2019-08-31T01:10:10.000Z
|
import logging
import time
from abc import abstractmethod
from enum import Enum
from typing import Dict, Callable, Any, List, Optional
from schema import Schema
import sqlalchemy
from sqlalchemy.engine import ResultProxy
from sqlalchemy.orm import Query
from sqlalchemy.schema import Table
from sqlalchemy.engine.base import Engine
from sqlalchemy.engine.base import Connection
from contextlib import contextmanager
from switch_api.utilities.DataInterfaces import ConnectionOptions
logger = logging.getLogger(__name__)
class SqlDialect(Enum):
postgres = "postgres"
sqlite = "sqlite"
@classmethod
def has_value(cls, value) -> bool:
return any(value == item.value for item in cls)
# TODO: Connection Factory
class SqlConnectionOptions(ConnectionOptions):
@staticmethod
def factory(sql_connection_type: SqlDialect, **kwargs) -> 'SqlConnectionOptions':
"""
Function signatures for factory method
Postgres: (dialect: SqlDialects, host: str, port: int, username: str, password: str,
database_name: str, timeout: int = None)
"""
return SqlConnectionFactories.get_factory(sql_connection_type)(**kwargs)
def __init__(self, dialect: SqlDialect, host: str, port: int, username: str, password: str, database_name: str
, timeout_s: int = None):
self.dialect: SqlDialect = dialect
self.host: str = host
self.port: int = port
self.username: str = username
self.password: str = password
self.database_name: str = database_name
self.timeout: Optional[int] = timeout_s
self.connection_string: Optional[str] = None
@classmethod
@abstractmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
pass
class PostgresConnectionOptions(SqlConnectionOptions):
_factory_schema: Schema = Schema(
{
'host': str,
'port': int,
'username': str,
'password': str,
'database_name': str
# 'timeout': int
},
ignore_extra_keys=True
)
def __init__(self,
dialect: SqlDialect,
host: str,
port: int,
username: str,
password: str,
database_name: str,
timeout_s: int = None) -> None:
super().__init__(dialect, host, port, username, password, database_name, timeout_s)
self.connection_string = \
f"postgresql://{self.username}:{self.password}@{self.host}:{self.port}/{self.database_name}"
@classmethod
def schema_validate_arguments(cls, schema: Schema, parameters: Dict) -> Dict:
return schema.validate(parameters)
@classmethod
def factory(cls, **kwargs) -> 'PostgresConnectionOptions':
parameters: Dict = cls.schema_validate_arguments(cls._factory_schema, kwargs)
return cls(SqlDialect.postgres, parameters['host'], parameters['port']
, parameters['username'], parameters['password'], parameters['database_name']
, parameters.get('timeout'))
class SqlConnectionFactories:
_factories: Dict[SqlDialect, Callable] = {
SqlDialect.postgres: PostgresConnectionOptions.factory
# , SqlDialects.sqlite: SqliteConnectionOptions.factory
}
@classmethod
def get_factory(cls, factory_type: SqlDialect) -> Callable:
return cls._factories[factory_type]
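# Example usage sketch (added; connection values are hypothetical):
# options = SqlConnectionOptions.factory(
#     SqlDialect.postgres, host="localhost", port=5432,
#     username="switch", password="secret", database_name="switch_db")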
class SqlInterface:
"""SQL methods to tack onto SQL based librarians"""
def __init__(self, connection_options: SqlConnectionOptions) -> None:
self.connection_options = connection_options
self.sql_engine: Optional[Engine] = None
self.sql_metadata: Optional[sqlalchemy.MetaData] = None
def update(self, schema: str, table: str, column: str, value: Any, sql_connection: Connection) -> None:
raise NotImplementedError
def select(self, schema: str, table: str, sql_connection: Connection) -> List[Dict[str, Any]]:
sql_table: Table = self._get_table_reflection(schema, table)
return self._execute_query(sql_connection, sql_table.select())
def insert(self, schema: str, table: str, values: List[Dict[str, Any]], sql_connection: Connection) -> None:
sql_table: Table = self._get_table_reflection(schema, table)
insert_query = sql_table.insert(values=values)
self._execute_query(sql_connection, insert_query)
def setup_pre_connection(self, connection_options) -> None:
self._build_engine(connection_options)
self._metadata_reflection(self.sql_engine)
def close_connection(self, sql_connection: Connection) -> None:
if sql_connection is not None:
sql_connection.close()
@contextmanager
def managed_connection(self, connection_options: SqlConnectionOptions = None) -> Connection:
if connection_options is None:
connection_options = self.connection_options
self.setup_pre_connection(connection_options)
connection: Optional[Connection] = None
try:
connection = self.sql_engine.connect()
yield connection
finally:
self.close_connection(connection)
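# Example usage sketch (added; schema/table names are hypothetical):
# interface = SqlInterface(options)
# with interface.managed_connection() as conn:
#     rows = interface.select("public", "events", conn)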
# SQLAlchemy internal methods
def _build_engine(self, connection_options: SqlConnectionOptions) -> None:
self.sql_engine = sqlalchemy.create_engine(connection_options.connection_string)
def _metadata_reflection(self, sql_engine) -> None:
self.sql_metadata = sqlalchemy.MetaData(bind=sql_engine)
def _get_table_reflection(self, schema: str, table: str) -> Table:
return Table(table, self.sql_metadata, schema=schema, autoload=True)
def _validate_write_schema(self, table: Table, values: Dict[str, Any]) -> bool:
table_columns = list(dict(table.columns).keys())
return list(values.keys()) == table_columns
def _parse_result_proxy(self, result) -> List[Dict[str, Any]]:
return list(map(lambda x: dict(x), result))
def _execute_query(self, sql_connection: Connection, sql_query: Query) -> List[Dict[str, Any]]:
start_time: float = time.time()
return_result: Optional[List[Dict[str, Any]]] = None
try:
result: ResultProxy = sql_connection.execute(sql_query)
if result.returns_rows:
return_result: List[Dict[str, Any]] = self._parse_result_proxy(result)
except Exception as e:
logger.info(f"SQL query failed: {e}")
logger.debug(f"SQL query {str(sql_query.compile())}, connection: {sql_connection.engine} failed with exception {e}")
raise e
finally:
end_time: float = time.time()
query_time: float = end_time - start_time
logger.info(f"SQL execute time: {query_time}")
logger.debug(
f"SQL execute time: {query_time}, query: {str(sql_query.compile())}, connection: {sql_connection.engine}"
)
return return_result
| 37.607527
| 128
| 0.666333
|
dd8769ff19e5369ac070df72ddf16afb206c3494
| 68,788
|
py
|
Python
|
lingvo/core/base_input_generator.py
|
Saiprasad16/lingvo
|
1862d61748f03f950c14144acc9cf21f1e7ed358
|
[
"Apache-2.0"
] | 1
|
2021-05-10T10:47:28.000Z
|
2021-05-10T10:47:28.000Z
|
lingvo/core/base_input_generator.py
|
Saiprasad16/lingvo
|
1862d61748f03f950c14144acc9cf21f1e7ed358
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/base_input_generator.py
|
Saiprasad16/lingvo
|
1862d61748f03f950c14144acc9cf21f1e7ed358
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input generators.
There are three types of batch sizes:
* Device split batch size: Defined by Params() and is the batch size
on each device/TPU core. BaseInputGenerator.params.batch_size and
BaseSequenceInputGenerator.params.bucket_batch_limit specify per-split batch
size.
* GlobalBatchSize: number of examples in a global batch.
* InfeedBatchSize: global_batch_size // num_infeed_hosts, where
num_infeed_hosts is cluster.num_tpu_hosts if using per-host infeed with TPU,
otherwise num_infeed_hosts is 1.
TODO(rpang): Deal with packed_inputs.
"""
import inspect
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import batch_utils
from lingvo.core import cluster
from lingvo.core import cluster_factory
from lingvo.core import datasource
from lingvo.core import hyperparams
from lingvo.core import input_generator_helper as ig_helper
from lingvo.core import inspect_utils
from lingvo.core import ops
from lingvo.core import py_utils
from lingvo.core import tokenizers
from lingvo.core import tpu_embedding_layers
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import io_ops
from tensorflow.python.tpu import tpu_embedding as tpu_embedding_lib
from tensorflow.python.tpu import tpu_feed
# pylint: enable=g-direct-tensorflow-import
DEFAULT_TOKENIZER_KEY = 'default'
INPUT_DATA_STATS_SUMMARIES_COLLECTION = 'INPUT_DATA_STATS_SUMMARIES'
class BaseInputGenerator(base_layer.BaseLayer):
"""The abstract base input generator."""
@classmethod
def DefineInfeedParams(cls, p):
# TPU related infeed tuning.
# Supported use cases:
#
# Data parallelism (num_partitions=None)
# - single host (use_per_host_infeed=False, tpu_infeed_parallelism=1))
# - multi host (use_per_host_infeed=False, tpu_infeed_parallelism>1)
# - per host (use_per_host_infeed=True)
# - unsharded inputs (_InputBatch returns a single NestedMap)
# - sharded inputs (_InputBatch returns a list containing
# tpu_number_of_shards NestedMaps)
# Model parallelism (num_partitions>1 where)
# - non-partitioned infeed (use_partitioned_infeed_queue=False):
# - Only first partition gets infeed (e.g. manual partition)
# - single host (use_per_host_infeed=False)
# - per host (use_per_host_infeed=True)
# - All partitions gets data parallel infeed (e.g. MoE)
# - single host not supported
# - per host (use_per_host_infeed=True, use_per_core_infeed=True)
# num_partitions should be set to number of partitions per replica
# - partitioned infeed (use_partitioned_infeed_queue=True)
# - single host (use_per_host_infeed=False)
# - per host (use_per_host_infeed=True)
# num_partitions should be set to number of partitions per replica
# and all partitions should exist on a single host
p.Define('use_per_host_infeed', False,
'Whether run infeed op on each host.')
p.Define('use_per_core_infeed', False,
'Whether to shard the infeed per TPU core instead of per replica')
p.Define('tpu_infeed_parallelism', 1,
'Uses these many python threads to drive infeed concurrently.')
p.Define('use_partitioned_infeed_queue', False, 'Use partitioned infeed')
p.Define(
'num_partitions', None,
'Number of partitions to split the model graph into. Used with '
'model parallelism. When >1, it specifies the number of devices '
'used to place one replica of the model graph nodes.')
@classmethod
def Params(cls):
"""Defaults params for input generators."""
p = super().Params()
p.name = 'input'
p.Define(
'file_datasource', None,
'The DataSource that produces input batches for this input generator.')
p.Define(
'batch_size', 0, 'Batch size for a device split. This will be '
'scaled to match the accelerator hardware topology.')
p.Define(
'num_samples', 0,
'If non-zero, the dataset contains this many samples. '
'For a test/eval dataset, if we want the test/eval job to evaluate '
'the whole dataset, this param must be set precisely. Otherwise, '
'this param is optional.')
p.Define('resettable', False,
'If True, the input generator must implement Reset().')
# For an input generator to support samples_per_summary == 0 to indicate
# using the entire dataset, it must (1) be resettable, and (2) throws
# tf.errors.OutOfRangeError when reading a batch beyond an epoch.
p.Define(
'eval_samples_per_summary', None, 'If not None, overrides '
'task_p.eval.samples_per_summary directly. Allowed to be 0, which '
'means to use the entire dataset.')
p.Define(
'decoder_samples_per_summary', None, 'If not None, overrides '
'task_p.eval.decoder_samples_per_summary directly. Allowed to be 0, '
'which means to use the entire dataset.')
p.Define(
'filter_sparse_tensors', False,
'If true, filter out SparseTensors in input_batch before enqueuing '
'onto TPU.')
cls.DefineInfeedParams(p)
p.Define('remote', hyperparams.Params(),
'Params to configure remote input policy.')
p.remote.Define(
'max_inflights_per_target', 32, 'The maximum number of '
'concurrent inflight remote input fetches per remote target.')
p.Define(
'input_stats_summary_interval_steps', 10,
'Number of steps in between logging of TF scalar summaries for '
'training related input data stats.')
p.Define(
'skip_tpu_embedding_enqueue_ops', False,
'Whether to skip CreateTpuEmbeddingEnqueueOps. This is useful for '
'multi-program training with one tasking having tpu embedding and '
'the other not.')
return p
def __init__(self, params):
super().__init__(params)
# parameter to tell the bprop one hot for all the files.
# TODO(ankurbpn): Initialize when using sources from mixed record yielders.
self._bprop_onehot = tf.constant([1], dtype=tf.float32)
# Each entry is a regular expression specifying the set of variables
# to bprop per data source.
self._bprop_variable_filters = ['']
# For TPU enqueue ops, we do not use graph collections, instead, we rely
# on this member variable. This is especially useful for
# executor-driven multiple programs, as we need more fine-grained
# access to drive the infeed for a specific program, rather than
# a single global collection across the graph.
self._tpu_infeed_op = None
# A list of InfeedQueues.
self._tpu_queues = []
# Set to true in GetPreprocessedInputBatch() (and thus _InputBatch())
self._in_get_processed_input_batch = False
# Merged TF scalar summaries for training related input data stats.
self._merged_input_data_summary_op = None
# Tensorboard layout for charts displaying input data stats.
self._input_data_summary_layout = None
self.CreateDatasource()
def CreateDatasource(self):
if self.params.file_datasource:
self.CreateChild('datasource', self.params.file_datasource)
self.datasource.SetInputGenerator(self)
def CommonInputOpArgs(self):
"""Common input params."""
return {}
def GetBpropVariableFilters(self):
return self._bprop_variable_filters
def GetInputSourceOneHot(self):
"""Get the current bprop type of the input generator batch."""
return self._bprop_onehot
def GlobalBatchSize(self):
"""Returns the total batch size (for stats), int or dynamic int tensor."""
# Uses `InfeedBatchSize()` instead of calculating it from `p.batch_size`
# because the behavior would be overridden by subclasses.
global_batch_size = batch_utils.scale_infeed_to_global(
self.InfeedBatchSize(), self.params.use_per_host_infeed)
tf.logging.info('GlobalBatchSize {}'.format(global_batch_size))
return global_batch_size
def InfeedBatchSize(self):
"""Returns the batch size of the input batch: int or dynamic int tensor."""
batch_per_input = batch_utils.scale_split_to_infeed(
self.params.batch_size, self.params.use_per_host_infeed)
tf.logging.info('batch_per_input: %d', batch_per_input)
return batch_per_input
def Initialize(self, sess):
"""Initialize using a session."""
if 'datasource' in self.children:
self.datasource.Initialize(sess)
def _InputBatch(self):
"""The current input batch, not preprocessed.
This is meant to be overridden by subclasses, but not called directly.
Callers should use `GetPreprocessedInputBatch()`.
Returns:
A NestedMap (or list of NestedMaps when using TPU sharded infeed) of
input tensors.
"""
raise NotImplementedError('Abstract method')
def _PreprocessInputBatch(self, batch):
"""Preprocesses input batch from _InputBatch.
Args:
batch: A NestedMap (or list of NestedMaps when using TPU sharded infeed)
containing input tensors in the format returned by _InputBatch.
Returns:
A NestedMap containing preprocessed inputs to feed to the model.
"""
return batch
def GetPreprocessedInputBatch(self):
"""Returns preprocessed batch of inputs.
These are the actual inputs fed to the model.
Subclasses generally should not override this function directly. Instead,
override _InputBatch and maybe _PreprocessInputBatch.
"""
self._in_get_processed_input_batch = True
# TODO(b/139345706): Use self.datasource.GetNext() for all datasource.
if ('datasource' in self.children and
isinstance(self.datasource, datasource.TFDatasetSource)):
if self.cluster.input_targets:
raise ValueError(
'TFDatasetSource subclassed DataSources do not support using '
'train_input_replica. Try tf_data_service_replicas instead.')
# pylint: disable=protected-access
if ((self._InputBatch.__func__ is not BaseInputGenerator._InputBatch and
self._InputBatch.__func__
is not BaseInputGeneratorFromFiles._InputBatch) or
self._PreprocessInputBatch.__func__
is not BaseInputGenerator._PreprocessInputBatch):
# pylint: enable=protected-access
# If you hit this error trying to run with --tf_data_service_replicas,
# try to refactor your input generator by moving all the code inside
# _InputBatch and _PreprocessInputBatch to _DataSourceFromFilePattern.
raise ValueError(
'Batches obtained through p.file_datasource do not go through '
'self._InputBatch() or self._PreprocessInputBatch(). To reduce the '
'potential of mistakes, this error is raised when either of those '
'functions have been overridden.')
batch = self.datasource.GetNext()
else:
batch = self._PreprocessInputBatch(self._InputBatch())
self._in_get_processed_input_batch = False
if py_utils.GetUnitTestSession():
self.Initialize(py_utils.GetUnitTestSession())
return batch
@property
def tpu_number_of_shards(self):
"""Number of shards to split the input batch into."""
p = self.params
num_tpu_hosts = self.cluster.num_tpu_hosts
num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
shards = (self.cluster.total_worker_devices // num_infeed_hosts)
if p.use_partitioned_infeed_queue or not p.use_per_core_infeed:
shards = shards // self.cluster.num_devices_per_split
return shards
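# Added example: with 32 worker devices and per-host infeed across 4 TPU hosts,
# shards starts at 32 // 4 = 8 per host; if num_devices_per_split is 2 and per-core
# infeed is not used, that is further divided to 4 shards.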
def CreateTpuEnqueueOps(self, job_name=None):
"""Create the host-side enqueue ops.
This should be called in an outer non-TPU context.
Args:
job_name: the name of the job on which the enqueue operations run.
"""
assert not self._tpu_queues, ('CreateTpuEnqueueOps should only be called '
'once.')
self._tpu_queues = []
self._per_host_batches = []
self._per_host_emb_batches = []
# A list of lists, where the [i][j] element is the j-th passthrough batch
# of the i-th task. Each task will have more than one passthrough batch iff
# sharded infeed is used.
self._per_host_passthrough_batches = []
p = self.params
num_tpu_hosts = self.cluster.num_tpu_hosts
num_cores_per_host = self.cluster.total_worker_devices // num_tpu_hosts
tf.logging.info(
'CreateTpuEnqueueOps num_splits_per_client={} '
'num_devices_per_split={} num_tpu_hosts={} use_per_host_infeed={}'
.format(self.cluster.num_splits_per_client,
self.cluster.num_devices_per_split, num_tpu_hosts,
p.use_per_host_infeed))
assert num_tpu_hosts > 0, ('num_tpu_hosts: %d' % num_tpu_hosts)
if p.use_per_core_infeed:
if (not p.use_per_host_infeed) or p.use_partitioned_infeed_queue:
raise ValueError('use_per_core_infeed need to have use_per_host_infeed '
'but not use_partitioned_infeed_queue.')
if (self.cluster.num_devices_per_split > num_cores_per_host and
(p.use_per_host_infeed and not p.use_per_core_infeed)):
tf.logging.fatal('Doesn\'t support per host infeed mode when '
'num_devices_per_split({}) > num_cores_per_host({}).'
'Each host must be able to accommodate >= 1 split when '
'using per_host_infeed.'.format(
self.cluster.num_devices_per_split,
num_cores_per_host))
shards = self.tpu_number_of_shards
tf.logging.info('shards {}'.format(shards))
input_ops_list = []
cpu_passthrough_keys = self.GetCpuPassthroughKeys()
num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
tf.logging.info('num_infeed_hosts: %d', num_infeed_hosts)
host_devices = self.cluster.ListDevices(self.cluster.job_spec).flatten()
if p.use_per_host_infeed and num_infeed_hosts != len(host_devices):
raise ValueError(
f'Configuration mismatch, number of infeed hosts {num_infeed_hosts} '
f'does not match available devices {host_devices}.')
for task_id in range(num_infeed_hosts):
host_device = host_devices[task_id]
if cpu_passthrough_keys and (
'/task:{}/device:CPU:0'.format(task_id) not in host_device):
raise ValueError(
f'CPU passthrough configuration mismatch, device {host_device} '
f'does not match task id {task_id}.')
with tf.device(host_device), cluster.InfeedContextScope(
infeed_host_index=task_id, num_infeed_hosts=num_infeed_hosts):
batch = self.GetPreprocessedInputBatch()
if not isinstance(batch, (list, tuple)):
batch = [batch]
cur_passthrough_batches = []
for i in range(len(batch)):
b = batch[i]
assert isinstance(b, py_utils.NestedMap)
# Hack: bucket_keys and xxx.bucket_keys are not needed on TPU.
# Note that when MultiTaskData is used, bucket_keys will be at the
# second level of the dictionary.
b = b.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))
# Split out any keys that are meant for CPU passthrough only.
cur_passthrough_batches.append(
b.FilterKeyVal(lambda k, _: k in cpu_passthrough_keys))
b = b.FilterKeyVal(lambda k, _: k not in cpu_passthrough_keys)
batch[i] = b
if i > 0:
# If the input batch is already sharded, check that the shards are
# compatible with each other.
assert py_utils.IsCompatible(b, batch[0])
self._per_host_passthrough_batches.append(cur_passthrough_batches)
tf.logging.info('CPU passthrough keys: %s', cpu_passthrough_keys)
if p.filter_sparse_tensors:
# Make a copy of this host's input batch, then filter out any
# SparseTensor features. This way, SparseTensor features are not fed
# into the TPU InfeedQueue (and only to TPUEmbedding).
# TODO(jeffreyzhao): Hack, come up with better solution.
# Ideally we would like users to override
# CreateTpuEmbeddingEnqueueOpsForHost() to modify the input batch
# and remove fields they don't want to enqueue onto TPU.
# However, the TPUEmbedding singleton and TPU embedding enqueue ops
# are currently constructed after CreateTpuEnqueueOps() is called.
emb_batch = []
new_batch = []
for i, b in enumerate(batch):
emb_batch.append(
b.Filter(lambda v: isinstance(v, tf.sparse.SparseTensor)))
new_batch.append(
b.Filter(lambda v: not isinstance(v, tf.sparse.SparseTensor)))
self._per_host_emb_batches.append(emb_batch)
batch = new_batch
self._batch_nm_types = batch[0]
tf.logging.info('host_device: %s, batch: %r', host_device, batch)
self._per_host_batches.append(batch)
for b in batch:
for k, x in b.FlattenItems():
assert x.shape.is_fully_defined(), (
'Shape must be fully defined: %s: %s' % (k, x))
# TODO(cwhipkey): if it's a string (or other type not supported on
# TPU), drop it from feeding and on the other end add in an op that
# fails if used.
shapes = batch[0].Transform(lambda x: x.shape).Flatten()
dtypes = batch[0].Transform(lambda x: x.dtype).Flatten()
tf.logging.info('host_device: %s infeed shapes: %r', host_device,
shapes)
tf.logging.info('host_device: %s infeed dtypes: %r', host_device,
dtypes)
if p.use_partitioned_infeed_queue:
device_assignment = py_utils.GetTpuDeviceAssignment(job_name)
host_device = device_assignment.host_device(
replica=0, job=tf.flags.FLAGS.tf_master)
host_id = int(host_device.split('/task:')[1].split('/device:')[0])
tf.logging.info('host_id: {} host_device: {}'.format(
host_id, host_device))
q = tpu_feed._PartitionedInfeedQueue( # pylint: disable=protected-access
number_of_tuple_elements=len(dtypes),
device_assignment=device_assignment,
host_id=host_id,
input_partition_dims=[
[p.num_partitions] + [1] * (len(s) - 1) for s in shapes
],
tuple_types=dtypes,
tuple_shapes=shapes)
else:
if p.use_per_core_infeed:
q = tpu_feed.InfeedQueue(
tuple_types=dtypes,
tuple_shapes=shapes,
number_of_partitions=p.num_partitions)
elif len(batch) > 1:
# When the input batch is sharded, the unsharded dtypes and shapes
# will be determined later by the generate_enqueue_ops() call.
q = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(batch[0].Flatten()))
else:
q = tpu_feed.InfeedQueue(tuple_types=dtypes, tuple_shapes=shapes)
assert shards is not None
q.set_number_of_shards(shards)
self._tpu_queues.append(q)
if p.use_partitioned_infeed_queue:
assert len(batch) == 1
input_ops = q.generate_enqueue_ops([batch[0].Flatten()])
elif p.use_per_host_infeed:
# TODO(ylc/zhifengc): Add this to a policy module and test it.
def TPUOrdinalFunction(shard_index_in_host):
if p.use_per_core_infeed:
return shard_index_in_host
device_assignment = py_utils.GetTpuDeviceAssignment()
if device_assignment:
# We put both enqueue/dequeue ops at core 0 in each replica.
replica = device_assignment.lookup_replicas(
task_id, 0)[shard_index_in_host] # pylint: disable=cell-var-from-loop
return device_assignment.tpu_ordinal(replica=replica)
else:
return shard_index_in_host
if len(batch) > 1:
# In this case, the `shard_index_in_host` argument of
# `TPUOrdinalFunction` is the index of a sharded batch in the
# `batch` list.
input_ops = q.generate_enqueue_ops(
[b.Flatten() for b in batch],
placement_function=lambda x: host_device, # pylint: disable=cell-var-from-loop
tpu_ordinal_function=TPUOrdinalFunction)
else:
input_ops = q.split_inputs_and_generate_enqueue_ops(
batch[0].Flatten(),
placement_function=lambda x: host_device, # pylint: disable=cell-var-from-loop
tpu_ordinal_function=TPUOrdinalFunction)
else:
assert len(batch) == 1
input_ops = q.split_inputs_and_generate_enqueue_ops(
batch[0].Flatten(),
device_assignment=py_utils.GetTpuDeviceAssignment(job_name))
input_ops_list += input_ops
tf.logging.info('input_ops_list %s', input_ops_list)
grouped_infeed_op = tf.group(*input_ops_list)
self._tpu_infeed_op = []
for _ in range(p.tpu_infeed_parallelism):
self._tpu_infeed_op.append(grouped_infeed_op)
def TpuDequeueBatch(self):
"""Create TPU dequeue ops.
This should only be called within a TPU context.
Returns:
- A NestedMap of the input batch.
"""
assert self._tpu_queues, 'CreateTpuEnqueueOps must be called first.'
with tf.device(tf.tpu.core(0)):
# Note that the dequeue_tuple op on the TPU core
# only cares about the shape/types being dequeued
# which is why this is hard-coded to the first Queue.
tensors = self._tpu_queues[0].generate_dequeue_op()
return self._batch_nm_types.Pack(tensors)
def CreateTpuEmbeddingEnqueueOps(self, mode_override=None):
"""Creates the TpuEmbedding enqueue ops on all hosts.
Note that this must be called after the instantiation of the
monolithic TPUEmbeddingLayer.
Args:
mode_override: String to override TPU embedding mode. See
TPUEmbedding.generate_enqueue_ops()
"""
p = self.params
if p.skip_tpu_embedding_enqueue_ops:
return
tpu_embedding_collection = tpu_embedding_layers.TpuEmbeddingCollection.Get()
tpu_embedding = tpu_embedding_collection.tpu_embedding
if not tpu_embedding:
return
num_tpu_hosts = self.cluster.num_tpu_hosts
num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
if p.filter_sparse_tensors:
assert len(self._per_host_emb_batches) == num_infeed_hosts
else:
assert len(self._per_host_batches) == num_infeed_hosts
if num_tpu_hosts > 1 and tpu_embedding is not None:
if not p.use_per_host_infeed:
tf.logging.fatal(
'TPU Embedding must be used with per_host_infeed with multiple '
'TPU host topologies.')
enqueue_ops = []
for task_id in range(num_infeed_hosts):
host_device = '/task:{}/device:CPU:0'.format(task_id)
if p.filter_sparse_tensors:
batch = self._per_host_emb_batches[task_id]
else:
batch = self._per_host_batches[task_id]
assert len(batch) == 1, "Tpu Embedding doesn't support sharded inputs."
batch = batch[0]
with tf.device(host_device):
tf.logging.info('host_device: %s, batch: %r', host_device, batch)
enqueue_ops += self.CreateTpuEmbeddingEnqueueOpsForHost(
tpu_embedding, batch, mode_override=mode_override)
self._tpu_infeed_op.append(tf.group(*enqueue_ops))
def CreateTpuEmbeddingEnqueueOpsForHost(self,
tpu_embedding,
input_batch,
mode_override=None):
"""Hook for creating TPU embedding enqueue ops for a single host.
Used by CreateTpuEmbeddingEnqueueOps(). Override this method in input
generators to control how embedding inputs are enqueued onto TPU.
Args:
tpu_embedding: The monolithic TpuEmbedding object.
input_batch: The input batch for this host.
mode_override: String to override TPU embedding mode. See
TPUEmbedding.generate_enqueue_ops()
Returns:
A list of TPU Embedding enqueue ops.
"""
tpu_emb_input_keys = list(tpu_embedding.feature_to_config_dict.keys())
tf.logging.info('tpu_emb_input_keys: %r', tpu_emb_input_keys)
num_cores_per_host = tpu_embedding.num_cores_per_host
enqueue_dict_per_core = [{} for _ in range(num_cores_per_host)]
for key in tpu_emb_input_keys:
feat = input_batch[key]
if isinstance(feat, tf.sparse.SparseTensor):
tpu_emb_feat_splitted = tf.sparse.split(
feat, num_cores_per_host, axis=0)
for core, split in enumerate(tpu_emb_feat_splitted):
enqueue_data = tpu_embedding_lib.EnqueueData.from_sparse_tensor(split)
enqueue_dict_per_core[core][key] = enqueue_data
else:
tpu_emb_feat_splitted = tf.split(feat, num_cores_per_host)
for core, split in enumerate(tpu_emb_feat_splitted):
          # Dense to sparse. Note the assumption that -1 is the padding id.
sample_indices = tf.where(tf.not_equal(split, -1))
embedding_indices = tf.gather_nd(split, sample_indices)
enqueue_data = tpu_embedding_lib.EnqueueData(embedding_indices,
sample_indices)
enqueue_dict_per_core[core][key] = enqueue_data
return tpu_embedding.generate_enqueue_ops(
enqueue_dict_per_core, mode_override=mode_override)
def GetCpuPassthroughKeys(self):
"""Return a list of keys from the input to skip sending to the device.
When running on TPU, a user may want to avoid sending some inputs to the
device; either the type is not supported (e.g., string), or the input will
    not be processed on the device at all. However, these items may still be
    useful to pass through to the "output", e.g., for decoding purposes.
This function should return a list of keys from InputBatch() that should not
be sent to the TPU, but can be combined with the outputs of Decode() before
passing to PostProcessDecodeOut().
Returns:
A list of keys from the input to filter from being sent to the device,
which may be combined with the output of Decode() prior to
PostProcessDecodeOut().
"""
return []
def CreateCpuPassthroughEnqueueOps(self):
"""Creates enqueue ops to pass through CPU inputs to the output."""
p = self.params
num_tpu_hosts = self.cluster.num_tpu_hosts
num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
cpu_passthrough_keys = self.GetCpuPassthroughKeys()
if not cpu_passthrough_keys:
return
# There is one enqueue op per host.
self._host_queues = {}
enqueue_ops = []
assert len(self._per_host_batches) == num_infeed_hosts
for task_id in range(num_infeed_hosts):
host_device = '/task:{}/device:CPU:0'.format(task_id)
batch = self._per_host_passthrough_batches[task_id]
assert isinstance(batch, list)
with tf.device(host_device):
self._cpu_nm_types = batch[0] if len(batch) == 1 else batch
tf.logging.info('host_device CPU passthrough types: %s, batch: %r',
host_device, batch)
cpu_dtypes = py_utils.Flatten(
py_utils.Transform(lambda x: x.dtype, batch))
# NOTE: we use a large capacity queue under the assumption that the size
# of these tensors will be generally smaller than that sent to the TPU,
# and that the TPU queue will likely fill up before the host queue,
# blocking further enqueues.
host_queue = tf.queue.FIFOQueue(capacity=10000, dtypes=cpu_dtypes)
self._host_queues[task_id] = host_queue
enqueue_ops += [host_queue.enqueue(py_utils.Flatten(batch))]
self._tpu_infeed_op.append(tf.group(*enqueue_ops))
def DequeueCpuPassthrough(self, concat=True):
"""Create CPU dequeue ops.
Args:
concat: Whether to concat the passthrough batches for each host into one
batch.
Returns:
None if there are no CPU passthrough values. Otherwise, a NestedMap of the
CPU passthrough input batch if `concat`, or a list of NestedMaps (one for
each host) if not `concat`.
"""
cpu_passthrough_keys = self.GetCpuPassthroughKeys()
if not cpu_passthrough_keys:
return None
p = self.params
num_tpu_hosts = self.cluster.num_tpu_hosts
num_infeed_hosts = num_tpu_hosts if p.use_per_host_infeed else 1
tensor_list = []
for task_id in range(num_infeed_hosts):
with tf.device('/task:{}/device:CPU:0'.format(task_id)):
tensors = self._host_queues[task_id].dequeue()
# Make list if only one tensor.
if not isinstance(tensors, list):
tensors = [tensors]
tensor_list.append(tensors)
# TODO(laigd): consider moving the concat logic out to make the API simpler.
if concat:
with tf.device('/task:0/device:CPU:0'):
# Transpose to get per-dequeue-element tuples, then concat.
result = list(map(lambda xs: tf.concat(xs, axis=0), zip(*tensor_list)))
return py_utils.Pack(self._cpu_nm_types, result)
# Return a list of batches, one per host.
return [py_utils.Pack(self._cpu_nm_types, xs) for xs in tensor_list]
@property
def tpu_infeed_op(self):
if self._tpu_infeed_op is not None:
return self._tpu_infeed_op
else:
raise ValueError('TPU infeed op not set. Call CreateTpuEnqueueOps first.')
@property
def merged_input_data_summary_op(self):
return self._merged_input_data_summary_op
@property
def input_data_summary_layout(self):
return self._input_data_summary_layout
def SplitInputBatch(self, num_splits):
"""Splits the current InputBatch into num_splits ways.
Args:
num_splits: The number of splits.
Returns:
A list of `.NestedMap`. Each `.NestedMap` represents the input
tensors in one split.
"""
assert num_splits >= 1
batch = self.GetPreprocessedInputBatch()
if num_splits == 1:
# Special case. No split is needed.
return [batch]
assert not py_utils.use_tpu()
field_split = ig_helper.SplitTensors(batch.Flatten(), num_splits)
num_fields = len(field_split)
ret = []
for j in range(num_splits):
split_flatten = [field_split[i][j] for i in range(num_fields)]
split = batch.Pack(split_flatten)
ret += [split]
return ret
def Reset(self, sess):
"""Reset the input-generator.
Override so that the input_generator reproduces examples as if from a fresh
instantiation.
Args:
sess: A tensorflow session.
"""
raise NotImplementedError()
@property
def _map_args(self):
"""Default args for tf.data.DataSet.map()."""
return {
'num_parallel_calls':
1 if self.cluster.in_unit_test else tf.data.experimental.AUTOTUNE,
'deterministic':
self.cluster.require_sequential_input_order
}
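# --- Illustrative example (not part of the original library code) ---
# A minimal sketch of a BaseInputGenerator subclass that keeps string-typed
# features on the host, as described in GetCpuPassthroughKeys() above. The
# class name and feature keys below are hypothetical.
class _ExampleCpuPassthroughInput(BaseInputGenerator):
  def GetCpuPassthroughKeys(self):
    # These keys are filtered out of the TPU infeed and re-joined with the
    # Decode() outputs before PostProcessDecodeOut().
    return ['utt_id', 'transcript']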
def FilePatternToDataSource(p):
"""Helper to turn p.file_pattern (deprecated) into p.file_datasource."""
if isinstance(p.file_pattern, str):
ds = datasource.SimpleDataSource.Params().Set(file_pattern=p.file_pattern)
elif isinstance(p.file_pattern, (list, tuple)):
if all([isinstance(x, str) for x in p.file_pattern]):
# While this violates the documentation and intended use, there are
      # subclasses that have used a tuple of strings, rather than a list of
      # (file_pattern, weight) tuples. Rather than treating lists and tuples
# differently, support both here until p.file_pattern is removed.
ds = datasource.SimpleDataSource.Params().Set(
file_pattern=list(p.file_pattern))
elif p.use_within_batch_mixing:
if max(list(map(len, p.file_pattern))) >= 3:
# Within batch mixing doesn't work with backprop filters, i.e. when
# file_pattern param contains a list of
# <file_pattern, weight, [bprop_variable_filter]> tuples.
raise ValueError('Expected a list of pairs, got %s' % p.file_pattern)
file_patterns, weights = (list(x) for x in zip(*p.file_pattern))
ds = datasource.SimpleDataSource.Params().Set(
file_pattern=file_patterns, weights=weights)
else:
# Otherwise fall back to MixByWeight-based approach.
datasources = []
weights = []
bprop_variable_filters = []
for input_entry in p.file_pattern:
if isinstance(input_entry, str):
raise ValueError('Should explicitly specify weights, got string: %s' %
input_entry)
file_pattern, weight = input_entry[:2]
datasources.append(
datasource.SimpleDataSource.Params().Set(file_pattern=file_pattern))
weights.append(weight)
bprop_variable_filter = input_entry[2] if len(input_entry) > 2 else ''
bprop_variable_filters.append(bprop_variable_filter)
ds = datasource.CrossBatchMixingDataSource.Params().Set(
sub=datasources,
weights=weights,
bprop_variable_filters=bprop_variable_filters)
else:
raise ValueError('Cannot parse p.file_pattern into a datasource.')
if cluster_factory.Current().tf_data_service_address:
bucket_upper_bound = None
if 'bucket_upper_bound' in p:
bucket_upper_bound = p.bucket_upper_bound
ds = datasource.TFDataServiceSource.Params().Set(
sub=ds, bucket_upper_bound=bucket_upper_bound)
ds = datasource.TFDatasetPrefetch.Params().Set(sub=ds)
return ds
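# --- Illustrative example (not part of the original library code) ---
# A sketch of the conversion performed by FilePatternToDataSource() for a
# weighted file_pattern list. The params object and the file patterns are
# hypothetical; this assumes the `hyperparams` module imported by this file.
def _ExampleFilePatternConversion():
  p = hyperparams.Params()  # hypothetical stand-in for an input generator's params
  p.Define('file_pattern', [('tfrecord:/tmp/train-a*', 0.7),
                            ('tfrecord:/tmp/train-b*', 0.3)], '')
  p.Define('use_within_batch_mixing', True, '')
  ds = FilePatternToDataSource(p)
  # With within-batch mixing and (pattern, weight) pairs, `ds` is a
  # SimpleDataSource params object with weights [0.7, 0.3]; with
  # use_within_batch_mixing=False it would become a CrossBatchMixingDataSource.
  return ds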
class BaseInputGeneratorFromFiles(BaseInputGenerator):
"""Base class for input generators that reads from files.
Subclasses should implement _DataSourceFromFilePattern.
"""
@classmethod
def Params(cls):
"""Defaults params for input generators."""
p = super().Params()
p.Define(
# NOTE: file_pattern is deprecated. New params should use
# file_datasource instead.
# TODO(b/139345706) remove file_pattern parameter
'file_pattern',
'',
'A single file pattern string, a list of file pattern strings or a list'
' of <file_pattern, weight> pairs or a list of <file_pattern, weight, '
'bprop_variable_filter> tuples. Some of the cases may not be supported '
        'with use_within_batch_mixing, where samples are drawn '
        'probabilistically from the inputs in proportion to their weights. '
        'Typically, values are binary protocol buffers containing train/eval '
        'samples. Keys are not used.')
p.Define('file_random_seed', 301,
'Random seed for shuffling the input data.')
p.Define(
'file_buffer_size', 10000,
'How many records are buffered for random shuffling. This param '
'affects how much RAM a train/test job needs. E.g., if an average '
        'record is about 500KB, the buffer needs 5GB of RAM.')
p.Define(
'file_buffer_size_in_seconds', 0,
'If non-zero, keep enough records in the buffer to handle N seconds '
'worth of demand. E.g., if the training job is reading 1000 records '
'per second and this parameter is set to 10, the buffer is resized '
'to contain 10000 records. This parameter is useful when reading from '
'many data sources at different speeds, as it automatically tunes the '
'size of buffers to fit demand. The file_buffer_size parameter is an '
        'upper bound on the buffer size.')
p.Define('file_parallelism', 16, 'How many files to read concurrently.')
p.Define(
        'flush_every_n', 0, 'If non-zero, flushes all batches buffered so '
        'far every time this many records have been yielded.')
p.Define('num_batcher_threads', 1, 'Number of threads to use for input '
'record batcher.')
p.Define(
'repeat_count', -1,
'Number of repetitions of a dataset before throwing OutOfRange error '
'when using require_sequential_input_order. Must only be set if '
'cluster.require_sequential_input_order is True.')
# TODO(b/139345706) when file_pattern is deleted use_within_batch_mixing
# will be specified by setting weights in SimpleDataSource in
# p.file_datasource and this param should be deleted as well.
p.Define(
'use_within_batch_mixing', False, 'Whether to mix records from '
'different input sources within batch or across batches (the '
'default option). This option only takes effect when file_pattern'
' is a list of file patterns with weights.')
return p
def __init__(self, params):
if params.use_per_host_infeed and params.file_random_seed != 0:
raise ValueError('file_random_seed needs to be 0 when '
'use_per_host_infeed == True.')
super().__init__(params)
def CreateDatasource(self):
p = self.params
assert not (
p.file_pattern and p.file_datasource
), 'Only one of file_pattern and file_datasource can be specified'
if not p.file_datasource:
p.file_datasource = FilePatternToDataSource(p)
# TODO(b/139345706) remove support for file_pattern
# p.file_pattern = ''
super().CreateDatasource()
def CommonInputOpArgs(self):
"""Common input params."""
p = self.params
args = super().CommonInputOpArgs()
num_input_replicas = 1
input_replica_id = 0
infeed_context = cluster.GetInfeedContext()
if infeed_context:
num_input_replicas = infeed_context.num_infeed_hosts
input_replica_id = infeed_context.infeed_host_index
tf.logging.info('input_replica_id=%s/%s', input_replica_id,
num_input_replicas)
# Legacy behavior for Lingvo input ops: require_sequential_order defaults to
# False for eval jobs. Note that this value is different from
# self.cluster.require_sequential_input_order.
require_sequential_order = bool(
self.cluster.params.require_sequential_input_order)
args.update({
'file_random_seed': p.file_random_seed,
'file_buffer_size': p.file_buffer_size,
'file_parallelism': p.file_parallelism,
'file_buffer_size_in_seconds': p.file_buffer_size_in_seconds,
'flush_every_n': p.flush_every_n,
'num_threads': p.num_batcher_threads,
'require_sequential_order': require_sequential_order,
'repeat_count': p.repeat_count,
'num_input_replicas': num_input_replicas,
'input_replica_id': input_replica_id,
})
args.update(self._InputOpBucketingArgs())
return args
def _InputOpBucketingArgs(self):
return {
'bucket_upper_bound': [1000000],
'bucket_batch_limit': [self.InfeedBatchSize()],
'bucket_adjust_every_n': 0,
}
def _InputBatch(self):
return self._BuildDataSource()
# TODO(b/139345706): After p.file_pattern is deleted, the following functions
# _DataSourceFromFilePattern, _BuildDataSourceWithMetadata, _BuildDataSource
# can be deleted and functionality moved to using the DataSource directly.
def _DataSourceFromFilePattern(self, file_pattern, input_source_weights=None):
"""Return a NestedMap containing an input batch from a string file_pattern.
Subclasses should implement this function.
Args:
file_pattern: A string file pattern.
input_source_weights: A list of float input source weights to control
input example mix in the batch. The records will be sampled from inputs
proportionally to these weights. Defaults to None which should be
treated as an empty list.
Returns:
A `.NestedMap` of tf.Tensors containing a batch of input data with shapes
[batch, ...].
"""
return py_utils.NestedMap(x=tf.zeros([1]))
def _BuildDataSourceWithMetadata(self):
"""Read and return input batch from `p.file_pattern`.
`p.file_pattern` may be a string file_pattern or a
list of (file_pattern, weight, [bprop_variable_filter]) tuples.
bprop_variable_filter is optional. When bprop_variable_filter is used,
batches will always contain the examples from the same source. Otherwise,
examples from different sources may be mixed together.
Returns:
A `.NestedMap` containing
- data: `.NestedMap` of tf.Tensor as in `_DataSourceFromFilePattern()`.
- source_selected: optional tensor of size [batch_size, #datasources].
- selected_bprop: optional tensor of size [#datasources].
- bprop_variable_filters: optional list of filters for each source.
Raises:
ValueError: If file_datasource is not set
"""
p = self.params
if p.use_per_host_infeed and not self._in_get_processed_input_batch:
raise ValueError(
'This input generator does not support p.use_per_host_infeed. '
'Please set it to False, or move the call to self._BuildDataSource() '
'from self.__init__() to self._InputBatch() for batches to be '
'correctly replicated per host.')
if not p.file_datasource and p.file_pattern:
# This is a workaround for subclasses which have defined
# their own data source-like functionality.
tf.logging.info(
'Creating data source-like output from class %s using '
'file_pattern %s', self, p.file_pattern)
ret = py_utils.NestedMap()
ret.data = self._DataSourceFromFilePattern(p.file_pattern)
else:
tf.logging.info(
'Building data source %s with params %s and '
'file_pattern %s', self.datasource, self.datasource.params,
p.file_pattern)
batch = self.datasource.GetNext()
ret = self.datasource.GetMeta()
ret.data = batch
if 'selected_bprop' in ret:
self._bprop_onehot = ret.selected_bprop
if 'bprop_variable_filters' in ret:
self._bprop_variable_filters = ret.bprop_variable_filters
if 'source_selected' not in ret:
ret.source_selected = None
return ret
def _BuildDataSource(self):
"""Read and return input batch from `p.file_pattern`.
Same as _BuildDataSourceWithMetadata but does not return any metadata.
Returns:
A `.NestedMap` of tf.Tensor as in `self._DataSourceFromFilePattern()`.
Raises:
ValueError: If unknown token type.
"""
return self._BuildDataSourceWithMetadata()['data']
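# --- Illustrative example (not part of the original library code) ---
# A minimal sketch of the _DataSourceFromFilePattern() contract described
# above: given a string file pattern, return a NestedMap of batch-major
# tensors. A real subclass would drive a record reader / input op using
# self.CommonInputOpArgs(); the fixed-shape placeholder batch below is
# hypothetical.
class _ExampleFileInput(BaseInputGeneratorFromFiles):
  def _DataSourceFromFilePattern(self, file_pattern, input_source_weights=None):
    del file_pattern, input_source_weights  # unused in this sketch
    batch_size = self.InfeedBatchSize()
    return py_utils.NestedMap(
        ids=tf.zeros([batch_size, 16], dtype=tf.int32),
        paddings=tf.zeros([batch_size, 16], dtype=tf.float32))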
class BaseSequenceInputGenerator(BaseInputGeneratorFromFiles):
"""The basic sequence input generator.
Subclasses should implement _DataSourceFromFilePattern defined in
BaseInputGeneratorFromFiles.
"""
@classmethod
def Params(cls):
"""Defaults params for sequence input generators."""
p = super().Params()
p.Delete('batch_size')
# How input should be bucketized.
p.Define(
        'bucket_upper_bound', [2560], 'Bucketing scheme. Required to be '
        'a sorted list of integers. Examples that are longer than all bucket '
        'upper bounds are skipped.')
p.Define(
'bucket_batch_limit', [8],
'Desired per-split batch size per bucket. Scaled in '
        'infeed_bucket_batch_limit to the infeed size. '
'Must be the same length as bucket_upper_bound.')
p.Define(
'bucket_adjust_every_n', 0, 'If non-zero, optimize the values of '
'bucket_upper_bound except the last one after every N records '
'based on the current input length distribution.')
p.Define('source_max_length', None,
'The maximum length of the source sequence.')
p.Define('target_max_length', 300,
'The maximum length of the target sequence.')
p.Define('pad_to_max_seq_length', False,
'If True, input tensors will be padded to max_length.')
p.Define('tokenizer', tokenizers.AsciiTokenizer.Params(),
'Tokenizer params.')
p.Define(
'tokenizer_dict', {},
'If multiple tokenizers are required, they can be accessed through '
'this dict via a key.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.tokenizer:
assert DEFAULT_TOKENIZER_KEY not in p.tokenizer_dict
p.tokenizer_dict[DEFAULT_TOKENIZER_KEY] = p.tokenizer
self.tokenizer_dict = {}
for k, p in p.tokenizer_dict.items():
if p:
name = '_tokenizer_' + k
self.CreateChild(name, p)
self.tokenizer_dict[k] = self.children[name]
else:
self.tokenizer_dict[k] = None
if DEFAULT_TOKENIZER_KEY in self.tokenizer_dict:
self.tokenizer = self.tokenizer_dict[DEFAULT_TOKENIZER_KEY]
@property # Adjust batch size according to the cluster spec.
def infeed_bucket_batch_limit(self):
"""Returns the bucket batch limit for one infeed host."""
p = self.params
infeed_bucket_batch_limit = [
batch_utils.scale_split_to_infeed(b, p.use_per_host_infeed)
for b in p.bucket_batch_limit
]
tf.logging.info(
'infeed_bucket_batch_limit={} num_splits_per_client={} bucket_batch_limit={}'
.format(infeed_bucket_batch_limit, self.cluster.num_splits_per_client,
p.bucket_batch_limit))
return infeed_bucket_batch_limit
def InfeedBatchSize(self):
"""Returns the batch size of one infeed pipeline.
Override in subclass to provide dynamically shaped infeed batch size.
    If use_per_host_infeed is False then there is only one infeed pipeline, and
    GlobalBatchSize() and InfeedBatchSize() are the same.
"""
buckets = self.infeed_bucket_batch_limit
if any(x != buckets[0] for x in buckets):
tf.logging.warning('Using max bucket batch limit but not all limits are '
'the same {}'.format(buckets))
infeed_size = max(buckets)
tf.logging.info('InfeedBatchSize: %d', infeed_size)
return infeed_size
def _InputOpBucketingArgs(self):
p = self.params
bucket_batch_limit = self.infeed_bucket_batch_limit
tf.logging.info('infeed_bucket_batch_limit %r', bucket_batch_limit)
return {
'bucket_upper_bound': p.bucket_upper_bound,
'bucket_batch_limit': bucket_batch_limit,
'bucket_adjust_every_n': p.bucket_adjust_every_n,
}
def StringsToIds(self,
strs,
is_source=False,
external_max_length=None,
external_append_eos=None,
key=None,
languages=None):
"""Tokenize strs into vocab ids.
Args:
strs: A vector of strings.
is_source: A bool to indicate whether to use `source_max_length` to pad
'strs'.
external_max_length: An int providing the max_length for strs.
external_append_eos: Bool or None. If None, will be ignored and
`params.append_eos` will be used. If bool, will determine if an eos
symbol will be added to tokens.
key: A string key in case the model has multiple tokenizers.
languages: A vector of str with the same length as `strs`.
Returns:
A tuple (ids, labels, paddings) with the same shape [batch, maxlen].
- ids[i, j] is the input token id of i-th sample for j-th step.
- labels[i, j] is the target token id of i-th sample for j-th step.
- paddings[i, j] is 1 iff i-th sample's j-th step is padded.
Usually ids[i, 0] == SOS, ids[i, j+1] == labels[i, j], and labels[i, :]
ends with EOS. That is, `ids` and `labels` are inputs and ground-truth
labels for step-by-step teacher-forcing training, respectively.
Raises:
ValueError: If unknown token type.
"""
p = self.params
if external_max_length is not None:
maxlen = external_max_length
elif is_source:
maxlen = p.source_max_length
else:
maxlen = p.target_max_length
key = key or DEFAULT_TOKENIZER_KEY
return self.tokenizer_dict[key].StringsToIds(
strs, maxlen, external_append_eos, languages=languages)
def StringsToIdsWithOffsets(self,
strs,
is_source=False,
external_max_length=None,
external_append_eos=None,
key=None,
languages=None):
"""Tokenize strs into vocab ids, and also return byte-level offsets.
Args:
strs: A vector of strings.
is_source: A bool to indicate whether to use `source_max_length` to pad
'strs'.
external_max_length: An int providing the max_length for strs.
external_append_eos: Bool or None. If None, will be ignored and
`params.append_eos` will be used. If bool, will determine if an eos
symbol will be added to tokens.
key: A string key in case the model has multiple tokenizers.
languages: A vector of str with the same length as `strs`.
Returns:
A tuple (ids, labels, paddings) with the same shape [batch, maxlen].
- ids[i, j] is the input token id of i-th sample for j-th step.
- labels[i, j] is the target token id of i-th sample for j-th step.
- paddings[i, j] is 1 iff i-th sample's j-th step is padded.
- start_offset[i, j] is the byte-level offset of the start of the j-th id
in the i-th original string
- end_offset[i, j] is the byte-level offset of the end of the j-th id
in the i-th original string
Usually ids[i, 0] == SOS, ids[i, j+1] == labels[i, j], and labels[i, :]
ends with EOS. That is, `ids` and `labels` are inputs and ground-truth
labels for step-by-step teacher-forcing training, respectively.
Raises:
ValueError: If unknown token type.
Exception: If the specified tokenizer does not support offsets.
"""
p = self.params
if external_max_length is not None:
maxlen = external_max_length
elif is_source:
maxlen = p.source_max_length
else:
maxlen = p.target_max_length
key = key or DEFAULT_TOKENIZER_KEY
return self.tokenizer_dict[key].StringsToIdsWithOffsets(
strs, maxlen, external_append_eos, languages=languages)
def IdsToStrings(self, ids, lens, key=None):
"""Converts ids back to strings.
Args:
ids: A matrix of shape [batch, seqlen]. ids[i, :] is the i-th sample's
ids.
lens: A vector of shape [batch]. lens[i] is the sequence length of the
i-th sample. Only the first lens[i] tokens in ids[i, :] are valid tokens
for the i-th sequence.
key: A string key in case the model has multiple tokenizers.
Returns:
sequences - A vector of shape [batch]. The converted string sequence.
Raises:
ValueError: If unknown token type.
"""
key = key or DEFAULT_TOKENIZER_KEY
return self.tokenizer_dict[key].IdsToStrings(ids, lens)
def Cast(self, v):
"""Cast tensor dtype to fprop_dtype."""
if not v.dtype.is_floating:
return v
return tf.cast(v, py_utils.FPropDtype(self.params))
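# --- Illustrative example (not part of the original library code) ---
# Hypothetical bucketing configuration for a sequence input generator, showing
# how bucket_upper_bound and bucket_batch_limit pair up: sequences of length
# <= 20 are batched 64 at a time, lengths in (20, 40] are batched 32 at a
# time, and anything longer than 40 is skipped.
def _ExampleSequenceInputParams(subclass):
  p = subclass.Params()  # `subclass` is some BaseSequenceInputGenerator subclass
  p.bucket_upper_bound = [20, 40]
  p.bucket_batch_limit = [64, 32]
  p.target_max_length = 40
  return p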
class BaseTinyDatasetInput(BaseInputGenerator):
"""Input generator for tiny dataset which are stored in tf checkpoint.
| Input batch (b: batch size, h: height, w: width, d: depth):
| raw: Samples. [b, h, w, d].
| data: Preprocessed samples. [b, h, w, d].
| label: Labels. [b].
| weight: [b]. weight[i] is 1.0 if i-th sample is considered to
| be a real example. Otherwise, weight[i] is 0.0.
"""
@classmethod
def Params(cls):
"""Defaults params."""
p = super().Params()
p.Define('ckpt', None, 'A TensorFlow checkpoint.')
p.Define('data', 'x_train', 'The tensor name in the ckpt.')
p.Define('data_dtype', tf.uint8, 'The tensor dtype in the ckpt.')
p.Define(
'data_shape', (0, 0, 0), 'A tuple of ints. E.g., a tiny image '
        'has the shape (height, width, depth).')
p.Define('label', 'y_train', 'The tensor name in the ckpt.')
p.Define('label_dtype', tf.uint8, 'The tensor dtype in the ckpt.')
p.Define('repeat', True, 'If true, goes through the dataset repeatedly.')
p.use_per_host_infeed = True
return p
def _InputBatch(self):
p = self.params
@tf.function
def ReadData():
x, y = io_ops.restore_v2(p.ckpt, [p.data, p.label], [''] * 2,
[p.data_dtype, p.label_dtype])
# Always convert to float32.
return tf.cast(x, tf.float32), tf.cast(y, tf.float32)
# Loads data and label into memory and keep it around.
data, label = ops.cached_call(
f=ReadData.get_concrete_function(), T=[tf.float32, tf.float32])
b, shape = self.InfeedBatchSize(), list(p.data_shape)
data = tf.reshape(data, [-1] + shape)
label = tf.reshape(label, [-1])
label = py_utils.HasShape(label, [tf.shape(data)[0]])
sample_ids = ops.random_permutation_sequence(
num=p.num_samples,
batch=b,
repeat=p.repeat,
seed=p.random_seed if p.random_seed else 0)
n = tf.shape(sample_ids)[0]
raw = py_utils.PadOrTrimTo(tf.gather(data, sample_ids), [b] + shape)
ret = py_utils.NestedMap(
raw=raw,
data=self._Preprocess(raw),
label=py_utils.PadOrTrimTo(tf.gather(label, sample_ids), [b]),
weight=py_utils.PadOrTrimTo(tf.ones([n], dtype=tf.float32), [b]))
if not py_utils.use_tpu():
ret['sample_ids'] = sample_ids
return ret
def _Preprocess(self, raw):
return raw
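# --- Illustrative example (not part of the original library code) ---
# Hypothetical configuration of BaseTinyDatasetInput for a small image dataset
# stored in a TensorFlow checkpoint. The checkpoint path, tensor names, and
# shapes below are made up for illustration.
def _ExampleTinyDatasetParams():
  p = BaseTinyDatasetInput.Params()
  p.ckpt = '/tmp/tiny_images.ckpt'   # hypothetical checkpoint
  p.data = 'x_train'
  p.label = 'y_train'
  p.data_shape = (28, 28, 1)         # (height, width, depth)
  return p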
class TFDataSequenceInputGenerator(BaseSequenceInputGenerator):
"""tf.data input pipeline for sequences.
Inherits params from BaseSequenceInputGenerator so this can be a drop-in
replacement for existing input generators inheriting from
BaseSequenceInputGenerator. However, many params may be ignored / unused.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('prefetch_buffer_size', 1, 'Local prefetch buffer size.')
p.resettable = True
return p
def CreateDatasource(self):
p = self.params
if not p.file_datasource:
# Convert p.file_pattern into p.file_datasource.
ds = self.ConvertFilePatternToDataSource(p, p.file_pattern)
p.file_pattern = ''
else:
ds = p.file_datasource
ds = datasource.CustomTFDatasetTransform.Params().Set(
sub=ds, fn='TakeEvalSamples')
ds = datasource.TFDatasetBatchBySequenceLength.Params().Set(
sub=ds,
seqlen_fn='GetSequenceLength',
input_shape_fn='_InputShape',
input_padding_fn='_InputPaddingValue',
bucket_upper_bound=p.bucket_upper_bound,
bucket_batch_limit=p.bucket_batch_limit)
if self.cluster.tf_data_service_address:
ds = datasource.TFDataServiceSource.Params().Set(
sub=ds, bucket_upper_bound=p.bucket_upper_bound)
ds = datasource.TFDatasetPrefetch.Params().Set(
sub=ds, buffer_size=p.prefetch_buffer_size)
p.file_datasource = ds
super().CreateDatasource()
@classmethod
def ConvertFilePatternToDataSource(cls, p, file_pattern):
if isinstance(file_pattern, str):
file_patterns = file_pattern.split(',')
weights = None
else:
if all([isinstance(x, str) for x in file_pattern]):
file_patterns = file_pattern
weights = None
elif all([isinstance(x, tuple) for x in file_pattern]):
file_patterns, weights = zip(*file_pattern)
else:
raise ValueError(
f'file_pattern must be all strings or all tuples, but got: '
f'{file_pattern}.')
for fp in file_patterns:
if ',' in fp:
raise ValueError(f'file_pattern should not contain comma: {fp}')
ds = []
for fp in file_patterns:
ds.append(datasource.TFDatasetFnInput.Params().Set(
load_fn='LoadDataset',
kwargs=dict(file_pattern=fp),
shuffle_buffer_size=p.file_buffer_size))
if len(ds) > 1:
if not p.use_within_batch_mixing:
raise ValueError(
'Only p.use_within_batch_mixing is supported with multiple '
'file_patterns.')
ds = [datasource.TFDatasetMixer.Params().Set(sub=ds, weights=weights)]
ds = datasource.CustomTFDatasetTransform.Params().Set(
sub=ds[0], fn='ProcessDataset')
return ds
def Reset(self, sess):
self.datasource.Reset(sess)
def GetPreprocessedInputBatch(self):
return self.datasource.GetNext()
def LoadDataset(self, file_pattern):
"""Load a dataset from file.
Args:
file_pattern: the path to the file to load.
Returns:
A tf.data.Dataset() whose elements represent a single training sample
without a leading batch dim.
"""
raise NotImplementedError()
def TakeEvalSamples(self, dataset):
p = self.params
if self.do_eval and p.num_samples > 0:
dataset = dataset.take(p.num_samples)
return dataset
def ProcessDataset(self, dataset):
"""Processes a dataset returned by LoadDataset.
Args:
dataset: A dataset returned by LoadDataset.
Returns:
A processed dataset containing NestedMaps of Tensors without a leading
batch dimension.
"""
raise NotImplementedError()
def GetSequenceLength(self, example):
"""Returns sequence length for the example NestedMap from the dataset.
Args:
example: A NestedMap containing an input example. Tensors in the example
do not have a leading batch dimension.
Returns:
An integer sequence length for the example.
"""
raise NotImplementedError()
def _InputShape(self, key):
"""Returns the final shape of the tensor corresponding to key as a tuple.
The shape should not include a leading batch dimension.
Args:
key: The NestedMap key to return shape for.
"""
if key in ('source_id', 'bucket_keys'):
return ()
raise ValueError('Unexpected key %s' % key)
def _InputPaddingValue(self, key, tensorspec):
"""Returns the value to pad the tensor corresponding to key with."""
if key.endswith('_paddings'):
return tf.ones([], dtype=tensorspec.dtype)
else:
return tf.zeros([], dtype=tensorspec.dtype)
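# --- Illustrative example (not part of the original library code) ---
# A minimal sketch of a TFDataSequenceInputGenerator subclass implementing the
# override points above. The record format ('ids' stored as a variable-length
# int64 feature in TFRecord files) is hypothetical.
class _ExampleTFDataSeqInput(TFDataSequenceInputGenerator):
  def LoadDataset(self, file_pattern):
    return tf.data.TFRecordDataset(tf.data.Dataset.list_files(file_pattern))
  def ProcessDataset(self, dataset):
    def _Parse(record):
      feats = tf.io.parse_single_example(
          record, {'ids': tf.io.VarLenFeature(tf.int64)})
      return py_utils.NestedMap(
          ids=tf.cast(tf.sparse.to_dense(feats['ids']), tf.int32))
    return dataset.map(_Parse, **self._map_args)
  def GetSequenceLength(self, example):
    return tf.shape(example.ids)[0]
  def _InputShape(self, key):
    if key == 'ids':
      return (None,)  # variable-length 1-D sequence, padded per bucket
    return super()._InputShape(key)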
class BaseDataExampleInputGenerator(BaseInputGenerator):
"""Base class for input generators that read Feature protos via tf.data."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('input_files', None, 'Delimited glob of input files.')
p.Define(
'dataset_type', None,
'A dataset class constructor such as tf.data.TFRecordDataset. '
'The class constructor must take a list of filenames and produce an '
'object that extends tf.data.Dataset.')
p.Define('randomize_order', True, 'Whether to randomize the order.')
p.Define('parallel_readers', 1, 'Number of parallel reader threads.')
p.Define('num_examples', -1, 'Number of examples (-1 for unlimited).')
p.Define(
'num_epochs', -1,
'Number of passes through the data to make (-1 for unlimited).'
'`tf.errors.OutOfRangeError` is thrown after the limit is reached.')
p.Define('randomize_shuffle_size', 500,
'Size of the random shuffle buffer.')
return p
def __init__(self, params):
super().__init__(params)
p = params
assert p.input_files, (
'input_files is required for a tf.data example input generator')
assert p.dataset_type, (
'dataset_type is required for a tf.data example input generator')
def GetFeatureSpec(self):
"""Subclasses must implement and return a feature spec.
Returns:
NestedMap of features compatible with tf.io.parse_example. Default
implementation returns an empty dict.
"""
return {}
def GetPreprocessedInputBatch(self):
p = self.params
def ParseAndProcess(*cols):
"""Parses a Tensorflow example into features."""
# Assume either one or two column input. If one, then the record is
# assumed to be that column. If 2, then it is assumed to be a KV store
# and the record is the second column.
assert len(cols) in [
1, 2
      ], ('BaseDataExampleInputGenerator supports one- or two-column input')
record = cols[-1]
feature_spec = self.GetFeatureSpec()
features = py_utils.NestedMap(tf.io.parse_example(record, feature_spec))
return self._PreprocessInputBatch(features)
dataset_factory = p.dataset_type
dataset = (
tf.data.Dataset.list_files(
p.input_files, shuffle=bool(p.randomize_order)).apply(
tf.data.experimental.parallel_interleave(
dataset_factory,
cycle_length=p.parallel_readers,
sloppy=p.randomize_order)))
if p.randomize_order:
dataset = dataset.shuffle(p.randomize_shuffle_size)
dataset = dataset.take(p.num_examples)
dataset = dataset.repeat(p.num_epochs)
dataset = dataset.batch(self.InfeedBatchSize(), drop_remainder=True)
dataset = dataset.map(
ParseAndProcess, num_parallel_calls=p.parallel_readers)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
iterator = dataset.make_one_shot_iterator()
input_batch = iterator.get_next()
return input_batch
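# --- Illustrative example (not part of the original library code) ---
# A minimal sketch of a BaseDataExampleInputGenerator subclass. The feature
# names and shapes in the feature spec are hypothetical; ParseAndProcess()
# above passes this spec to tf.io.parse_example.
class _ExampleProtoInput(BaseDataExampleInputGenerator):
  def GetFeatureSpec(self):
    return {
        'image': tf.io.FixedLenFeature([28 * 28], tf.float32),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }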
def DefineTFDataInput(name, func, ignore_args=None, map_args=None):
"""Defines a new InputGenerator class from given tf.data pipeline.
This function allows users to utilize existing tf.data pipelines which are
defined externally, without making binding boilerplates.
The generated InputGenerator behaves like a one-shot iterator of the given
pipeline. If the iterator is designed to be repeated, the returned
InputGenerator will work similarly.
This function generates `Params` automatically by analysing the given
pipeline's signature so that the behavior of the pipeline can be saved into
`Params`.
  This function defines the InputGenerator class in the caller's module. To
  avoid any confusion, the returned class has to be stored in a module-level
  symbol with the same identifier as the given `name`.
Example:
>>> # A tf.data pipeline which returns a dict of Tensors.
>>> def my_dataset(begin=0, end=10):
... ds = tf.data.Dataset.from_tensor_slices(tf.range(begin, end))
... return ds.map(lambda x: {'value': x})
>>> # Defines the InputGenerator class for my_dataset.
>>> MyInput = DefineTFDataInput('MyInput', my_dataset)
>>> # Obtains Params of MyInput.
>>> p = MyInput.Params()
>>> assert p.args.begin == 0
>>> assert p.args.end == 10
>>> # Instantiates the InputGenerator from Params.
>>> ig = p.Instantiate()
>>> assert isinstance(ig, MyInput)
>>> # Obtains the data tensors.
>>> # In TFv1:
>>> data = ig.GetPreprocessedInputBatch()
>>> with tf.Session() as sess:
... values = sess.run(data) # {'value': 0}
... values = sess.run(data) # {'value': 1}
... values = sess.run(data) # {'value': 2}
>>> # In TFv2:
>>> values = ig.GetPreprocessedInputBatch() # {'value': 0}
>>> values = ig.GetPreprocessedInputBatch() # {'value': 1}
>>> values = ig.GetPreprocessedInputBatch() # {'value': 2}
Args:
name: A string, representing the name of the new InputGenerator class.
func: A callable to be analysed to generate the new InputGenerator. The
return value of `func` must be a single `tf.data.Dataset` which yields a
dict or its subclasses. The signature (parameter list) of `func` must have
all explicit parameters needed to configure the pipeline. `*args` and
      `**kwargs` parameters are ignored when defining `Params`.
ignore_args: A collection of strings, representing the set of parameter
      names to be excluded when defining `Params`.
map_args: A {str: str} dict, representing mappings from existing fields in
`Params()` to `func`'s parameter. These mappings can be used to propagate
some particular Lingvo-specific options defined by others (typically by
super classes: `BaseInputGenerator` or `BaseLayer`) to the given function.
Each entry in the dict represents a `{func_param: layer_param}` pair such
that the `Params().layer_param` field will be mapped to the parameter
`func_param` of `func`. `func_param` won't be added into `Params().args`
to avoid duplicated definitions about the same parameters.
Returns:
A new InputGenerator class that invokes `func` internally. The `Params()`
method of the returned class makes a new Params containing the `args` field
representing the parameters of `func`. The `GetPreprocessedInputBatch()`
method returns a `py_utils.NestedMap` representing the same dict of the
obtained data from the dataset.
"""
ignore_args = set(ignore_args if ignore_args is not None else ())
map_args = dict(map_args if map_args is not None else {})
# Defines the class first as it will be required to call `super()`.
generated_cls = type(name, (BaseInputGenerator,), {})
@classmethod
def _Params(cls):
"""Generates Params to configure the InputGenerator.
This function analyses the signature of the given callable `func` and
defines corresponding fields into `Params` to the obtained function
parameters.
Returns:
An `InstantiableParams` object representing the InputGenerator. It has the
`args` field which contains the set of parameters of `func`.
"""
# Keys in `map_args` will also be ignored.
actual_ignore_args = ignore_args | set(map_args.keys())
p = super(generated_cls, cls).Params()
# Introduces a new group `args` to avoid confusion between `func`'s
# parameters and existing params defined by super classes.
    # TODO(oday): For better UX, consider removing this nested field and adding
    # `func`'s parameters to `p` directly. We need to make sure that integrating
    # `func`'s parameters does not have side effects on the following:
# - BaseInputGenerator.Params()
# - BaseLayer.Params()
# - InstantiableParams.cls
p.Define('args', hyperparams.Params(), 'Parameter list of the pipeline.')
inspect_utils.DefineParams(func, p.args, actual_ignore_args)
ds = datasource.TFDatasetFnInput.Params().Set(
load_fn='GetDataset', shuffle_buffer_size=1)
if cluster_factory.Current().tf_data_service_address:
ds = datasource.TFDataServiceSource.Params().Set(sub=ds)
ds = datasource.TFDatasetPrefetch.Params().Set(sub=ds)
p.file_datasource = ds
return p
def _GetDataset(self):
p = self.params
overrides = {k: p.Get(v) for k, v in map_args.items()}
dataset = inspect_utils.CallWithParams(func, p.args, **overrides)
assert isinstance(dataset, (tf.tf1.data.Dataset, tf.tf2.data.Dataset)), (
'DefineTFDataInput must take a callable which returns a '
'`tf.data.Dataset`. The given callable `%s` returned `%s`' %
(func, dataset))
return dataset
def _GetPreprocessedInputBatch(self):
"""Generates data tensors by invoking the pipeline."""
# TFv1: Returns Tensors which will be determined by Session.run().
# TFv2: Returns Tensors with actual values.
data = self.datasource.GetNext()
# Converts dict to NestedMap to maintain consistency with existing
# functionalities in base_input_generator.
# TODO(oday): Consider mitigating this restriction.
assert isinstance(data, dict), (
        'DefineTFDataInput accepts only datasets that return a dict or its '
'subclasses.')
if not isinstance(data, py_utils.NestedMap):
data = py_utils.NestedMap.FromNestedDict(data)
return data
# Overrides member methods.
generated_cls.Params = _Params
generated_cls.GetDataset = _GetDataset
generated_cls.GetPreprocessedInputBatch = _GetPreprocessedInputBatch
# Sets __module__ to the caller's module name for pickling and restoring from
# Params to work.
# See also the namedtuple's implementation for details.
module = inspect.stack()[1].frame.f_globals.get('__name__', '__main__')
generated_cls.__module__ = module
return generated_cls
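# --- Illustrative example (not part of the original library code) ---
# A sketch of DefineTFDataInput with `map_args`: the pipeline's `batch_size`
# argument is driven by the standard `batch_size` param instead of being
# duplicated under Params().args. The pipeline itself is hypothetical.
def _ExampleBatchedRange(batch_size=8, limit=100):
  ds = tf.data.Dataset.range(limit).map(lambda x: {'value': x})
  return ds.batch(batch_size, drop_remainder=True)
_ExampleBatchedInput = DefineTFDataInput(
    '_ExampleBatchedInput', _ExampleBatchedRange,
    map_args={'batch_size': 'batch_size'})
# _ExampleBatchedInput.Params() now exposes args.limit but not args.batch_size;
# the existing batch_size field is forwarded to the pipeline at run time.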
| 40.558962
| 95
| 0.679435
|
5524375de699146ef8dc3d1b462ae6233a316bd8
| 9,822
|
py
|
Python
|
redash/query_runner/pg.py
|
rrgaya-zz/redash
|
8ad08a566adf1ce6fd461bf7cf11b51f12b3a48f
|
[
"BSD-2-Clause"
] | 8
|
2019-05-05T10:33:43.000Z
|
2021-07-14T11:21:52.000Z
|
redash/query_runner/pg.py
|
rrgaya-zz/redash
|
8ad08a566adf1ce6fd461bf7cf11b51f12b3a48f
|
[
"BSD-2-Clause"
] | 3
|
2020-06-18T15:21:00.000Z
|
2021-02-08T20:35:37.000Z
|
redash/query_runner/pg.py
|
rrgaya-zz/redash
|
8ad08a566adf1ce6fd461bf7cf11b51f12b3a48f
|
[
"BSD-2-Clause"
] | 15
|
2019-06-29T13:58:00.000Z
|
2022-02-27T14:57:03.000Z
|
import os
import logging
import select
import psycopg2
from psycopg2.extras import Range
from redash.query_runner import *
from redash.utils import JSONEncoder, json_dumps, json_loads
logger = logging.getLogger(__name__)
types_map = {
20: TYPE_INTEGER,
21: TYPE_INTEGER,
23: TYPE_INTEGER,
700: TYPE_FLOAT,
1700: TYPE_FLOAT,
701: TYPE_FLOAT,
16: TYPE_BOOLEAN,
1082: TYPE_DATE,
1114: TYPE_DATETIME,
1184: TYPE_DATETIME,
1014: TYPE_STRING,
1015: TYPE_STRING,
1008: TYPE_STRING,
1009: TYPE_STRING,
2951: TYPE_STRING
}
class PostgreSQLJSONEncoder(JSONEncoder):
def default(self, o):
if isinstance(o, Range):
# From: https://github.com/psycopg/psycopg2/pull/779
if o._bounds is None:
return ''
items = [
o._bounds[0],
str(o._lower),
', ',
str(o._upper),
o._bounds[1]
]
return ''.join(items)
return super(PostgreSQLJSONEncoder, self).default(o)
def _wait(conn, timeout=None):
while 1:
try:
state = conn.poll()
if state == psycopg2.extensions.POLL_OK:
break
elif state == psycopg2.extensions.POLL_WRITE:
select.select([], [conn.fileno()], [], timeout)
elif state == psycopg2.extensions.POLL_READ:
select.select([conn.fileno()], [], [], timeout)
else:
raise psycopg2.OperationalError("poll() returned %s" % state)
except select.error:
raise psycopg2.OperationalError("select.error received")
class PostgreSQL(BaseSQLQueryRunner):
noop_query = "SELECT 1"
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"host": {
"type": "string",
"default": "127.0.0.1"
},
"port": {
"type": "number",
"default": 5432
},
"dbname": {
"type": "string",
"title": "Database Name"
},
"sslmode": {
"type": "string",
"title": "SSL Mode",
"default": "prefer"
}
},
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname"],
"secret": ["password"]
}
@classmethod
def type(cls):
return "pg"
def _get_definitions(self, schema, query):
results, error = self.run_query(query, None)
if error is not None:
raise Exception("Failed getting schema.")
results = json_loads(results)
for row in results['rows']:
if row['table_schema'] != 'public':
table_name = u'{}.{}'.format(row['table_schema'], row['table_name'])
else:
table_name = row['table_name']
if table_name not in schema:
schema[table_name] = {'name': table_name, 'columns': []}
schema[table_name]['columns'].append(row['column_name'])
def _get_tables(self, schema):
'''
relkind constants per https://www.postgresql.org/docs/10/static/catalog-pg-class.html
r = regular table
v = view
m = materialized view
f = foreign table
p = partitioned table (new in 10)
---
i = index
S = sequence
t = TOAST table
c = composite type
'''
query = """
SELECT s.nspname as table_schema,
c.relname as table_name,
a.attname as column_name
FROM pg_class c
JOIN pg_namespace s
ON c.relnamespace = s.oid
AND s.nspname NOT IN ('pg_catalog', 'information_schema')
JOIN pg_attribute a
ON a.attrelid = c.oid
AND a.attnum > 0
AND NOT a.attisdropped
WHERE c.relkind IN ('m', 'f', 'p')
UNION
SELECT table_schema,
table_name,
column_name
FROM information_schema.columns
WHERE table_schema NOT IN ('pg_catalog', 'information_schema')
"""
self._get_definitions(schema, query)
return schema.values()
def _get_connection(self):
connection = psycopg2.connect(user=self.configuration.get('user'),
password=self.configuration.get('password'),
host=self.configuration.get('host'),
port=self.configuration.get('port'),
dbname=self.configuration.get('dbname'),
sslmode=self.configuration.get('sslmode'),
async_=True)
return connection
def run_query(self, query, user):
connection = self._get_connection()
_wait(connection, timeout=10)
cursor = connection.cursor()
try:
cursor.execute(query)
_wait(connection)
if cursor.description is not None:
columns = self.fetch_columns([(i[0], types_map.get(i[1], None)) for i in cursor.description])
rows = [dict(zip((c['name'] for c in columns), row)) for row in cursor]
data = {'columns': columns, 'rows': rows}
error = None
json_data = json_dumps(data, ignore_nan=True, cls=PostgreSQLJSONEncoder)
else:
error = 'Query completed but it returned no data.'
json_data = None
except (select.error, OSError) as e:
error = "Query interrupted. Please retry."
json_data = None
except psycopg2.DatabaseError as e:
            error = str(e)
json_data = None
except (KeyboardInterrupt, InterruptException):
connection.cancel()
error = "Query cancelled by user."
json_data = None
finally:
connection.close()
return json_data, error
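# Illustrative sketch (not part of the original source): run_query() returns a
# (json_data, error) pair where exactly one element is None. The `runner`
# argument is assumed to be an already-configured PostgreSQL instance.
def _example_run_query(runner):
    json_data, error = runner.run_query('SELECT 1 AS x', user=None)
    if error is not None:
        raise Exception(error)
    return json_loads(json_data)['rows']  # e.g. [{'x': 1}]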
class Redshift(PostgreSQL):
@classmethod
def type(cls):
return "redshift"
def _get_connection(self):
sslrootcert_path = os.path.join(os.path.dirname(__file__), './files/redshift-ca-bundle.crt')
connection = psycopg2.connect(user=self.configuration.get('user'),
password=self.configuration.get('password'),
host=self.configuration.get('host'),
port=self.configuration.get('port'),
dbname=self.configuration.get('dbname'),
sslmode=self.configuration.get('sslmode', 'prefer'),
sslrootcert=sslrootcert_path,
async_=True)
return connection
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"host": {
"type": "string"
},
"port": {
"type": "number"
},
"dbname": {
"type": "string",
"title": "Database Name"
},
"sslmode": {
"type": "string",
"title": "SSL Mode",
"default": "prefer"
}
},
"order": ['host', 'port', 'user', 'password'],
"required": ["dbname", "user", "password", "host", "port"],
"secret": ["password"]
}
def _get_tables(self, schema):
# Use svv_columns to include internal & external (Spectrum) tables and views data for Redshift
# https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_COLUMNS.html
# Use HAS_SCHEMA_PRIVILEGE(), SVV_EXTERNAL_SCHEMAS and HAS_TABLE_PRIVILEGE() to filter
# out tables the current user cannot access.
# https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_SCHEMA_PRIVILEGE.html
# https://docs.aws.amazon.com/redshift/latest/dg/r_SVV_EXTERNAL_SCHEMAS.html
# https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_TABLE_PRIVILEGE.html
query = """
WITH tables AS (
SELECT DISTINCT table_name,
table_schema,
column_name,
ordinal_position AS pos
FROM svv_columns
WHERE table_schema NOT IN ('pg_internal','pg_catalog','information_schema')
)
SELECT table_name, table_schema, column_name
FROM tables
WHERE
HAS_SCHEMA_PRIVILEGE(table_schema, 'USAGE') AND
(
table_schema IN (SELECT schemaname FROM SVV_EXTERNAL_SCHEMAS) OR
HAS_TABLE_PRIVILEGE('"' || table_schema || '"."' || table_name || '"', 'SELECT')
)
ORDER BY table_name, pos
"""
self._get_definitions(schema, query)
return schema.values()
class CockroachDB(PostgreSQL):
@classmethod
def type(cls):
return "cockroach"
register(PostgreSQL)
register(Redshift)
register(CockroachDB)
| 31.581994
| 109
| 0.503869
|
f3a0ee3aa8206e35a384d717ca5d6c038a9c25f2
| 9,884
|
py
|
Python
|
Tiles/tilesdemo.py
|
davidoj/RL_Aggregation
|
d1f7fa01016660963e87dd4bcdb475a7b4aed466
|
[
"MIT"
] | null | null | null |
Tiles/tilesdemo.py
|
davidoj/RL_Aggregation
|
d1f7fa01016660963e87dd4bcdb475a7b4aed466
|
[
"MIT"
] | null | null | null |
Tiles/tilesdemo.py
|
davidoj/RL_Aggregation
|
d1f7fa01016660963e87dd4bcdb475a7b4aed466
|
[
"MIT"
] | 1
|
2020-06-22T22:49:11.000Z
|
2020-06-22T22:49:11.000Z
|
""" Tiles demo
This demo is good for illustrating or testing the tile routines.
The demo displays a graph where instead of lines, there is a dot for
each x,y pair in the graph. The x,y pairs are points whose tiles match the
point we are doing tiles for. There is a separate graph entry for each
of the following tile types: regular(square), stripes, diagonals, diamond,
logarithmic sized, and exponential sized.
The graph window first comes up with tiles for the point 2.0, 2.0 displayed. You
can click anywhere in the graph window to show the tiles for that point. You can
use the graph highlighting functions by pressing the space bar, and then the
arrow keys, to show the different tiles (some may be hidden by others, so this
is the only way to see some of them). There is a menu of other options for the
tile codes (e.g. collision tables, number of tilings). If you choose one of these,
a new window will be created, and you can move it side by side with the old one
to compare things.
When imported, a tile window is automatically created for you. There is a function to
create windows which you may wish to call:
showtiles(numtilings, memct, floats, ints, title, start, end, intervals)
where:
numtilings is the number of tilings to be done
memct is the memory size or collision table to use
floats are the (two) floating point numbers to do tiles for
ints is an optional list of integers to use in the tiling (defaults to none)
start is the starting point of the tile window (defaults to 0.0)
end is the end point of the tile window (defaults to 5.0)
intervals is the number of intervals between whole number points (default 10)
Note: don't make the start and end too far apart, or too many intervals
between. The program will call the tiles function for each interval between
the start and end points (in each direction) for each type of tiling and
compare it against the tiles returned by those functions for the original
point, so if you ask for too many, it will be VERY slow.
"""
import tiles
import fancytiles
import random
import RLtoolkit.Quickgraph.graph as graph
from RLtoolkit.G.g import *
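# Illustrative usage sketch (hypothetical values), wrapped in a function so
# that importing this demo does not open extra windows: show tiles for the
# point (1.5, 3.0) with 4 tilings over a 4096-entry memory.
def _example_show_four_tilings():
    showtiles(4, 4096, [1.5, 3.0], title="Tile Display, memory 4096")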
class Tileview(graph.Dataview):
"""Special graph view for tile display"""
    def gDrawView(self):
        self.parentgraph.gDrawView()
def gClickEventHandler(self, x, y):
print "clicked at ", x, y
self.newExample(x, y)
def newExample (self, x, y):
global inputarray, functionapproximator, examples
self.parentgraph.drawExample(x, y)
class TileDisplay(graph.Graph):
def __init__(self, x, y, numtilings=1, memct=8192, title="Tile Display", \
dataviewtype=Tileview, start=0.0, end=5.0, intervals=10, **kwargs):
title = title + " for "+str(numtilings)+" tilings"
graph.Graph.__init__(self, title, dataviewtype, **kwargs)
self.dataview.parentgraph = self
self.tilex = x
self.tiley = y
self.numtilings = numtilings
self.memct = memct
self.start = start
self.end = end
self.intervals = intervals
self.initDemo()
def gDrawView(self):
graph.Graph.gDrawView(self)
gDrawLineR(self.dataview, self.tilex, self.tiley, 0, .2, 'black')
gDrawLineR(self.dataview, self.tilex, self.tiley, 0, -.2, 'black')
gDrawLineR(self.dataview, self.tilex, self.tiley, .2, 0, 'black')
gDrawLineR(self.dataview, self.tilex, self.tiley, -.2, 0, 'black')
def drawExample(self, x, y):
self.tilex = x
self.tiley = y
graph.graph(self.calcTiledata(self.numtilings, self.memct, [x, y]), None, self)
gDrawLineR(self.dataview, x, y, 0, .2, 'black')
gDrawLineR(self.dataview, x, y, 0, -.2, 'black')
gDrawLineR(self.dataview, x, y, .2, 0, 'black')
gDrawLineR(self.dataview, x, y, -.2, 0, 'black')
def initDemo(self):
gClear(self)
gClear(self.dataview)
self.data = []
self.drawExample(self.tilex, self.tiley)
graph.xGraphLimits(self.start, self.end, self)
graph.yGraphLimits(self.start, self.end, self)
graph.graphPointsOnly(self)
graph.xTickmarks(1, self)
graph.yTickmarks(1, self)
graph.gridGraph(5, self)
def calcTiledata(self, numtilings, memct, floats, ints=[]):
samet = []
sametd = []
sametbd = []
sametdm = []
sametl = []
samete = []
samets = []
t = tiles.tiles(numtilings, memct, floats, ints)
tsx = fancytiles.stripetiles(numtilings, memct, [floats[0]], None, ints)
tsy = fancytiles.stripetiles(numtilings, memct, [floats[1]], None, ints)
td = fancytiles.diagonaltiles(numtilings, memct, floats, None, ints)
tbd = fancytiles.backdiagonaltiles(numtilings, memct, floats, None, ints)
tdm = fancytiles.diamondtiles(numtilings, memct, floats, None, ints)
tl = fancytiles.logtiles(numtilings, memct, floats, ints)
te = fancytiles.exptiles(numtilings, memct, floats, ints)
total = int((self.end - self.start) * self.intervals)
        for i in range(total):
            for j in range(total):
x = float(i)/self.intervals + self.start
y = float(j)/self.intervals + self.start
newfloats = [x, y]
if tiles.tiles(numtilings, memct, newfloats, ints) == t:
samet.append(newfloats)
if fancytiles.stripetiles(numtilings, memct, [x], None, ints) == tsx or \
fancytiles.stripetiles(numtilings, memct, [y], None, ints) == tsy:
samets.append(newfloats)
if fancytiles.diagonaltiles(numtilings, memct, newfloats, None, ints) == td:
sametd.append(newfloats)
if fancytiles.backdiagonaltiles(numtilings, memct, newfloats, None, ints) == tbd:
sametbd.append(newfloats)
if fancytiles.diamondtiles(numtilings, memct, newfloats, None, ints) == tdm:
sametdm.append(newfloats)
if fancytiles.logtiles(numtilings, memct, newfloats, ints) == tl:
sametl.append(newfloats)
if fancytiles.exptiles(numtilings, memct, newfloats, ints) == te:
samete.append(newfloats)
data = [samet, samets, sametd, sametbd, sametdm, sametl, samete]
return data
def showtiles(numtilings, memct, floats, ints=[], title="Tile Display", \
start=0.0, end=5.0, intervals=10):
w = TileDisplay(2.0, 2.0, numtilings, memct, title=title, start=start, \
end=end, intervals=intervals, gdViewport=(0, 20, 600, 620))
# should really have one for each type of tiling for test?
ctu = tiles.CollisionTable(4096, 'unsafe')
cts = tiles.CollisionTable(4096, 'safe')
ctss = tiles.CollisionTable(4096, 'super safe')
gAddMenu(GMENU, 'Tile Window', \
[['1 tiling, memory 1024', lambda: showtiles(1, 1024, [1.0, 2.0], title="Tile Display, memory 1024")], \
['2 tilings, memory 1024', lambda: showtiles(2, 1024, [1.0, 2.0], title="Tile Display, memory 1024")], \
['4 tilings, memory 1024', lambda: showtiles(4, 1024, [1.0, 2.0], title="Tile Display, memory 1024")], \
['8 tilings, memory 1024', lambda: showtiles(8, 1024, [1.0, 2.0], title="Tile Display, memory 1024")], \
['16 tilings, memory 1024', lambda: showtiles(16, 1024, [1.0, 2.0], title="Tile Display, memory 1024")], \
'---', \
['1 tiling, memory 2048', lambda: showtiles(1, 2048, [1.0, 2.0], title="Tile Display, memory 2048")], \
['2 tilings, memory 2048', lambda: showtiles(2, 2048, [1.0, 2.0], title="Tile Display, memory 2048")], \
['4 tilings, memory 2048', lambda: showtiles(4, 2048, [1.0, 2.0], title="Tile Display, memory 2048")], \
['8 tilings, memory 2048', lambda: showtiles(8, 2048, [1.0, 2.0], title="Tile Display, memory 2048")], \
['16 tilings, memory 2048', lambda: showtiles(16, 2048, [1.0, 2.0], title="Tile Display, memory 2048")], \
'---', \
['1 tiling, memory 4096', lambda: showtiles(1, 4096, [1.0, 2.0], title="Tile Display, memory 4096")], \
['2 tilings, memory 4096', lambda: showtiles(2, 4096, [1.0, 2.0], title="Tile Display, memory 4096")], \
['4 tilings, memory 4096', lambda: showtiles(4, 4096, [1.0, 2.0], title="Tile Display, memory 4096")], \
['8 tilings, memory 4096', lambda: showtiles(8, 4096, [1.0, 2.0], title="Tile Display, memory 4096")], \
['16 tilings, memory 4096', lambda: showtiles(16, 4096, [1.0, 2.0], title="Tile Display, memory 4096")], \
'---', \
['1 tiling, safe collision table', lambda: showtiles(1, cts, [1.0, 2.0], title="Tile Display, safe collision table")], \
['2 tilings, safe collision table', lambda: showtiles(2, cts, [1.0, 2.0], title="Tile Display, safe collision table")], \
'---', \
['1 tiling, super safe collision table', lambda: showtiles(1, ctss, [1.0, 2.0], title="Tile Display, super safe collision table")], \
['2 tilings, super safe collision table', lambda: showtiles(2, ctss, [1.0, 2.0], title="Tile Display, super safe collision table")], \
'---', \
['1 tiling, range -2 to 7, memory 4096', lambda: showtiles(1, 4096, [1.0, 2.0], start=-2.0, end=7.0, title="Tile Display, memory 4096")], \
['2 tilings, range -2 to 7, memory 4096', lambda: showtiles(2, 4096, [1.0, 2.0], start=-2.0, end=7.0, title="Tile Display, memory 4096")], \
'---', \
['Quit', gQuit]])
if __name__ == "__main__":
showtiles(1, 4096, [2.0, 2.0], title="Tile Display, memory 4096")
graph.gStartEventLoop()
| 53.139785
| 150
| 0.624949
|
885979730fea0f869d48d9171e2c13de8c4c2e60
| 112
|
py
|
Python
|
CodeSignal/test.py
|
JeffreyAsuncion/CodingProblems_Python
|
db71cb46b2579c1c65767a644a0ea989da4fa559
|
[
"MIT"
] | null | null | null |
CodeSignal/test.py
|
JeffreyAsuncion/CodingProblems_Python
|
db71cb46b2579c1c65767a644a0ea989da4fa559
|
[
"MIT"
] | null | null | null |
CodeSignal/test.py
|
JeffreyAsuncion/CodingProblems_Python
|
db71cb46b2579c1c65767a644a0ea989da4fa559
|
[
"MIT"
] | null | null | null |
# Check whether the sequence is strictly increasing
sequence = [1, 2, 3, 4, 5]
preNum = -99999999999
for num in sequence:
    if not num > preNum:
        print(False)
        break
    preNum = num  # remember the previous element so consecutive values are compared
else:
    print(True)
| 18.666667
| 24
| 0.625
|
9911b5533d8332c99a0309b889c58ebb42e79b25
| 12,362
|
py
|
Python
|
models/soft_refinedet.py
|
gdjmck/multitrident
|
437a6d46d49cec8552a4cb465b23ce387bf284d9
|
[
"Apache-2.0"
] | 106
|
2019-06-13T07:23:46.000Z
|
2021-05-20T02:52:37.000Z
|
models/soft_refinedet.py
|
yangheng111/multitrident
|
b11808af66ade1637822de53840ad4a16719a9f2
|
[
"Apache-2.0"
] | 7
|
2019-07-11T05:40:48.000Z
|
2021-02-06T15:17:07.000Z
|
models/soft_refinedet.py
|
yangheng111/multitrident
|
b11808af66ade1637822de53840ad4a16719a9f2
|
[
"Apache-2.0"
] | 26
|
2019-06-17T01:42:35.000Z
|
2021-11-02T12:13:58.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import *
from data import voc_refinedet, coco_refinedet
import os
class SoftRefineDet(nn.Module):
"""Single Shot Multibox Architecture
The network is composed of a base VGG network followed by the
added multibox conv layers. Each multibox layer branches into
1) conv2d for class conf scores
2) conv2d for localization predictions
3) associated priorbox layer to produce default bounding
boxes specific to the layer's feature map size.
See: https://arxiv.org/pdf/1512.02325.pdf for more details.
Args:
phase: (string) Can be "test" or "train"
size: input image size
        base: VGG16 layers for input, size of either 320 or 512
extras: extra layers that feed to multibox loc and conf layers
head: "multibox head" consists of loc and conf conv layers
"""
def __init__(self, phase, size, base, extras, ARM, ODM, TCB, AF, num_classes):
super(SoftRefineDet, self).__init__()
self.phase = phase
self.num_classes = num_classes
self.cfg = (coco_refinedet, voc_refinedet)[num_classes == 21]
self.priorbox = PriorBox(self.cfg[str(size)])
with torch.no_grad():
self.priors = self.priorbox.forward()
self.size = size
# SSD network
self.vgg = nn.ModuleList(base)
# Layer learns to scale the l2 normalized features from conv4_3
self.conv4_3_L2Norm = L2Norm(512, 10)
self.conv5_3_L2Norm = L2Norm(512, 8)
self.extras = nn.ModuleList(extras)
self.arm_loc = nn.ModuleList(ARM[0])
self.arm_conf = nn.ModuleList(ARM[1])
self.odm_loc = nn.ModuleList(ODM[0])
self.odm_conf = nn.ModuleList(ODM[1])
#self.tcb = nn.ModuleList(TCB)
self.tcb0 = nn.ModuleList(TCB[0])
self.tcb1 = nn.ModuleList(TCB[1])
self.tcb2 = nn.ModuleList(TCB[2])
self.af_loc = nn.ModuleList(AF[0])
self.af_conf = nn.ModuleList(AF[1])
if phase == 'test':
self.softmax = nn.Softmax(dim=-1)
self.detect = Detect_RefineDet(num_classes, self.size, 0, 1000, 0.01, 0.45, 0.01, 500)
def forward(self, x):
"""Applies network layers and ops on input image(s) x.
Args:
x: input image or batch of images. Shape: [batch,3,300,300].
Return:
Depending on phase:
test:
Variable(tensor) of output class label predictions,
confidence score, and corresponding location predictions for
each object detected. Shape: [batch,topk,7]
train:
list of concat outputs from:
1: confidence layers, Shape: [batch*num_priors,num_classes]
2: localization layers, Shape: [batch,num_priors*4]
3: priorbox layers, Shape: [2,num_priors*4]
"""
sources = list()
tcb_source = list()
arm_loc = list()
arm_conf = list()
odm_loc = list()
odm_conf = list()
# apply vgg up to conv4_3 relu and conv5_3 relu
for k in range(30):
x = self.vgg[k](x)
if 22 == k:
s = self.conv4_3_L2Norm(x)
sources.append(s)
elif 29 == k:
s = self.conv5_3_L2Norm(x)
sources.append(s)
# apply vgg up to fc7
for k in range(30, len(self.vgg)):
x = self.vgg[k](x)
sources.append(x)
# apply extra layers and cache source layer outputs
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
sources.append(x)
# apply ARM and ODM to source layers
for (x, l, c) in zip(sources, self.arm_loc, self.arm_conf):
arm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())
arm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())
arm_loc = torch.cat([o.view(o.size(0), -1) for o in arm_loc], 1)
arm_conf = torch.cat([o.view(o.size(0), -1) for o in arm_conf], 1)
#print([x.size() for x in sources])
# calculate TCB features
#print([x.size() for x in sources])
p = None
for k, v in enumerate(sources[::-1]):
s = v
for i in range(3):
s = self.tcb0[(3-k)*3 + i](s)
#print(s.size())
if k != 0:
u = p
u = self.tcb1[3-k](u)
s += u
for i in range(3):
s = self.tcb2[(3-k)*3 + i](s)
p = s
tcb_source.append(s)
#print([x.size() for x in tcb_source])
tcb_source.reverse()
# apply ODM to source layers
for (x, l, c) in zip(tcb_source, self.odm_loc, self.odm_conf):
odm_loc.append(l(x).permute(0, 2, 3, 1).contiguous())
odm_conf.append(c(x).permute(0, 2, 3, 1).contiguous())
odm_loc = torch.cat([o.view(o.size(0), -1) for o in odm_loc], 1)
odm_conf = torch.cat([o.view(o.size(0), -1) for o in odm_conf], 1)
#print(arm_loc.size(), arm_conf.size(), odm_loc.size(), odm_conf.size())
AF_loc=[]
AF_conf=[]
for (x, l, c) in zip(tcb_source, self.af_loc, self.af_conf):
AF_loc.append(l(x))
AF_conf.append(c(x))
if self.phase == "test":
#print(loc, conf)
output = self.detect(
arm_loc.view(arm_loc.size(0), -1, 4), # arm loc preds
self.softmax(arm_conf.view(arm_conf.size(0), -1,
2)), # arm conf preds
odm_loc.view(odm_loc.size(0), -1, 4), # odm loc preds
self.softmax(odm_conf.view(odm_conf.size(0), -1,
self.num_classes)), # odm conf preds
self.priors.type(type(x.data)) # default boxes
)
else:
output = (
arm_loc.view(arm_loc.size(0), -1, 4),
arm_conf.view(arm_conf.size(0), -1, 2),
odm_loc.view(odm_loc.size(0), -1, 4),
odm_conf.view(odm_conf.size(0), -1, self.num_classes),
self.priors
)
return output, (AF_loc, AF_conf)
def load_weights(self, base_file):
other, ext = os.path.splitext(base_file)
        if ext in ('.pkl', '.pth'):
print('Loading weights into state dict...')
self.load_state_dict(torch.load(base_file,
map_location=lambda storage, loc: storage))
print('Finished!')
else:
print('Sorry only .pth and .pkl files supported.')
# This function is derived from torchvision VGG make_layers()
# https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=3, dilation=3)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, size, i, batch_norm=False):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1],
kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
return layers
def arm_multibox(vgg, extra_layers, cfg):
arm_loc_layers = []
arm_conf_layers = []
vgg_source = [21, 28, -2]
for k, v in enumerate(vgg_source):
arm_loc_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 4, kernel_size=3, padding=1)]
arm_conf_layers += [nn.Conv2d(vgg[v].out_channels,
cfg[k] * 2, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 3):
arm_loc_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 4, kernel_size=3, padding=1)]
arm_conf_layers += [nn.Conv2d(v.out_channels, cfg[k]
* 2, kernel_size=3, padding=1)]
return (arm_loc_layers, arm_conf_layers)
def odm_multibox(vgg, extra_layers, cfg, num_classes):
odm_loc_layers = []
odm_conf_layers = []
vgg_source = [21, 28, -2]
for k, v in enumerate(vgg_source):
odm_loc_layers += [nn.Conv2d(256, cfg[k] * 4, kernel_size=3, padding=1)]
odm_conf_layers += [nn.Conv2d(256, cfg[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 3):
odm_loc_layers += [nn.Conv2d(256, cfg[k] * 4, kernel_size=3, padding=1)]
odm_conf_layers += [nn.Conv2d(256, cfg[k] * num_classes, kernel_size=3, padding=1)]
return (odm_loc_layers, odm_conf_layers)
def AF(num_classes):
conf_layers = [
nn.Conv2d(256, num_classes, kernel_size=3, padding=1),
nn.Conv2d(256, num_classes, kernel_size=3, padding=1),
nn.Conv2d(256, num_classes, kernel_size=3, padding=1),
nn.Conv2d(256, num_classes, kernel_size=3, padding=1)
]
loc_layers = [
nn.Conv2d(256, 4, kernel_size=3, padding=1),
nn.Conv2d(256, 4, kernel_size=3, padding=1),
nn.Conv2d(256, 4, kernel_size=3, padding=1),
nn.Conv2d(256, 4, kernel_size=3, padding=1)
]
return (loc_layers, conf_layers)
def add_tcb(cfg):
feature_scale_layers = []
feature_upsample_layers = []
feature_pred_layers = []
for k, v in enumerate(cfg):
feature_scale_layers += [nn.Conv2d(cfg[k], 256, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1)
]
feature_pred_layers += [nn.ReLU(inplace=True),
nn.Conv2d(256, 256, 3, padding=1),
nn.ReLU(inplace=True)
]
if k != len(cfg) - 1:
feature_upsample_layers += [nn.ConvTranspose2d(256, 256, 2, 2)]
return (feature_scale_layers, feature_upsample_layers, feature_pred_layers)
base = {
'320': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
}
extras = {
'320': [256, 'S', 512],
'512': [256, 'S', 512],
}
mbox = {
'320': [3, 3, 3, 3], # number of boxes per feature map location
'512': [3, 3, 3, 3], # number of boxes per feature map location
}
tcb = {
'320': [512, 512, 1024, 512],
'512': [512, 512, 1024, 512],
}
def build_softrefinedet(phase, size=320, num_classes=21):
if phase != "test" and phase != "train":
print("ERROR: Phase: " + phase + " not recognized")
return
if size != 320 and size != 512:
print("ERROR: You specified size " + repr(size) + ". However, " +
"currently only RefineDet320 and RefineDet512 is supported!")
return
base_ = vgg(base[str(size)], 3)
extras_ = add_extras(extras[str(size)], size, 1024)
ARM_ = arm_multibox(base_, extras_, mbox[str(size)])
ODM_ = odm_multibox(base_, extras_, mbox[str(size)], num_classes)
TCB_ = add_tcb(tcb[str(size)])
AF_ = AF(num_classes)
return SoftRefineDet(phase, size, base_, extras_, ARM_, ODM_, TCB_, AF_, num_classes)
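# Hedged usage sketch (not part of the original file): a minimal demo, assuming the
# companion `layers` and `data` modules imported above resolve as in this repository.
# The function is illustrative only and never called from this module.
def _demo_build_softrefinedet():
    net = build_softrefinedet('train', size=320, num_classes=21)
    dummy = torch.randn(1, 3, 320, 320)  # a single 320x320 RGB image
    (arm_loc, arm_conf, odm_loc, odm_conf, priors), (af_loc, af_conf) = net(dummy)
    # arm/odm outputs are flattened per-prior predictions; af_* are per-level feature maps
    return arm_loc.shape, odm_conf.shape, len(af_loc)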
| 39.369427
| 98
| 0.555088
|
3b442195b4bdbb38822a959ea54d0586b627e270
| 5,771
|
py
|
Python
|
utils/load_corpus.py
|
ssbeicdm/ssbe
|
5ae671962c3e50ea912eba02779e9a1c3ae37534
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
utils/load_corpus.py
|
ssbeicdm/ssbe
|
5ae671962c3e50ea912eba02779e9a1c3ae37534
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
utils/load_corpus.py
|
ssbeicdm/ssbe
|
5ae671962c3e50ea912eba02779e9a1c3ae37534
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import json
from gensim.models.doc2vec import LabeledSentence
import string as st
import spacy.en
parser = spacy.load('en')
class Lemmatizer(object):
def __init__(self):
self.parser = parser
def __call__(self, line):
line = self.parser(line)
return [t.lemma_ for t in line if not t.lemma_.isdigit()]
class Paragraphs(object):
def __init__(self, istex=None, ucbl=None, istex_mr=None, wiki=None, tokenize=False, max_nb_wiki_paragraphs=None,
paragraphs_per_article=None, istex_tag=None, wiki_tag=None):
self.istex = istex
self.ucbl = ucbl
self.istex_mr = istex_mr
self.wiki = wiki
        self.do_tokenize = tokenize  # renamed so the tokenize() method is not shadowed
self.max_nb_wiki_paragraphs = max_nb_wiki_paragraphs
self.paragraphs_per_article = paragraphs_per_article
self.istex_tag = istex_tag
self.wiki_tag = wiki_tag
self.index = dict()
self.inversed_index = dict()
self.paper_count = 0
self.wiki_count = 0
def __iter__(self):
## UCBL data loading
if self.ucbl is not None:
f=open(self.ucbl,'r')
data=json.load(f)
f.close()
for doc in data :
line = doc["title"] + " __ " + doc["abstract"]
                if self.do_tokenize:
u_line = self.to_unicode(line)
words_lst = self.tokenize(u_line)
try :
                        yield self.yield_element(words_lst, tag=self.istex_tag, count=self.paper_count)
self.index[self.paper_count] = 'UCBL_' + doc["istex_id"]
self.inversed_index['UCBL_' + doc["istex_id"]] = self.paper_count
except : continue
else:
yield line
self.index[self.paper_count] = 'UCBL_' + doc["istex_id"]
self.inversed_index['UCBL_' + doc["istex_id"]] = self.paper_count
self.paper_count += 1
self.ucbl_count = self.paper_count
## ISTEX Mental Rotation (istex_mr) data loading
if self.istex_mr is not None:
f=open(self.istex_mr,'r')
data=json.load(f)
f.close()
for doc in data :
line = doc["title"] + " __ " + doc["abstract"]
                if self.do_tokenize:
u_line = self.to_unicode(line)
words_lst = self.tokenize(u_line)
try :
                        yield self.yield_element(words_lst, tag=self.istex_tag, count=self.paper_count)
self.index[self.paper_count] = 'MRISTEX_' + doc["istex_id"]
self.inversed_index['MRISTEX_' + doc["istex_id"]] = self.paper_count
except : continue
else:
yield line
self.index[self.paper_count] = 'MRISTEX_' + doc["istex_id"]
self.inversed_index['MRISTEX_' + doc["istex_id"]] = self.paper_count
self.paper_count += 1
self.istex_mr_count = self.paper_count - self.ucbl_count
## ISTEX data loading
if self.istex is not None:
for fname in os.listdir(self.istex):
f=open(os.path.join(self.istex, fname),'r')
data=json.load(f)
f.close()
for doc in data :
line = doc["title"] + " __ " + doc["abstract"]
                    if self.do_tokenize:
u_line = self.to_unicode(line)
words_lst = self.tokenize(u_line)
try :
                            yield self.yield_element(words_lst, tag=self.istex_tag, count=self.paper_count)
self.index[self.paper_count] = 'ISTEX_' + doc["istex_id"]
self.inversed_index['ISTEX_' + doc["istex_id"]] = self.paper_count
except : continue
else:
yield line
self.index[self.paper_count] = 'ISTEX_' + doc["istex_id"]
self.inversed_index['ISTEX_' + doc["istex_id"]] = self.paper_count
self.paper_count += 1
self.istex_count = self.paper_count - (self.ucbl_count + self.istex_mr_count)
## Wikipedia data itteration
if self.wiki is not None:
wiki = self.wiki
for sub in os.listdir(wiki):
subdir = os.path.join(wiki, sub)
for fname in os.listdir(subdir):
paragraphs_per_article_count = 0
for line in open(os.path.join(subdir, fname)): # each line represent a paragraph in wiki article
if self.max_nb_wiki_paragraphs is not None and self.wiki_count >= self.max_nb_wiki_paragraphs:
break
if len(line.split()) > 2 and line[:8] != '<doc id=': # to verify if the line is a paragraph
                            if self.do_tokenize:
u_line = self.to_unicode(line)
words_lst = self.tokenize(u_line)
                                yield self.yield_element(words_lst, tag=self.wiki_tag, count=self.wiki_count)
else:
yield line
self.index[self.paper_count] = "WIKI" + '_' + str(self.wiki_count)
self.inversed_index["WIKI" + '_' + str(self.wiki_count)] = self.paper_count
self.paper_count += 1
self.wiki_count += 1
if self.paragraphs_per_article is not None:
paragraphs_per_article_count += 1
if paragraphs_per_article_count >= self.paragraphs_per_article:
break
print 'number of wikipedia paragraphs: ', self.wiki_count
print 'number of abstracts: ', self.istex_count + self.ucbl_count
print 'total number of paragraphs and abstracts: ', self.paper_count
print 'number of ucbl articles: ', self.ucbl_count
if self.istex_mr is not None:
print 'number of istex_mr articles: ', self.istex_mr_count
print 'total number of mental rotation articles: ', self.istex_mr_count + self.ucbl_count
print 'number of istex articles other than mental rotation ones: ', self.istex_count
def to_unicode(self,line) :
try :
line = line.encode('utf-8','ignore').decode('utf-8')
except:
line2 = ""
for w in line.split() :
try :
line2+=w.encode('utf-8','ignore').decode('utf-8')+" "
except :
if w[-1] in ['?','.','!'] :
line2 += w[-1] + " "
line = line2.rstrip() # remove last space if it exists
return line
def tokenize(self, line):
lst = line.lower().split()
lst = [ i for i in lst if not i.isdigit()]
return lst
    def yield_element(self, words_lst, tag=None, count=None):
        # Returns a single element; the call sites in __iter__ yield its result.
        if tag is not None:
            return LabeledSentence(words=words_lst, tags=[tag + '_%s' % str(count)])
        else:
            return words_lst
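# Hedged usage sketch (not part of the original file): the paths below are hypothetical;
# each JSON source holds records with "title", "abstract" and "istex_id" fields, and
# `istex` points to a directory of such files. Illustrative only, never called here.
def _demo_paragraphs():
    corpus = Paragraphs(istex='istex_json_dir/', ucbl='ucbl.json', istex_mr='istex_mr.json',
                        tokenize=False)
    paragraphs = [p for p in corpus]  # with tokenize=False each item is a "title __ abstract" string
    return paragraphs, corpus.index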
| 33.358382
| 113
| 0.668515
|
1dd321f38aee3be197c73d1209782cd484b66fae
| 2,958
|
py
|
Python
|
scripts/build/devtools_file_hashes.py
|
gonnavis/devtools-frontend
|
93f79d6e7b973f0b7aadde4547733ec0ce1cf11a
|
[
"BSD-3-Clause"
] | 2,389
|
2015-06-05T05:06:05.000Z
|
2022-03-31T07:08:56.000Z
|
scripts/build/devtools_file_hashes.py
|
gonnavis/devtools-frontend
|
93f79d6e7b973f0b7aadde4547733ec0ce1cf11a
|
[
"BSD-3-Clause"
] | 157
|
2016-02-03T06:03:25.000Z
|
2022-02-03T13:55:47.000Z
|
scripts/build/devtools_file_hashes.py
|
gonnavis/devtools-frontend
|
93f79d6e7b973f0b7aadde4547733ec0ce1cf11a
|
[
"BSD-3-Clause"
] | 388
|
2015-10-10T12:45:24.000Z
|
2022-03-22T18:11:08.000Z
|
# Copyright (c) 2014 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import os.path
try:
import json
except ImportError:
import simplejson as json
def save_hashes(hashes_file_path, hashes):
try:
with open(hashes_file_path, "wt") as hashes_file:
json.dump(hashes,
hashes_file,
indent=4,
separators=(",", ": "),
sort_keys=True)
except:
print("ERROR: Failed to write %s" % hashes_file_path)
raise
def load_hashes(hashes_file_path):
try:
with open(hashes_file_path, "r") as hashes_file:
hashes = json.load(hashes_file)
except:
return {}
return hashes
def calculate_file_hash(file_path):
with open(file_path, "rb") as file:
data = file.read()
md5_hash = hashlib.md5(data).hexdigest()
return md5_hash
def files_with_invalid_hashes(hash_file_path, file_paths):
hashes = load_hashes(hash_file_path)
result = []
for file_path in file_paths:
file_name = os.path.basename(file_path)
if calculate_file_hash(file_path) != hashes.get(file_name, ""):
result.append(file_path)
return result
def update_file_hashes(hash_file_path, file_paths):
hashes = {}
for file_path in file_paths:
file_name = os.path.basename(file_path)
hashes[file_name] = calculate_file_hash(file_path)
save_hashes(hash_file_path, hashes)
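# Hedged usage sketch (not part of the original file): the file names below are
# hypothetical. A typical caller validates a set of generated files against a stored
# hash file and refreshes the hashes afterwards. Illustrative only, never called here.
def _demo_refresh_hashes():
    files = ["front_end/Images/foo.svg", "front_end/Images/bar.svg"]
    stale = files_with_invalid_hashes("hashes.json", files)
    if stale:
        # ... regenerate the stale files here (hypothetical step) ...
        update_file_hashes("hashes.json", files)
    return stale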
| 35.638554
| 72
| 0.709263
|
7af55702ad763d64b19e020a6774eff221ecbdef
| 863
|
py
|
Python
|
setup.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 59
|
2015-02-03T20:50:59.000Z
|
2020-05-26T05:38:54.000Z
|
setup.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 3
|
2015-05-10T06:22:45.000Z
|
2016-12-06T02:20:58.000Z
|
setup.py
|
gwtaylor/pyautodiff
|
7973e26f1c233570ed4bb10d08634ec7378e2152
|
[
"BSD-3-Clause"
] | 11
|
2015-04-15T16:52:09.000Z
|
2017-06-28T12:10:39.000Z
|
import sys
from setuptools import setup
if sys.version < '3':
raise ImportError(
'This version of autodiff only support Python 3+. Please check out an '
'earlier branch for use with Python 2.')
setup(
name='autodiff',
version='0.5',
maintainer='Lowin Data Company',
maintainer_email='info@lowindata.com',
description=('Automatic differentiation for NumPy.'),
license='BSD-3',
url='https://github.com/LowinData/pyautodiff',
long_description = open('README.md').read(),
install_requires=['numpy', 'theano', 'meta'],
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
]
)
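# Hedged note (not part of the original file): from a source checkout this package
# would typically be installed with `pip install .` (Python 3 only, as enforced by
# the version check above), after which `import autodiff` becomes available.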
| 30.821429
| 79
| 0.641947
|
0291cb546b48ab1f479537d9a794310ee283e76c
| 2,930
|
py
|
Python
|
src/simmer/schemas/read_yml.py
|
holdengill/SImMER
|
7608a9cb044f2827f43f2d0c177e17faf8ff7720
|
[
"MIT"
] | null | null | null |
src/simmer/schemas/read_yml.py
|
holdengill/SImMER
|
7608a9cb044f2827f43f2d0c177e17faf8ff7720
|
[
"MIT"
] | 25
|
2021-01-21T06:51:58.000Z
|
2022-03-28T21:07:30.000Z
|
src/simmer/schemas/read_yml.py
|
holdengill/SImMER
|
7608a9cb044f2827f43f2d0c177e17faf8ff7720
|
[
"MIT"
] | null | null | null |
import os
import cerberus
import yaml
from .custom_validator import SimmerValidator
def normalize(yml_dict, validator, schema, plot_types):
"""
Inputs:
:yml_dict: (dictionary) the dictionary to be normalized against a schema
:validator: (SimmerValidator) the validator object used.
:schema: the schema against which the yml_dict is normalized.
:plot_types: (list of strings) the basic plot_types that must be in
the uppermost keys.
Outputs:
:normalized: normalized dictionary.
"""
validator.schema = schema
for plot_type in plot_types:
if plot_type not in yml_dict.keys():
yml_dict[plot_type] = {}
normalized = validator.normalized(yml_dict)
return normalized
def read_yml(yml_filename):
"""
Reads in a yaml file.
Inputs:
:yml_filename: (string) path to the yml.
Outputs:
:parsed_yml_file: (dictionary) key-value pairs as read from the yaml
file.
"""
file = open(yml_filename)
parsed_yaml_file = yaml.load(file, Loader=yaml.SafeLoader)
file.close()
return parsed_yaml_file
def validate_yml(schema_filename, yml_filename):
"""
Ensures that a given yml file is in accordance with the provided schema. In
essence, this ensures that no odd keys or fields are provided to the yml.
Inputs:
:schema_filename: (string) path to schema yaml.
:yml_filename: (string) path to yml yaml.
Outputs:
:validated: (bool) whether or not the yaml was successfully validated.
"""
parsed_schema = read_yml(schema_filename)
parsed_yml = read_yml(yml_filename)
s = SimmerValidator()
try:
s.validate(parsed_yml, parsed_schema)
validated = True
except cerberus.SchemaError:
validated = False
return validated
def get_plotting_args(yml_filename=None):
"""
Gets plotting args.
Inputs:
:yml_filename: (string) path of the plotting yml to be used.
Defaults to None.
Outputs:
:plotting_arg: (dictionary) all arguments that are related to plotting.
See the `plotting_schema.yml` schema for documentation of keys and values.
"""
my_path = os.path.abspath(os.path.dirname(__file__))
schema_filename = os.path.join(my_path, "plotting_schema.yml")
plot_types = ["intermediate", "final_im", "rots"]
if not yml_filename:
# the normalizer fills in all empty fields later on
yml_dict = {plot_type: [{}] for plot_type in plot_types}
else:
if validate_yml(schema_filename, yml_filename):
yml_dict = read_yml(yml_filename)
else:
raise cerberus.SchemaError("parsing plotting yml failed")
s = SimmerValidator()
schema = read_yml(schema_filename)
plotting_args = normalize(yml_dict, s, schema, plot_types)
return plotting_args
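# Hedged usage sketch (not part of the original file): callers can rely on the bundled
# defaults or pass their own plotting yml; "my_plotting.yml" below is a hypothetical
# file. Illustrative only, never called from this module.
def _demo_plotting_args():
    defaults = get_plotting_args()                 # normalized defaults from plotting_schema.yml
    custom = get_plotting_args("my_plotting.yml")  # validated against the schema first
    return defaults, custom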
| 29.009901
| 86
| 0.666894
|
865cb3d6e8455051d40eefdd8cc790be9c8b3bbd
| 1,926
|
py
|
Python
|
pages/source/conf.py
|
hep-statistics-standard/hep-statistics-standard.github.io
|
da74fbeab30c5bc0a503aa8a1a2d5a34e8c4530d
|
[
"BSD-3-Clause"
] | null | null | null |
pages/source/conf.py
|
hep-statistics-standard/hep-statistics-standard.github.io
|
da74fbeab30c5bc0a503aa8a1a2d5a34e8c4530d
|
[
"BSD-3-Clause"
] | null | null | null |
pages/source/conf.py
|
hep-statistics-standard/hep-statistics-standard.github.io
|
da74fbeab30c5bc0a503aa8a1a2d5a34e8c4530d
|
[
"BSD-3-Clause"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'HEP statistics standard'
copyright = '2020, Jonas Eschle, John Doe'
author = 'Jonas Eschle, John Doe'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
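# Hedged note (not part of the original file): with this conf.py living under
# pages/source/, the HTML pages would typically be built with something like
#   sphinx-build -b html pages/source pages/build
# (the output directory is an assumption; a Makefile generated by sphinx-quickstart
# would run an equivalent command).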
| 35.018182
| 79
| 0.665109
|
2ec9813707d9708d72322d079f84f1cfb724a817
| 3,820
|
py
|
Python
|
examples/quickstart_pytorch/client.py
|
zliel/flower
|
c5a4b2718bed5ec73a3838cc997c38b5ba4862e7
|
[
"Apache-2.0"
] | null | null | null |
examples/quickstart_pytorch/client.py
|
zliel/flower
|
c5a4b2718bed5ec73a3838cc997c38b5ba4862e7
|
[
"Apache-2.0"
] | null | null | null |
examples/quickstart_pytorch/client.py
|
zliel/flower
|
c5a4b2718bed5ec73a3838cc997c38b5ba4862e7
|
[
"Apache-2.0"
] | null | null | null |
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
import flwr as fl
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main():
"""Create model, load data, define Flower client, start Flower client."""
# Model (simple CNN adapted from 'PyTorch: A 60 Minute Blitz')
class Net(nn.Module):
def __init__(self) -> None:
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
net = Net().to(DEVICE)
# Load data (CIFAR-10)
trainloader, testloader = load_data()
# Flower client
class CifarClient(fl.client.NumPyClient):
def get_parameters(self):
return [val.cpu().numpy() for _, val in net.state_dict().items()]
def set_parameters(self, parameters):
params_dict = zip(net.state_dict().keys(), parameters)
state_dict = OrderedDict({k: torch.Tensor(v) for k, v in params_dict})
net.load_state_dict(state_dict, strict=True)
def fit(self, parameters, config):
self.set_parameters(parameters)
train(net, trainloader, epochs=1)
return self.get_parameters(), len(trainloader)
def evaluate(self, parameters, config):
self.set_parameters(parameters)
loss, accuracy = test(net, testloader)
return len(testloader), float(loss), float(accuracy)
# Start client
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient())
def train(net, trainloader, epochs):
"""Train the network on the training set."""
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for _ in range(epochs):
for images, labels in trainloader:
images, labels = images.to(DEVICE), labels.to(DEVICE)
optimizer.zero_grad()
loss = criterion(net(images), labels)
loss.backward()
optimizer.step()
def test(net, testloader):
"""Validate the network on the entire test set."""
criterion = torch.nn.CrossEntropyLoss()
correct, total, loss = 0, 0, 0.0
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
outputs = net(images)
loss += criterion(outputs, labels).item()
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = correct / total
return loss, accuracy
def load_data():
"""Load CIFAR-10 (training and test set)."""
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
trainset = CIFAR10("./dataset", train=True, download=True, transform=transform)
testset = CIFAR10("./dataset", train=False, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
testloader = DataLoader(testset, batch_size=32)
return trainloader, testloader
if __name__ == "__main__":
main()
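# Hedged note (not part of the original file): this client expects a Flower server
# listening on 0.0.0.0:8080. With the flwr release this quickstart targets, a minimal
# server was typically started along the lines of
#   fl.server.start_server("0.0.0.0:8080", config={"num_rounds": 3})
# but the exact server API is an assumption and differs between flwr versions.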
| 34.727273
| 87
| 0.613613
|
693d73bd5b3b1a4496d7a1badcff7f8abf98059f
| 33,588
|
py
|
Python
|
scripts/pymo/preprocessing.py
|
TomKingsfordUoA/Co-Speech_Gesture_Generation
|
a9f7a66a254e724cb10fc630c140fb71a91ce594
|
[
"MIT"
] | 19
|
2020-06-29T14:42:02.000Z
|
2022-02-15T14:35:11.000Z
|
scripts/pymo/preprocessing.py
|
TomKingsfordUoA/Co-Speech_Gesture_Generation
|
a9f7a66a254e724cb10fc630c140fb71a91ce594
|
[
"MIT"
] | 7
|
2020-08-05T04:14:49.000Z
|
2022-02-21T09:43:31.000Z
|
scripts/pymo/preprocessing.py
|
TomKingsfordUoA/Co-Speech_Gesture_Generation
|
a9f7a66a254e724cb10fc630c140fb71a91ce594
|
[
"MIT"
] | 6
|
2020-07-27T08:17:33.000Z
|
2022-02-15T13:28:11.000Z
|
'''
Preprocessing Tranformers Based on sci-kit's API
By Omid Alemi
Created on June 12, 2017
Modified by Simon Alexanderson, 2020-06-24
'''
import copy
import pandas as pd
import numpy as np
import transforms3d as t3d
import scipy.ndimage.filters as filters
from scipy.spatial.transform import Rotation as R
from sklearn.base import BaseEstimator, TransformerMixin
class MocapParameterizer(BaseEstimator, TransformerMixin):
def __init__(self, param_type = 'euler'):
'''
param_type = {'euler', 'quat', 'expmap', 'position', 'expmap2pos'}
'''
self.param_type = param_type
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("MocapParameterizer: " + self.param_type)
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._to_expmap(X)
elif self.param_type == 'quat':
return X
elif self.param_type == 'position':
return self._to_pos(X)
elif self.param_type == 'expmap2pos':
return self._expmap_to_pos(X)
else:
            raise ValueError('param types: euler, quat, expmap, position, expmap2pos')
# return X
def inverse_transform(self, X, copy=None):
if self.param_type == 'euler':
return X
elif self.param_type == 'expmap':
return self._expmap_to_euler(X)
        elif self.param_type == 'quat':
            raise NotImplementedError('quat2euler is not supported')
        elif self.param_type == 'position':
            raise NotImplementedError('positions 2 eulers is not supported')
        else:
            raise ValueError('param types: euler, quat, expmap, position')
def fix_rotvec(self, rots):
'''fix problems with discontinuous rotation vectors'''
new_rots = rots.copy()
# Compute angles and alternative rotation angles
angs = np.linalg.norm(rots, axis=1)
alt_angs=2*np.pi-angs
#find discontinuities by checking if the alternative representation is closer
d_angs = np.diff(angs, axis=0)
d_angs2 = alt_angs[1:]-angs[:-1]
swps = np.where(np.abs(d_angs2)<np.abs(d_angs))[0]
#reshape into intervals where we should flip rotation axis
isodd = swps.shape[0] % 2 == 1
if isodd:
swps=swps[:-1]
intv = 1+swps.reshape((swps.shape[0]//2, 2))
#flip rotations in selected intervals
for ii in range(intv.shape[0]):
new_ax = -rots[intv[ii,0]:intv[ii,1],:]/np.tile(angs[intv[ii,0]:intv[ii,1], None], (1,3))
new_angs = alt_angs[intv[ii,0]:intv[ii,1]]
new_rots[intv[ii,0]:intv[ii,1],:] = new_ax*np.tile(new_angs[:, None], (1,3))
return new_rots
def _to_pos(self, X):
'''Converts joints rotations in Euler angles to joint positions'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
pos_df = pd.DataFrame(index=euler_df.index)
# List the columns that contain rotation channels
rot_cols = [c for c in euler_df.columns if ('rotation' in c)]
# List the columns that contain position channels
pos_cols = [c for c in euler_df.columns if ('position' in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton)
tree_data = {}
for joint in track.traverse():
parent = track.skeleton[joint]['parent']
rot_order = track.skeleton[joint]['order']
# Get the rotation columns that belong to this joint
rc = euler_df[[c for c in rot_cols if joint in c]]
# Get the position columns that belong to this joint
pc = euler_df[[c for c in pos_cols if joint in c]]
# Make sure the columns are organized in xyz order
if rc.shape[1] < 3:
euler_values = [[0,0,0] for f in rc.iterrows()]
rot_order = "XYZ"
else:
euler_values = [[f[1]['%s_%srotation'%(joint, rot_order[0])],
f[1]['%s_%srotation'%(joint, rot_order[1])],
f[1]['%s_%srotation'%(joint, rot_order[2])]] for f in rc.iterrows()]
if pc.shape[1] < 3:
pos_values = [[0,0,0] for f in pc.iterrows()]
else:
pos_values =[[f[1]['%s_Xposition'%joint],
f[1]['%s_Yposition'%joint],
f[1]['%s_Zposition'%joint]] for f in pc.iterrows()]
# Convert the eulers to rotation matrices
rotmats = R.from_euler(rot_order, euler_values, degrees=True).inv()
tree_data[joint]=[
[], # to store the rotation matrix
[] # to store the calculated position
]
if track.root_name == joint:
tree_data[joint][0] = rotmats
tree_data[joint][1] = pos_values
else:
# for every frame i, multiply this joint's rotmat to the rotmat of its parent
tree_data[joint][0] = rotmats*tree_data[parent][0]
# add the position channel to the offset and store it in k, for every frame i
k = pos_values + np.asarray(track.skeleton[joint]['offsets'])
# multiply k to the rotmat of the parent for every frame i
q = tree_data[parent][0].inv().apply(k)
# add q to the position of the parent, for every frame i
tree_data[joint][1] = tree_data[parent][1] + q
# Create the corresponding columns in the new DataFrame
pos_df['%s_Xposition'%joint] = pd.Series(data=[e[0] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Yposition'%joint] = pd.Series(data=[e[1] for e in tree_data[joint][1]], index=pos_df.index)
pos_df['%s_Zposition'%joint] = pd.Series(data=[e[2] for e in tree_data[joint][1]], index=pos_df.index)
new_track = track.clone()
new_track.values = pos_df
Q.append(new_track)
return Q
def _to_expmap(self, X):
'''Converts Euler angles to Exponential Maps'''
Q = []
for track in X:
channels = []
titles = []
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
exp_df = euler_df.copy()
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = euler_df[[c for c in rots if joint in c]] # Get the columns that belong to this joint
rot_order = track.skeleton[joint]['order']
r1_col = '%s_%srotation'%(joint, rot_order[0])
r2_col = '%s_%srotation'%(joint, rot_order[1])
r3_col = '%s_%srotation'%(joint, rot_order[2])
exp_df.drop([r1_col, r2_col, r3_col], axis=1, inplace=True)
euler = [[f[1][r1_col], f[1][r2_col], f[1][r3_col]] for f in r.iterrows()]
exps = np.array(self.fix_rotvec(R.from_euler(rot_order.lower(), euler, degrees=True).as_rotvec()))
# Create the corresponding columns in the new DataFrame
exp_df.insert(loc=0, column='%s_gamma'%joint, value=pd.Series(data=[e[2] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_beta'%joint, value=pd.Series(data=[e[1] for e in exps], index=exp_df.index))
exp_df.insert(loc=0, column='%s_alpha'%joint, value=pd.Series(data=[e[0] for e in exps], index=exp_df.index))
#print(exp_df.columns)
new_track = track.clone()
new_track.values = exp_df
Q.append(new_track)
return Q
def _expmap_to_euler(self, X):
Q = []
for track in X:
channels = []
titles = []
exp_df = track.values
# Create a new DataFrame to store the exponential map rep
euler_df = exp_df.copy()
# List the columns that contain rotation channels
exp_params = [c for c in exp_df.columns if ( any(p in c for p in ['alpha', 'beta','gamma']) and 'Nub' not in c)]
# List the joints that are not end sites, i.e., have channels
joints = (joint for joint in track.skeleton if 'Nub' not in joint)
for joint in joints:
r = exp_df[[c for c in exp_params if joint in c]] # Get the columns that belong to this joint
euler_df.drop(['%s_alpha'%joint, '%s_beta'%joint, '%s_gamma'%joint], axis=1, inplace=True)
expmap = [[f[1]['%s_alpha'%joint], f[1]['%s_beta'%joint], f[1]['%s_gamma'%joint]] for f in r.iterrows()] # Make sure the columsn are organized in xyz order
rot_order = track.skeleton[joint]['order']
euler_rots = np.array(R.from_rotvec(expmap).as_euler(rot_order.lower(), degrees=True))
# Create the corresponding columns in the new DataFrame
euler_df['%s_%srotation'%(joint, rot_order[0])] = pd.Series(data=[e[0] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[1])] = pd.Series(data=[e[1] for e in euler_rots], index=euler_df.index)
euler_df['%s_%srotation'%(joint, rot_order[2])] = pd.Series(data=[e[2] for e in euler_rots], index=euler_df.index)
new_track = track.clone()
new_track.values = euler_df
Q.append(new_track)
return Q
class Mirror(BaseEstimator, TransformerMixin):
def __init__(self, axis="X", append=True):
"""
Mirrors the data
"""
self.axis = axis
self.append = append
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("Mirror: " + self.axis)
Q = []
if self.append:
for track in X:
Q.append(track)
for track in X:
channels = []
titles = []
if self.axis == "X":
signs = np.array([1,-1,-1])
if self.axis == "Y":
signs = np.array([-1,1,-1])
if self.axis == "Z":
signs = np.array([-1,-1,1])
euler_df = track.values
# Create a new DataFrame to store the exponential map rep
new_df = pd.DataFrame(index=euler_df.index)
# Copy the root positions into the new DataFrame
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
new_df[rxp] = pd.Series(data=-signs[0]*euler_df[rxp], index=new_df.index)
new_df[ryp] = pd.Series(data=-signs[1]*euler_df[ryp], index=new_df.index)
new_df[rzp] = pd.Series(data=-signs[2]*euler_df[rzp], index=new_df.index)
# List the columns that contain rotation channels
rots = [c for c in euler_df.columns if ('rotation' in c and 'Nub' not in c)]
#lft_rots = [c for c in euler_df.columns if ('Left' in c and 'rotation' in c and 'Nub' not in c)]
#rgt_rots = [c for c in euler_df.columns if ('Right' in c and 'rotation' in c and 'Nub' not in c)]
lft_joints = (joint for joint in track.skeleton if 'Left' in joint and 'Nub' not in joint)
rgt_joints = (joint for joint in track.skeleton if 'Right' in joint and 'Nub' not in joint)
new_track = track.clone()
for lft_joint in lft_joints:
rgt_joint = lft_joint.replace('Left', 'Right')
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%lft_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%rgt_joint], index=new_df.index)
new_df['%s_Yrotation'%lft_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%rgt_joint], index=new_df.index)
new_df['%s_Zrotation'%lft_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%rgt_joint], index=new_df.index)
new_df['%s_Xrotation'%rgt_joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%lft_joint], index=new_df.index)
new_df['%s_Yrotation'%rgt_joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%lft_joint], index=new_df.index)
new_df['%s_Zrotation'%rgt_joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%lft_joint], index=new_df.index)
# List the joints that are not left or right, i.e. are on the trunk
joints = (joint for joint in track.skeleton if 'Nub' not in joint and 'Left' not in joint and 'Right' not in joint)
for joint in joints:
# Create the corresponding columns in the new DataFrame
new_df['%s_Xrotation'%joint] = pd.Series(data=signs[0]*track.values['%s_Xrotation'%joint], index=new_df.index)
new_df['%s_Yrotation'%joint] = pd.Series(data=signs[1]*track.values['%s_Yrotation'%joint], index=new_df.index)
new_df['%s_Zrotation'%joint] = pd.Series(data=signs[2]*track.values['%s_Zrotation'%joint], index=new_df.index)
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
return X
class JointSelector(BaseEstimator, TransformerMixin):
'''
Allows for filtering the mocap data to include only the selected joints
'''
def __init__(self, joints, include_root=False):
self.joints = joints
self.include_root = include_root
def fit(self, X, y=None):
selected_joints = []
selected_channels = []
if self.include_root:
selected_joints.append(X[0].root_name)
selected_joints.extend(self.joints)
for joint_name in selected_joints:
selected_channels.extend([o for o in X[0].values.columns if (joint_name + "_") in o and 'Nub' not in o])
self.selected_joints = selected_joints
self.selected_channels = selected_channels
self.not_selected = X[0].values.columns.difference(selected_channels)
self.not_selected_values = {c:X[0].values[c].values[0] for c in self.not_selected}
self.orig_skeleton = X[0].skeleton
return self
def transform(self, X, y=None):
print("JointSelector")
Q = []
for track in X:
t2 = track.clone()
for key in track.skeleton.keys():
if key not in self.selected_joints:
t2.skeleton.pop(key)
t2.values = track.values[self.selected_channels]
Q.append(t2)
return Q
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
t2 = track.clone()
t2.skeleton = self.orig_skeleton
for d in self.not_selected:
t2.values[d] = self.not_selected_values[d]
Q.append(t2)
return Q
class Numpyfier(BaseEstimator, TransformerMixin):
'''
Just converts the values in a MocapData object into a numpy array
Useful for the final stage of a pipeline before training
'''
def __init__(self):
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Numpyfier")
Q = []
for track in X:
Q.append(track.values.values)
#print("Numpyfier:" + str(track.values.columns))
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
class Slicer(BaseEstimator, TransformerMixin):
'''
Slice the data into intervals of equal size
'''
def __init__(self, window_size, overlap=0.5):
self.window_size = window_size
self.overlap = overlap
pass
def fit(self, X, y=None):
self.org_mocap_ = X[0].clone()
self.org_mocap_.values.drop(self.org_mocap_.values.index, inplace=True)
return self
def transform(self, X, y=None):
print("Slicer")
Q = []
for track in X:
vals = track.values.values
nframes = vals.shape[0]
overlap_frames = (int)(self.overlap*self.window_size)
n_sequences = (nframes-overlap_frames)//(self.window_size-overlap_frames)
if n_sequences>0:
y = np.zeros((n_sequences, self.window_size, vals.shape[1]))
# extract sequences from the input data
for i in range(0,n_sequences):
frameIdx = (self.window_size-overlap_frames) * i
Q.append(vals[frameIdx:frameIdx+self.window_size,:])
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_mocap = self.org_mocap_.clone()
time_index = pd.to_timedelta([f for f in range(track.shape[0])], unit='s')
new_df = pd.DataFrame(data=track, index=time_index, columns=self.org_mocap_.values.columns)
new_mocap.values = new_df
Q.append(new_mocap)
return Q
class RootTransformer(BaseEstimator, TransformerMixin):
def __init__(self, method, position_smoothing=0, rotation_smoothing=0):
"""
Accepted methods:
abdolute_translation_deltas
pos_rot_deltas
"""
self.method = method
self.position_smoothing=position_smoothing
self.rotation_smoothing=rotation_smoothing
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
print("RootTransformer")
Q = []
for track in X:
if self.method == 'abdolute_translation_deltas':
new_df = track.values.copy()
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
x=track.values[xpcol].copy()
z=track.values[zpcol].copy()
if self.position_smoothing>0:
x_sm = filters.gaussian_filter1d(x, self.position_smoothing, axis=0, mode='nearest')
z_sm = filters.gaussian_filter1d(z, self.position_smoothing, axis=0, mode='nearest')
dx = pd.Series(data=x_sm, index=new_df.index).diff()
dz = pd.Series(data=z_sm, index=new_df.index).diff()
new_df[xpcol] = x-x_sm
new_df[zpcol] = z-z_sm
else:
dx = x.diff()
dz = z.diff()
new_df.drop([xpcol, zpcol], axis=1, inplace=True)
dx[0] = dx[1]
dz[0] = dz[1]
new_df[dxpcol] = dx
new_df[dzpcol] = dz
new_track = track.clone()
new_track.values = new_df
# end of abdolute_translation_deltas
elif self.method == 'hip_centric':
new_track = track.clone()
# Absolute columns
xp_col = '%s_Xposition'%track.root_name
yp_col = '%s_Yposition'%track.root_name
zp_col = '%s_Zposition'%track.root_name
xr_col = '%s_Xrotation'%track.root_name
yr_col = '%s_Yrotation'%track.root_name
zr_col = '%s_Zrotation'%track.root_name
new_df = track.values.copy()
all_zeros = np.zeros(track.values[xp_col].values.shape)
new_df[xp_col] = pd.Series(data=all_zeros, index=new_df.index)
new_df[yp_col] = pd.Series(data=all_zeros, index=new_df.index)
new_df[zp_col] = pd.Series(data=all_zeros, index=new_df.index)
new_df[xr_col] = pd.Series(data=all_zeros, index=new_df.index)
new_df[yr_col] = pd.Series(data=all_zeros, index=new_df.index)
new_df[zr_col] = pd.Series(data=all_zeros, index=new_df.index)
new_track.values = new_df
#print(new_track.values.columns)
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None, start_pos=None):
Q = []
#TODO: simplify this implementation
startx = 0
startz = 0
if start_pos is not None:
startx, startz = start_pos
for track in X:
new_track = track.clone()
if self.method == 'abdolute_translation_deltas':
new_df = new_track.values
xpcol = '%s_Xposition'%track.root_name
ypcol = '%s_Yposition'%track.root_name
zpcol = '%s_Zposition'%track.root_name
dxpcol = '%s_dXposition'%track.root_name
dzpcol = '%s_dZposition'%track.root_name
dx = track.values[dxpcol].values
dz = track.values[dzpcol].values
recx = [startx]
recz = [startz]
for i in range(dx.shape[0]-1):
recx.append(recx[i]+dx[i+1])
recz.append(recz[i]+dz[i+1])
# recx = [recx[i]+dx[i+1] for i in range(dx.shape[0]-1)]
# recz = [recz[i]+dz[i+1] for i in range(dz.shape[0]-1)]
# recx = dx[:-1] + dx[1:]
# recz = dz[:-1] + dz[1:]
if self.position_smoothing > 0:
new_df[xpcol] = pd.Series(data=new_df[xpcol]+recx, index=new_df.index)
new_df[zpcol] = pd.Series(data=new_df[zpcol]+recz, index=new_df.index)
else:
new_df[xpcol] = pd.Series(data=recx, index=new_df.index)
new_df[zpcol] = pd.Series(data=recz, index=new_df.index)
new_df.drop([dxpcol, dzpcol], axis=1, inplace=True)
new_track.values = new_df
# end of abdolute_translation_deltas
Q.append(new_track)
return Q
class RootCentricPositionNormalizer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
Q = []
for track in X:
new_track = track.clone()
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
projected_root_pos = track.values[[rxp, ryp, rzp]]
projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref
new_df = pd.DataFrame(index=track.values.index)
all_but_root = [joint for joint in track.skeleton if track.root_name not in joint]
# all_but_root = [joint for joint in track.skeleton]
for joint in all_but_root:
new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]-projected_root_pos[rxp], index=new_df.index)
new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]-projected_root_pos[ryp], index=new_df.index)
new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]-projected_root_pos[rzp], index=new_df.index)
# keep the root as it is now
new_df[rxp] = track.values[rxp]
new_df[ryp] = track.values[ryp]
new_df[rzp] = track.values[rzp]
new_track.values = new_df
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
new_track = track.clone()
rxp = '%s_Xposition'%track.root_name
ryp = '%s_Yposition'%track.root_name
rzp = '%s_Zposition'%track.root_name
projected_root_pos = track.values[[rxp, ryp, rzp]]
projected_root_pos.loc[:,ryp] = 0 # we want the root's projection on the floor plane as the ref
new_df = pd.DataFrame(index=track.values.index)
for joint in track.skeleton:
new_df['%s_Xposition'%joint] = pd.Series(data=track.values['%s_Xposition'%joint]+projected_root_pos[rxp], index=new_df.index)
new_df['%s_Yposition'%joint] = pd.Series(data=track.values['%s_Yposition'%joint]+projected_root_pos[ryp], index=new_df.index)
new_df['%s_Zposition'%joint] = pd.Series(data=track.values['%s_Zposition'%joint]+projected_root_pos[rzp], index=new_df.index)
new_track.values = new_df
Q.append(new_track)
return Q
class Flattener(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return np.concatenate(X, axis=0)
class ConstantsRemover(BaseEstimator, TransformerMixin):
'''
For now it just looks at the first track
'''
def __init__(self, eps = 1e-6):
self.eps = eps
def fit(self, X, y=None):
stds = X[0].values.std()
cols = X[0].values.columns.values
self.const_dims_ = [c for c in cols if (stds[c] < self.eps).any()]
self.const_values_ = {c:X[0].values[c].values[0] for c in cols if (stds[c] < self.eps).any()}
return self
def transform(self, X, y=None):
Q = []
for track in X:
t2 = track.clone()
#for key in t2.skeleton.keys():
# if key in self.ConstDims_:
# t2.skeleton.pop(key)
#print(track.values.columns.difference(self.const_dims_))
t2.values.drop(self.const_dims_, axis=1, inplace=True)
#t2.values = track.values[track.values.columns.difference(self.const_dims_)]
Q.append(t2)
return Q
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
t2 = track.clone()
for d in self.const_dims_:
t2.values[d] = self.const_values_[d]
# t2.values.assign(d=pd.Series(data=self.const_values_[d], index = t2.values.index))
Q.append(t2)
return Q
class ListStandardScaler(BaseEstimator, TransformerMixin):
def __init__(self, is_DataFrame=False):
self.is_DataFrame = is_DataFrame
def fit(self, X, y=None):
if self.is_DataFrame:
X_train_flat = np.concatenate([m.values for m in X], axis=0)
else:
X_train_flat = np.concatenate([m for m in X], axis=0)
self.data_mean_ = np.mean(X_train_flat, axis=0)
self.data_std_ = np.std(X_train_flat, axis=0)
return self
def transform(self, X, y=None):
Q = []
for track in X:
if self.is_DataFrame:
normalized_track = track.copy()
normalized_track.values = (track.values - self.data_mean_) / self.data_std_
else:
normalized_track = (track - self.data_mean_) / self.data_std_
Q.append(normalized_track)
if self.is_DataFrame:
return Q
else:
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
if self.is_DataFrame:
unnormalized_track = track.copy()
unnormalized_track.values = (track.values * self.data_std_) + self.data_mean_
else:
unnormalized_track = (track * self.data_std_) + self.data_mean_
Q.append(unnormalized_track)
if self.is_DataFrame:
return Q
else:
return np.array(Q)
class ListMinMaxScaler(BaseEstimator, TransformerMixin):
def __init__(self, is_DataFrame=False):
self.is_DataFrame = is_DataFrame
def fit(self, X, y=None):
if self.is_DataFrame:
X_train_flat = np.concatenate([m.values for m in X], axis=0)
else:
X_train_flat = np.concatenate([m for m in X], axis=0)
self.data_max_ = np.max(X_train_flat, axis=0)
self.data_min_ = np.min(X_train_flat, axis=0)
return self
def transform(self, X, y=None):
Q = []
for track in X:
if self.is_DataFrame:
normalized_track = track.copy()
normalized_track.values = (track.values - self.data_min_) / (self.data_max_ - self.data_min_)
else:
normalized_track = (track - self.data_min_) / (self.data_max_ - self.data_min_)
Q.append(normalized_track)
if self.is_DataFrame:
return Q
else:
return np.array(Q)
def inverse_transform(self, X, copy=None):
Q = []
for track in X:
if self.is_DataFrame:
unnormalized_track = track.copy()
unnormalized_track.values = (track.values * (self.data_max_ - self.data_min_)) + self.data_min_
else:
unnormalized_track = (track * (self.data_max_ - self.data_min_)) + self.data_min_
Q.append(unnormalized_track)
if self.is_DataFrame:
return Q
else:
return np.array(Q)
class DownSampler(BaseEstimator, TransformerMixin):
def __init__(self, tgt_fps, keep_all=True):
self.tgt_fps = tgt_fps
self.keep_all = keep_all
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
Q = []
for track in X:
orig_fps=round(1.0/track.framerate)
rate = orig_fps//self.tgt_fps
            if orig_fps % self.tgt_fps != 0:
                print("error: orig_fps (" + str(orig_fps) + ") is not divisible by tgt_fps (" + str(self.tgt_fps) + ")")
else:
print("downsampling with rate: " + str(rate))
#print(track.values.size)
for ii in range(0,rate):
new_track = track.clone()
new_track.values = track.values[ii:-1:rate].copy()
#print(new_track.values.size)
#new_track = track[0:-1:self.rate]
new_track.framerate = 1.0/self.tgt_fps
Q.append(new_track)
if not self.keep_all:
break
return Q
def inverse_transform(self, X, copy=None):
return X
class ReverseTime(BaseEstimator, TransformerMixin):
def __init__(self, append=True):
self.append = append
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
Q = []
if self.append:
for track in X:
Q.append(track)
for track in X:
new_track = track.clone()
new_track.values = track.values[-1::-1]
Q.append(new_track)
return Q
def inverse_transform(self, X, copy=None):
return X
#TODO: JointsSelector (x)
#TODO: SegmentMaker
#TODO: DynamicFeaturesAdder
#TODO: ShapeFeaturesAdder
#TODO: DataFrameNumpier (x)
class TemplateTransform(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X
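# Illustrative usage sketch: the transformers above follow the scikit-learn
# fit/transform protocol on lists of parsed motion-capture tracks, so they compose
# naturally in a Pipeline. `parsed_tracks` below is a hypothetical list of track
# objects of the kind these transformers already expect.
#
#   from sklearn.pipeline import Pipeline
#
#   preprocessing = Pipeline([
#       ('constants', ConstantsRemover(eps=1e-6)),             # drop near-constant channels
#       ('downsample', DownSampler(tgt_fps=30, keep_all=False)),
#       ('mirror', ReverseTime(append=True)),                   # cheap time-reversal augmentation
#   ])
#   processed_tracks = preprocessing.fit_transform(parsed_tracks)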
| 36.508696
| 171
| 0.550881
|
3dea639187ef62778602bdd192cf5f2e2f49db82
| 8,421
|
py
|
Python
|
tests/queries/query_test.py
|
Taschenbergerm/ClickHouse
|
d15bac31ff462a7c5fde36d5e58a3683e7b45135
|
[
"Apache-2.0"
] | 3
|
2021-02-16T13:50:34.000Z
|
2021-03-19T12:22:52.000Z
|
tests/queries/query_test.py
|
Taschenbergerm/ClickHouse
|
d15bac31ff462a7c5fde36d5e58a3683e7b45135
|
[
"Apache-2.0"
] | null | null | null |
tests/queries/query_test.py
|
Taschenbergerm/ClickHouse
|
d15bac31ff462a7c5fde36d5e58a3683e7b45135
|
[
"Apache-2.0"
] | 1
|
2021-02-16T21:24:34.000Z
|
2021-02-16T21:24:34.000Z
|
import pytest
import difflib
import os
import random
import string
import subprocess
import sys
SKIP_LIST = [
    # these two tests hang everything
"00600_replace_running_query",
"00987_distributed_stack_overflow",
# just fail
"00505_secure",
"00505_shard_secure",
"00646_url_engine",
"00725_memory_tracking", # BROKEN
"00834_cancel_http_readonly_queries_on_client_close",
"00933_test_fix_extra_seek_on_compressed_cache",
"00965_logs_level_bugfix",
"00965_send_logs_level_concurrent_queries",
"00990_hasToken",
"00990_metric_log_table_not_empty",
"01014_lazy_database_concurrent_recreate_reattach_and_show_tables",
"01018_Distributed__shard_num",
"01018_ip_dictionary",
"01050_clickhouse_dict_source_with_subquery",
"01053_ssd_dictionary",
"01054_cache_dictionary_overflow_cell",
"01057_http_compression_prefer_brotli",
"01080_check_for_error_incorrect_size_of_nested_column",
"01083_expressions_in_engine_arguments",
"01086_odbc_roundtrip",
"01088_benchmark_query_id",
"01098_temporary_and_external_tables",
"01099_parallel_distributed_insert_select",
"01103_check_cpu_instructions_at_startup",
"01114_database_atomic",
"01148_zookeeper_path_macros_unfolding",
"01181_db_atomic_drop_on_cluster", # tcp port in reference
"01280_ssd_complex_key_dictionary",
"01293_client_interactive_vertical_multiline", # expect-test
"01293_client_interactive_vertical_singleline", # expect-test
"01293_system_distribution_queue", # FLAKY
"01293_show_clusters",
"01294_lazy_database_concurrent_recreate_reattach_and_show_tables",
"01294_system_distributed_on_cluster",
"01300_client_save_history_when_terminated", # expect-test
"01304_direct_io",
"01306_benchmark_json",
"01035_lc_empty_part_bug", # FLAKY
"01320_create_sync_race_condition_zookeeper",
"01355_CSV_input_format_allow_errors",
"01370_client_autocomplete_word_break_characters", # expect-test
"01376_GROUP_BY_injective_elimination_dictGet",
"01393_benchmark_secure_port",
"01418_custom_settings",
"01451_wrong_error_long_query",
"01455_opentelemetry_distributed",
"01473_event_time_microseconds",
"01474_executable_dictionary",
"01507_clickhouse_server_start_with_embedded_config",
"01514_distributed_cancel_query_on_error",
"01520_client_print_query_id", # expect-test
"01526_client_start_and_exit", # expect-test
"01527_dist_sharding_key_dictGet_reload",
"01545_url_file_format_settings",
"01553_datetime64_comparison",
"01555_system_distribution_queue_mask",
"01558_ttest_scipy",
"01561_mann_whitney_scipy",
"01582_distinct_optimization",
"01599_multiline_input_and_singleline_comments", # expect-test
"01601_custom_tld",
"01610_client_spawn_editor", # expect-test
"01676_clickhouse_client_autocomplete", # expect-test (partially)
"01683_text_log_deadlock", # secure tcp
]
def check_result(result, error, return_code, reference, replace_map):
if replace_map:
for old, new in replace_map.items():
result = result.replace(old.encode('utf-8'), new.encode('utf-8'))
if return_code != 0:
try:
print(error.decode('utf-8'), file=sys.stderr)
except UnicodeDecodeError:
print(error.decode('latin1'), file=sys.stderr) # encoding with 1 symbol per 1 byte, covering all values
pytest.fail('Client died unexpectedly with code {code}'.format(code=return_code), pytrace=False)
elif result != reference:
pytest.fail("Query output doesn't match reference:{eol}{diff}".format(
eol=os.linesep,
diff=os.linesep.join(l.strip() for l in difflib.unified_diff(reference.decode('utf-8').splitlines(),
result.decode('utf-8').splitlines(),
fromfile='expected', tofile='actual'))),
pytrace=False)
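# A brief, hypothetical illustration of the `replace_map` mechanic: the tests below run
# against a randomly named database, and the map rewrites that name back to `default`
# before the output is diffed against the stored reference.
#
#   check_result(result=b'test_ab12cd34\t1\n', error=b'', return_code=0,
#                reference=b'default\t1\n',
#                replace_map={'test_ab12cd34': 'default'})   # passes: names match after substitution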
def run_client(bin_prefix, port, database, query, reference, replace_map=None):
# We can't use `text=True` since some tests may return binary data
client = subprocess.Popen([bin_prefix + '-client', '--port', str(port), '-d', database, '-m', '-n', '--testmode'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, error = client.communicate(query.encode('utf-8'))
assert client.returncode is not None, "Client should exit after processing all queries"
check_result(result, error, client.returncode, reference, replace_map)
def run_shell(bin_prefix, server, database, path, reference, replace_map=None):
env = {
'CLICKHOUSE_BINARY': bin_prefix,
'CLICKHOUSE_DATABASE': database,
'CLICKHOUSE_PORT_TCP': str(server.tcp_port),
'CLICKHOUSE_PORT_TCP_SECURE': str(server.tcps_port),
'CLICKHOUSE_PORT_TCP_WITH_PROXY': str(server.proxy_port),
'CLICKHOUSE_PORT_HTTP': str(server.http_port),
'CLICKHOUSE_PORT_INTERSERVER': str(server.inter_port),
'CLICKHOUSE_TMP': server.tmp_dir,
'CLICKHOUSE_CONFIG_CLIENT': server.client_config
}
shell = subprocess.Popen([path], env=env, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result, error = shell.communicate()
assert shell.returncode is not None, "Script should exit after executing all commands"
check_result(result, error, shell.returncode, reference, replace_map)
def random_str(length=10):
alphabet = string.ascii_lowercase + string.digits
random.seed(os.urandom(8))
return ''.join(random.choice(alphabet) for _ in range(length))
def test_sql_query(bin_prefix, sql_query, standalone_server):
for test in SKIP_LIST:
if test in sql_query:
pytest.skip("Test matches skip-list: " + test)
return
tcp_port = standalone_server.tcp_port
query_path = sql_query + ".sql"
reference_path = sql_query + ".reference"
if not os.path.exists(reference_path):
pytest.skip('No .reference file found')
with open(query_path, 'r') as file:
query = file.read()
with open(reference_path, 'rb') as file:
reference = file.read()
random_name = 'test_{random}'.format(random=random_str())
run_client(bin_prefix, tcp_port, 'default', 'CREATE DATABASE {random};'.format(random=random_name), b'')
run_client(bin_prefix, tcp_port, random_name, query, reference, {random_name: 'default'})
query = "SELECT 'SHOW ORPHANED TABLES'; SELECT name FROM system.tables WHERE database != 'system' ORDER BY (database, name);"
run_client(bin_prefix, tcp_port, 'default', query, b'SHOW ORPHANED TABLES\n')
query = 'DROP DATABASE {random};'.format(random=random_name)
run_client(bin_prefix, tcp_port, 'default', query, b'')
query = "SELECT 'SHOW ORPHANED DATABASES'; SHOW DATABASES;"
run_client(bin_prefix, tcp_port, 'default', query, b'SHOW ORPHANED DATABASES\ndefault\nsystem\n')
def test_shell_query(bin_prefix, shell_query, standalone_server):
for test in SKIP_LIST:
if test in shell_query:
pytest.skip("Test matches skip-list: " + test)
return
tcp_port = standalone_server.tcp_port
shell_path = shell_query + ".sh"
reference_path = shell_query + ".reference"
if not os.path.exists(reference_path):
pytest.skip('No .reference file found')
with open(reference_path, 'rb') as file:
reference = file.read()
random_name = 'test_{random}'.format(random=random_str())
query = 'CREATE DATABASE {random};'.format(random=random_name)
run_client(bin_prefix, tcp_port, 'default', query, b'')
run_shell(bin_prefix, standalone_server, random_name, shell_path, reference, {random_name: 'default'})
query = "SELECT 'SHOW ORPHANED TABLES'; SELECT name FROM system.tables WHERE database != 'system' ORDER BY (database, name);"
run_client(bin_prefix, tcp_port, 'default', query, b'SHOW ORPHANED TABLES\n')
query = 'DROP DATABASE {random};'.format(random=random_name)
run_client(bin_prefix, tcp_port, 'default', query, b'')
query = "SELECT 'SHOW ORPHANED DATABASES'; SHOW DATABASES;"
run_client(bin_prefix, tcp_port, 'default', query, b'SHOW ORPHANED DATABASES\ndefault\nsystem\n')
| 41.279412
| 129
| 0.710248
|
96ff8aa15c0c3219a7708ff43d19a23314413270
| 3,494
|
py
|
Python
|
hplip-3.20.3/ui4/pqdiagdialog.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
hplip-3.20.3/ui4/pqdiagdialog.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | 1
|
2021-11-20T16:33:39.000Z
|
2021-11-20T16:33:39.000Z
|
hplip-3.20.3/ui4/pqdiagdialog.py
|
Deril-Pana/wikiBlackcoinNL
|
9633307f0b485c27feae5da242944adf450e8963
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2015 HP Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Don Welch
#
# StdLib
import operator
# Local
from base.g import *
from base import device, utils, maint
from prnt import cups
from base.codes import *
from .ui_utils import *
# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Ui
from .pqdiagdialog_base import Ui_Dialog
class PQDiagDialog(QDialog, Ui_Dialog):
def __init__(self, parent, device_uri):
QDialog.__init__(self, parent)
self.setupUi(self)
self.device_uri = device_uri
self.initUi()
QTimer.singleShot(0, self.updateUi)
def initUi(self):
# connect signals/slots
self.connect(self.CancelButton, SIGNAL("clicked()"), self.CancelButton_clicked)
self.connect(self.RunButton, SIGNAL("clicked()"), self.RunButton_clicked)
self.connect(self.DeviceComboBox, SIGNAL("DeviceUriComboBox_noDevices"), self.DeviceUriComboBox_noDevices)
self.connect(self.DeviceComboBox, SIGNAL("DeviceUriComboBox_currentChanged"), self.DeviceUriComboBox_currentChanged)
self.DeviceComboBox.setFilter({'pq-diag-type': (operator.gt, 0)})
# Application icon
self.setWindowIcon(QIcon(load_pixmap('hp_logo', '128x128')))
if self.device_uri:
self.DeviceComboBox.setInitialDevice(self.device_uri)
def updateUi(self):
self.DeviceComboBox.updateUi()
self.LoadPaper.setButtonName(self.__tr("Run"))
self.LoadPaper.updateUi()
def DeviceUriComboBox_currentChanged(self, device_uri):
self.device_uri = device_uri
def DeviceUriComboBox_noDevices(self):
FailureUI(self, self.__tr("<b>No devices that support print quality diagnostics found.</b><p>Click <i>OK</i> to exit.</p>"))
self.close()
def CancelButton_clicked(self):
self.close()
def RunButton_clicked(self):
d = None
try:
try:
d = device.Device(self.device_uri)
except Error:
CheckDeviceUI(self)
return
pqdiag_type = d.pq_diag_type
try:
d.open()
except Error:
CheckDeviceUI(self)
else:
if d.isIdleAndNoError():
if pqdiag_type == 1:
maint.printQualityDiagType1(d, lambda : True)
elif pqdiag_type == 2:
maint.printQualityDiagType2(d, lambda : True)
else:
CheckDeviceUI(self)
finally:
if d is not None:
d.close()
self.close()
#
# Misc
#
def __tr(self,s,c = None):
return qApp.translate("PQDiagDialog",s,c)
| 27.952
| 132
| 0.638523
|
a3833ff5244ac9c3a4d108da7319e6ecac8e7707
| 8,009
|
gyp
|
Python
|
build/apk_tests.gyp
|
bofeng-song/webrtc
|
cd06ce5490635ef5ef953b17eabb6e833eed5b76
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
build/apk_tests.gyp
|
bofeng-song/webrtc
|
cd06ce5490635ef5ef953b17eabb6e833eed5b76
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
build/apk_tests.gyp
|
bofeng-song/webrtc
|
cd06ce5490635ef5ef953b17eabb6e833eed5b76
|
[
"DOC",
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This file exists in two versions. A no-op version under
# webrtc/build/apk_tests_noop.gyp and this one. This gyp file builds the apk
# unit tests (for Android) assuming that WebRTC is built inside a Chromium
# workspace. The no-op version is included when building WebRTC without
# Chromium. This is a workaround for the fact that 'includes' don't expand
# variables and that the relative location of apk_test.gypi is different for
# WebRTC when built as part of Chromium and when it is built without Chromium.
{
'includes': [
'common.gypi',
],
'targets': [
{
'target_name': 'audio_decoder_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'audio_decoder_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)audio_decoder_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/modules/modules.gyp:audio_decoder_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'common_audio_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'common_audio_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)common_audio_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/common_audio/common_audio.gyp:common_audio_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'common_video_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'common_video_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)common_video_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/common_video/common_video_unittests.gyp:common_video_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'modules_tests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'modules_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)modules_tests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/modules/modules.gyp:modules_tests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'modules_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'modules_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)modules_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/modules/modules.gyp:modules_unittests',
'audio_device_java',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'rtc_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'rtc_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)rtc_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/webrtc.gyp:rtc_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'system_wrappers_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'system_wrappers_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)system_wrappers_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/system_wrappers/system_wrappers_tests.gyp:system_wrappers_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'test_support_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'test_support_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)test_support_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/test/test.gyp:test_support_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'tools_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'tools_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)tools_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/tools/tools.gyp:tools_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'video_engine_core_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'video_engine_core_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)video_engine_core_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/video_engine/video_engine_core_unittests.gyp:video_engine_core_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'video_engine_tests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'video_engine_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)video_engine_tests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/webrtc.gyp:video_engine_tests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'voice_engine_unittests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'voice_engine_unittests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)voice_engine_unittests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/voice_engine/voice_engine.gyp:voice_engine_unittests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'webrtc_perf_tests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'webrtc_perf_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)webrtc_perf_tests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/webrtc.gyp:webrtc_perf_tests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'webrtc_nonparallel_tests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'webrtc_nonparallel_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)webrtc_nonparallel_tests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/webrtc.gyp:webrtc_nonparallel_tests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'audio_codec_speed_tests_apk',
'type': 'none',
'variables': {
'test_suite_name': 'audio_codec_speed_tests',
'input_shlib_path': '<(SHARED_LIB_DIR)/<(SHARED_LIB_PREFIX)audio_codec_speed_tests<(SHARED_LIB_SUFFIX)',
},
'dependencies': [
'<(webrtc_root)/modules/modules.gyp:audio_codec_speed_tests',
],
'includes': [
'../../build/apk_test.gypi',
],
},
{
'target_name': 'audio_device_java',
'type': 'none',
'variables': {
'java_in_dir': '<(webrtc_root)/modules/audio_device/android/java',
'additional_src_dirs': [ '<(webrtc_root)/base/java/src', ],
'never_lint': 1,
},
'includes': [
'../../build/java.gypi',
],
},
],
}
| 32.294355
| 116
| 0.601324
|
9a9cf757420aecdf825267cef948df7a3819ecd4
| 5,069
|
py
|
Python
|
torch_glow/tests/nodes/quantized_batchnorm2d_test.py
|
YaronBenAtar/glow
|
a13706a4239fa7eaf059c670dc573e3eb0768f86
|
[
"Apache-2.0"
] | 2,838
|
2018-05-02T16:57:22.000Z
|
2022-03-31T14:35:26.000Z
|
torch_glow/tests/nodes/quantized_batchnorm2d_test.py
|
YaronBenAtar/glow
|
a13706a4239fa7eaf059c670dc573e3eb0768f86
|
[
"Apache-2.0"
] | 4,149
|
2018-05-02T17:50:14.000Z
|
2022-03-31T23:56:43.000Z
|
torch_glow/tests/nodes/quantized_batchnorm2d_test.py
|
LaudateCorpus1/glow-1
|
cda5383b1609ebad1a3631ca77b41b8a863443d4
|
[
"Apache-2.0"
] | 685
|
2018-05-02T16:54:09.000Z
|
2022-03-24T01:12:24.000Z
|
# Copyright (c) Glow Contributors. See CONTRIBUTORS file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
from torch.ao.quantization import QConfig, observer
my_qconfig = QConfig(
activation=observer.default_observer,
weight=observer.HistogramObserver.with_args(dtype=torch.qint8, reduce_range=False),
)
class TestQuantizedBatchNorm2D(utils.TorchGlowTestCase):
def test_batchnorm_basic(self):
"""
        Basic test of the PyTorch 2D batchnorm Node on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm2d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = torch.nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
qy = self.relu(qy)
y = self.dq(qy)
return y
C = 7
in_scale = 0.102
out_scale = 0.003
in_zero_point = -37
out_zero_point = 3
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((6, C, 43, 52), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
def test_batchnorm_with_weights(self):
"""
Test of the PyTorch 2D batchnorm Node with weights and biases on Glow.
"""
class SimpleQuantizedBatchNorm(nn.Module):
def __init__(
self,
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
):
super(SimpleQuantizedBatchNorm, self).__init__()
self.qconfig = my_qconfig
self.batchnorm = nn.BatchNorm2d(C)
self.batchnorm.scale = out_scale
self.batchnorm.zero_point = out_zero_point
self.batchnorm.weight = nn.Parameter(weight)
self.batchnorm.bias = nn.Parameter(bias)
self.batchnorm.running_mean = running_mean
self.batchnorm.running_var = running_var
self.relu = nn.ReLU()
self.q = torch.ao.quantization.QuantStub()
self.q.scale = in_scale
self.q.zero_point = in_zero_point
self.dq = torch.ao.quantization.DeQuantStub()
def forward(self, x):
qx = self.q(x)
qy = self.batchnorm(qx)
y = self.dq(qy)
return y
C = 11
in_scale = 0.234
out_scale = 0.003
in_zero_point = -10
out_zero_point = -5
weight = torch.ones(C) + torch.rand(C) * 0.001
bias = torch.rand(C) * 0.0001
running_mean = torch.zeros(C)
running_var = torch.ones(C)
inputs = torch.randn((6, C, 33, 42), requires_grad=False)
model = SimpleQuantizedBatchNorm(
C,
weight,
bias,
running_mean,
running_var,
in_scale,
in_zero_point,
out_scale,
out_zero_point,
)
model.eval()
utils.compare_tracing_methods(
model,
inputs,
skip_to_glow=True,
)
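# For context, a sketch of the eager-mode quantization flow these Quant/DeQuant stubs
# normally take part in (hypothetical `float_model` and `calibration_batches`; the
# tests above instead set scale/zero_point by hand and compare traced behaviour):
#
#   float_model.qconfig = my_qconfig
#   float_model.eval()
#   prepared = torch.ao.quantization.prepare(float_model)
#   for batch in calibration_batches:      # observers record activation ranges
#       prepared(batch)
#   quantized = torch.ao.quantization.convert(prepared)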
| 31.68125
| 87
| 0.548234
|
7302bc79ef228952ee8276138a669408f569f2c8
| 2,321
|
py
|
Python
|
scraper.py
|
di37/selenium-youtube-scraper
|
0a79af0ed89c857475d99e261dc2277f3373a95d
|
[
"MIT"
] | null | null | null |
scraper.py
|
di37/selenium-youtube-scraper
|
0a79af0ed89c857475d99e261dc2277f3373a95d
|
[
"MIT"
] | null | null | null |
scraper.py
|
di37/selenium-youtube-scraper
|
0a79af0ed89c857475d99e261dc2277f3373a95d
|
[
"MIT"
] | null | null | null |
import smtplib
import json
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
YOUTUBE_TRENDING_URL = 'https://www.youtube.com/feed/trending'
def get_driver():
chrome_options = Options()
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-dev-shm-usage')
driver = webdriver.Chrome(options=chrome_options)
return driver
def get_videos(driver):
VIDEO_DIV_TAG = 'ytd-video-renderer'
driver.get(YOUTUBE_TRENDING_URL)
videos = driver.find_elements(By.TAG_NAME, VIDEO_DIV_TAG)
return videos
def parse_video(video):
# title, url, thumbnail_url, channel, views, uploaded,
# description
title_tag = video.find_element(By.ID, 'video-title')
title = title_tag.text
URL = title_tag.get_attribute('href')
thumbnail_url_tag = video.find_element(By.TAG_NAME, 'img')
thumbnail_url = thumbnail_url_tag.get_attribute('src')
metablock = video.find_element(By.CLASS_NAME, 'ytd-video-meta-block').text.split('\n')
channel_name = metablock[0]
views = metablock[1]
uploaded = metablock[2]
description = video.find_element(By.ID, 'description-text').text
return {
'Title': title,
'Url': URL,
'Thumbnail_url': thumbnail_url,
'Channel_name': channel_name,
'Views': views,
'Uploaded': uploaded,
'Description': description
}
def send_email(body):
EMAIL_ADDRESS = 'd.isham.993@gmail.com'
EMAIL_PASSWORD = os.environ['GMAIL_PASS']
try:
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.ehlo()
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
subject = 'YouTube top 10 trending videos!'
msg = f'''Subject: {subject}\n\n{body}'''
smtp.sendmail(EMAIL_ADDRESS, ['d.isham.ai93@gmail.com'], msg)
smtp.close()
    except Exception as exc:
        print('Something went wrong while sending the email:', exc)
if __name__ == '__main__':
print('Creating driver')
driver = get_driver()
print('Fetching trending videos')
videos = get_videos(driver)
print(f'Found {len(videos)} videos.')
print('Parsing top 10 videos')
videos_data = [parse_video(video) for video in videos[:10]]
print('Send the results over email.')
body = json.dumps(videos_data, indent=2)
send_email(body)
| 25.788889
| 88
| 0.71047
|
fd95695000963e8dd1a726b80e15ca45c9a896b5
| 488
|
py
|
Python
|
test_projects/test_module_a/test_feat_1.py
|
martinsmid/pytest-ui
|
15fcbe04a6467cc6f7a373ef6156acc44f0ba5ec
|
[
"MIT"
] | 27
|
2017-03-10T08:54:06.000Z
|
2021-09-28T13:51:10.000Z
|
test_projects/test_module_a/test_feat_1.py
|
martinsmid/pytest-ui
|
15fcbe04a6467cc6f7a373ef6156acc44f0ba5ec
|
[
"MIT"
] | 1
|
2021-06-22T19:29:31.000Z
|
2021-06-22T19:29:31.000Z
|
test_projects/test_module_a/test_feat_1.py
|
martinsmid/pytest-ui
|
15fcbe04a6467cc6f7a373ef6156acc44f0ba5ec
|
[
"MIT"
] | 1
|
2020-04-19T07:25:27.000Z
|
2020-04-19T07:25:27.000Z
|
from __future__ import print_function
import unittest
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
class TestOutputCapturing(unittest.TestCase):
def test_feat_1_case_1(self):
print('hello')
logger.debug('hello at the debug level')
def test_feat_1_case_2(self):
self.assertEqual(True, False)
def test_feat_1_case_3(self):
logger.error('hello at the error level')
def test_feat_1_case_4(self):
pass
| 22.181818
| 48
| 0.715164
|
a0fd3a71ef5e3e45dcbc389e74c3f103b099c11b
| 1,678
|
py
|
Python
|
helpers.py
|
gabrielaraujof/Babelegram
|
8ea28b68d24ef66a6b95e5b71e648b2b04a3bad0
|
[
"MIT"
] | 1
|
2016-03-29T16:54:49.000Z
|
2016-03-29T16:54:49.000Z
|
helpers.py
|
gabrielaraujof/Babelegram
|
8ea28b68d24ef66a6b95e5b71e648b2b04a3bad0
|
[
"MIT"
] | 2
|
2016-03-25T14:47:13.000Z
|
2016-03-30T04:48:26.000Z
|
helpers.py
|
gabrielaraujof/Babelegram
|
8ea28b68d24ef66a6b95e5b71e648b2b04a3bad0
|
[
"MIT"
] | 1
|
2016-03-29T16:54:50.000Z
|
2016-03-29T16:54:50.000Z
|
"""
A set of helper functions and constants supporting the bot engine.
"""
_LANG_SET = {
    'ar': 'Arabic',
'bg': 'Bulgarian',
'ca': 'Catalan',
'zh-CHS': 'Chinese (Simplified)',
'zh-CHT': 'Chinese (Traditional)',
'cs': 'Czech',
'da': 'Danish',
'nl': 'Dutch',
    'en': 'English',
'et': 'Estonian',
'fi': 'Finnish',
    'fr': 'French',
    'de': 'German',
'el': 'Greek',
'ht': 'Haitian Creole',
'he': 'Hebrew',
'hi': 'Hindi',
'hu': 'Hungarian',
'id': 'Indonesian',
'it': 'Italian',
'ja': 'Japanese',
'ko': 'Korean',
'lv': 'Latvian',
'lt': 'Lithuanian',
'mww': 'Hmong Daw',
'no': 'Norwegian',
'pl': 'Polish',
    'pt': 'Portuguese',
'ro': 'Romanian',
'ru': 'Russian',
'sk': 'Slovak',
'sl': 'Slovenian',
'es': 'Spanish',
'sv': 'Swedish',
'th': 'Thai',
'tr': 'Turkish',
'uk': 'Ukrainian',
'vi': 'Vietnamese',
}
def get_lang_name(lang_id):
""" Returns the name of a language by its id.
"""
return _LANG_SET[lang_id]
def start_rank():
""" Create the structure for ranking with the languages
available for translating."""
ocurrence_dict = {lang_id: 1 for lang_id, name in _LANG_SET.items()}
ranking = list([lang_id for lang_id, name in _LANG_SET.items()])
return ranking, ocurrence_dict
def rating_calc(item, ocurrences, last_ocurrences, total_ocurrences):
""" Calculates the rating of the target language.
"""
rating = ocurrences / total_ocurrences
if item in last_ocurrences:
rating *= 2
if last_ocurrences and item == last_ocurrences[-1]:
rating *= 4
return rating
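# A quick worked example with hypothetical numbers: for ocurrences=3 out of
# total_ocurrences=12 the base rating is 3/12 = 0.25; being present in last_ocurrences
# doubles it to 0.5, and being its most recent entry multiplies it by 4 again, giving
# 2.0 (under Python 3 true division).
#
#   rating_calc('pt', 3, ['en', 'pt'], 12)   # -> 2.0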
| 23.971429
| 72
| 0.567342
|
4ae700203d43181c7662b8a568b439e4f77b8b04
| 1,170
|
py
|
Python
|
chapter2/data-preprocessing.py
|
kevingo/ml-az
|
02f998d8c7f5476461b5cf59267c9532308e4ea4
|
[
"MIT"
] | null | null | null |
chapter2/data-preprocessing.py
|
kevingo/ml-az
|
02f998d8c7f5476461b5cf59267c9532308e4ea4
|
[
"MIT"
] | null | null | null |
chapter2/data-preprocessing.py
|
kevingo/ml-az
|
02f998d8c7f5476461b5cf59267c9532308e4ea4
|
[
"MIT"
] | null | null | null |
# Import Library
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
# import data
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 3].values
# deal with missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
print X
# encoding categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X = LabelEncoder()
X[:, 0] = labelEncoder_X.fit_transform(X[:, 0])
print X
oneHotEncoder = OneHotEncoder(categorical_features = [0])
X = oneHotEncoder.fit_transform(X).toarray()
print X
labelEncoder_Y = LabelEncoder()
Y = labelEncoder_Y.fit_transform(Y)
print Y
# Split data into training / testing dataset
from sklearn.cross_validation import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
print X_train
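# Note: Imputer and sklearn.cross_validation above come from older scikit-learn
# releases. A rough equivalent with the current API, assuming the same Data.csv layout,
# would be:
#
#   from sklearn.impute import SimpleImputer
#   from sklearn.model_selection import train_test_split
#
#   imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
#   X[:, 1:3] = imputer.fit_transform(X[:, 1:3])
#   X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
#   # (OneHotEncoder no longer takes categorical_features; wrap it in a ColumnTransformer instead)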
| 25.434783
| 92
| 0.759829
|
431623c0f1d70d0ccd2c36fa82c47496a7633c47
| 22,418
|
py
|
Python
|
tests/conftest.py
|
ArunkumarRamanan/computervision-recipes
|
861ebf32484f9a58ff3570cf73e7b86a254d33e1
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
ArunkumarRamanan/computervision-recipes
|
861ebf32484f9a58ff3570cf73e7b86a254d33e1
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
ArunkumarRamanan/computervision-recipes
|
861ebf32484f9a58ff3570cf73e7b86a254d33e1
|
[
"MIT"
] | 1
|
2020-09-07T03:19:02.000Z
|
2020-09-07T03:19:02.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# NOTE: This file is used by pytest to inject fixtures automatically. As it is explained in the documentation
# https://docs.pytest.org/en/latest/fixture.html:
# "If during implementing your tests you realize that you want to use a fixture function from multiple test files
# you can move it to a conftest.py file. You don't need to import the fixture you want to use in a test, it
# automatically gets discovered by pytest."
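# For example, any test module in this repo can request one of the fixtures defined
# below simply by naming it as an argument; pytest injects it without an import.
# A hypothetical test:
#
#   def test_tiny_ic_dataset_path(tiny_ic_data_path):
#       assert tiny_ic_data_path    # fixture resolved by name from this conftest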
import numpy as np
import os
import pytest
import torch
import urllib.request
import random
from PIL import Image
from torch import tensor
from pathlib import Path
from fastai.vision import cnn_learner, models
from fastai.vision.data import ImageList, imagenet_stats
from typing import List, Tuple
from tempfile import TemporaryDirectory
from utils_cv.common.data import unzip_url
from utils_cv.classification.data import Urls as ic_urls
from utils_cv.detection.data import Urls as od_urls
from utils_cv.detection.bbox import DetectionBbox, AnnotationBbox
from utils_cv.detection.dataset import DetectionDataset
from utils_cv.detection.model import (
get_pretrained_fasterrcnn,
get_pretrained_maskrcnn,
get_pretrained_keypointrcnn,
DetectionLearner,
_extract_od_results,
_apply_threshold,
)
def path_classification_notebooks():
""" Returns the path of the classification notebooks folder. """
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"scenarios",
"classification",
)
)
def path_similarity_notebooks():
""" Returns the path of the similarity notebooks folder. """
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"scenarios",
"similarity",
)
)
def path_detection_notebooks():
""" Returns the path of the detection notebooks folder. """
return os.path.abspath(
os.path.join(
os.path.dirname(__file__), os.path.pardir, "scenarios", "detection"
)
)
# ----- Module fixtures ----------------------------------------------------------
@pytest.fixture(scope="module")
def classification_notebooks():
folder_notebooks = path_classification_notebooks()
# Path for the notebooks
paths = {
"00_webcam": os.path.join(folder_notebooks, "00_webcam.ipynb"),
"01_training_introduction": os.path.join(
folder_notebooks, "01_training_introduction.ipynb"
),
"02_multilabel_classification": os.path.join(
folder_notebooks, "02_multilabel_classification.ipynb"
),
"03_training_accuracy_vs_speed": os.path.join(
folder_notebooks, "03_training_accuracy_vs_speed.ipynb"
),
"10_image_annotation": os.path.join(
folder_notebooks, "10_image_annotation.ipynb"
),
"11_exploring_hyperparameters": os.path.join(
folder_notebooks, "11_exploring_hyperparameters.ipynb"
),
"12_hard_negative_sampling": os.path.join(
folder_notebooks, "12_hard_negative_sampling.ipynb"
),
"20_azure_workspace_setup": os.path.join(
folder_notebooks, "20_azure_workspace_setup.ipynb"
),
"21_deployment_on_azure_container_instances": os.path.join(
folder_notebooks,
"21_deployment_on_azure_container_instances.ipynb",
),
"22_deployment_on_azure_kubernetes_service": os.path.join(
folder_notebooks, "22_deployment_on_azure_kubernetes_service.ipynb"
),
"23_aci_aks_web_service_testing": os.path.join(
folder_notebooks, "23_aci_aks_web_service_testing.ipynb"
),
"24_exploring_hyperparameters_on_azureml": os.path.join(
folder_notebooks, "24_exploring_hyperparameters_on_azureml.ipynb"
),
}
return paths
@pytest.fixture(scope="module")
def similarity_notebooks():
folder_notebooks = path_similarity_notebooks()
# Path for the notebooks
paths = {
"00": os.path.join(folder_notebooks, "00_webcam.ipynb"),
"01": os.path.join(
folder_notebooks, "01_training_and_evaluation_introduction.ipynb"
),
"11": os.path.join(
folder_notebooks, "11_exploring_hyperparameters.ipynb"
),
"12": os.path.join(folder_notebooks, "12_fast_retrieval.ipynb"),
}
return paths
@pytest.fixture(scope="module")
def detection_notebooks():
folder_notebooks = path_detection_notebooks()
# Path for the notebooks
paths = {
"00": os.path.join(folder_notebooks, "00_webcam.ipynb"),
"01": os.path.join(folder_notebooks, "01_training_introduction.ipynb"),
"02": os.path.join(folder_notebooks, "02_mask_rcnn.ipynb"),
"03": os.path.join(folder_notebooks, "03_keypoint_rcnn.ipynb"),
"11": os.path.join(
folder_notebooks, "11_exploring_hyperparameters_on_azureml.ipynb"
),
"12": os.path.join(
folder_notebooks, "12_hard_negative_sampling.ipynb"
),
"20": os.path.join(
folder_notebooks, "20_deployment_on_kubernetes.ipynb"
),
}
return paths
# ----- Function fixtures ----------------------------------------------------------
@pytest.fixture(scope="function")
def tmp(tmp_path_factory):
"""Create a function-scoped temp directory.
Will be cleaned up after each test function.
Args:
tmp_path_factory (pytest.TempPathFactory): Pytest default fixture
Returns:
str: Temporary directory path
"""
with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:
yield td
@pytest.fixture(scope="function")
def func_tiny_od_data_path(tmp_session) -> str:
""" Returns the path to the fridge object detection dataset. """
return unzip_url(
od_urls.fridge_objects_tiny_path,
fpath=f"{tmp_session}/tmp",
dest=f"{tmp_session}/tmp",
exist_ok=True,
)
# ----- Session fixtures ----------------------------------------------------------
@pytest.fixture(scope="session")
def tmp_session(tmp_path_factory):
""" Same as 'tmp' fixture but with session level scope. """
with TemporaryDirectory(dir=tmp_path_factory.getbasetemp()) as td:
yield td
# ------|-- Classification/Similarity ---------------------------------------------
@pytest.fixture(scope="session")
def tiny_ic_multidata_path(tmp_session) -> List[str]:
""" Returns the path to multiple dataset. """
return [
unzip_url(
ic_urls.fridge_objects_watermark_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
),
unzip_url(
ic_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
),
]
@pytest.fixture(scope="session")
def tiny_ic_data_path(tmp_session) -> str:
""" Returns the path to the tiny fridge objects dataset. """
return unzip_url(
ic_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def tiny_multilabel_ic_data_path(tmp_session) -> str:
""" Returns the path to the tiny fridge objects dataset. """
return unzip_url(
ic_urls.multilabel_fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def multilabel_ic_data_path(tmp_session) -> str:
""" Returns the path to the tiny fridge objects dataset. """
return unzip_url(
ic_urls.multilabel_fridge_objects_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def tiny_ic_negatives_path(tmp_session) -> str:
""" Returns the path to the tiny negatives dataset. """
return unzip_url(
ic_urls.fridge_objects_negatives_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def tiny_ic_databunch(tmp_session):
""" Returns a databunch object for the tiny fridge objects dataset. """
im_paths = unzip_url(
ic_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
return (
ImageList.from_folder(im_paths)
.split_by_rand_pct(valid_pct=0.1, seed=20)
.label_from_folder()
.transform(size=50)
.databunch(bs=16)
.normalize(imagenet_stats)
)
@pytest.fixture(scope="session")
def multilabel_result():
""" Fake results to test evaluation metrics for multilabel classification. """
y_pred = torch.tensor(
[
[0.9, 0.0, 0.0, 0.0],
[0.9, 0.0, 0.9, 0.9],
[0.0, 0.9, 0.0, 0.0],
[0.9, 0.9, 0.0, 0.0],
]
).float()
y_true = torch.tensor(
[[1, 0, 0, 1], [1, 1, 1, 1], [0, 1, 0, 0], [1, 1, 1, 0]]
).float()
return y_pred, y_true
@pytest.fixture(scope="session")
def model_pred_scores(tiny_ic_databunch):
"""Return a simple learner and prediction scores on tiny ic data"""
model = models.resnet18
lr = 1e-4
epochs = 1
learn = cnn_learner(tiny_ic_databunch, model)
learn.fit(epochs, lr)
return learn, learn.get_preds()[0].tolist()
@pytest.fixture(scope="session")
def testing_im_list(tmp_session):
""" Set of 5 images from the can/ folder of the Fridge Objects dataset
used to test positive example rank calculations"""
im_paths = unzip_url(
ic_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
can_im_paths = os.listdir(os.path.join(im_paths, "can"))
can_im_paths = [
os.path.join(im_paths, "can", im_name) for im_name in can_im_paths
][0:5]
return can_im_paths
@pytest.fixture(scope="session")
def testing_databunch(tmp_session):
""" Builds a databunch from the Fridge Objects
and returns its validation component that is used
to test comparative_set_builder"""
im_paths = unzip_url(
ic_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
can_im_paths = os.listdir(os.path.join(im_paths, "can"))
can_im_paths = [
os.path.join(im_paths, "can", im_name) for im_name in can_im_paths
][0:5]
random.seed(642)
data = (
ImageList.from_folder(im_paths)
.split_by_rand_pct(valid_pct=0.2, seed=20)
.label_from_folder()
.transform(size=300)
.databunch(bs=16)
.normalize(imagenet_stats)
)
validation_bunch = data.valid_ds
return validation_bunch
# ------|-- Detection -------------------------------------------------------------
@pytest.fixture(scope="session")
def od_cup_path(tmp_session) -> str:
""" Returns the path to the downloaded cup image. """
IM_URL = "https://cvbp.blob.core.windows.net/public/images/cvbp_cup.jpg"
im_path = os.path.join(tmp_session, "example.jpg")
urllib.request.urlretrieve(IM_URL, im_path)
return im_path
@pytest.fixture(scope="session")
def od_cup_mask_path(tmp_session) -> str:
""" Returns the path to the downloaded cup image. """
im_url = (
"https://cvbp.blob.core.windows.net/public/images/cvbp_cup_mask.png"
)
im_path = os.path.join(tmp_session, "example_mask.png")
urllib.request.urlretrieve(im_url, im_path)
return im_path
@pytest.fixture(scope="session")
def od_cup_anno_bboxes(tmp_session, od_cup_path) -> List[AnnotationBbox]:
return [
AnnotationBbox(
left=61,
top=59,
right=273,
bottom=244,
label_name="cup",
label_idx=0,
im_path=od_cup_path,
)
]
@pytest.fixture(scope="session")
def od_cup_det_bboxes(tmp_session, od_cup_path) -> List[DetectionBbox]:
return [
DetectionBbox(
left=61,
top=59,
right=273,
bottom=244,
label_name="cup",
label_idx=0,
im_path=od_cup_path,
score=0.99,
)
]
@pytest.fixture(scope="session")
def od_mask_rects() -> Tuple:
""" Returns synthetic mask and rectangles ([left, top, right, bottom]) for
object detection.
"""
height = width = 100
mask = np.zeros((height, width), dtype=np.uint8)
mask[:10, :20] = 1
mask[20:40, 30:60] = 2
# corresponding binary masks of the mask above
binary_masks = np.zeros((2, height, width), dtype=np.bool)
binary_masks[0, :10, :20] = True
binary_masks[1, 20:40, 30:60] = True
# corresponding rectangles of the mask above
rects = [[0, 0, 19, 9], [30, 20, 59, 39]]
# a completely black image
im = Image.fromarray(np.zeros((height, width, 3), dtype=np.uint8))
return binary_masks, mask, rects, im
@pytest.fixture(scope="session")
def tiny_od_data_path(tmp_session) -> str:
""" Returns the path to the fridge object detection dataset. """
return unzip_url(
od_urls.fridge_objects_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def tiny_od_mask_data_path(tmp_session) -> str:
""" Returns the path to the fridge object detection mask dataset. """
return unzip_url(
od_urls.fridge_objects_mask_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def tiny_od_keypoint_data_path(tmp_session) -> str:
""" Returns the path to the fridge object detection keypoint dataset. """
return unzip_url(
od_urls.fridge_objects_keypoint_milk_bottle_tiny_path,
fpath=tmp_session,
dest=tmp_session,
exist_ok=True,
)
@pytest.fixture(scope="session")
def od_sample_im_anno(tiny_od_data_path) -> Tuple[Path, ...]:
""" Returns an annotation and image path from the tiny_od_data_path fixture.
Specifically, using the paths for 1.xml and 1.jpg
"""
anno_path = Path(tiny_od_data_path) / "annotations" / "1.xml"
im_path = Path(tiny_od_data_path) / "images" / "1.jpg"
return anno_path, im_path
@pytest.fixture(scope="session")
def od_data_path_labels() -> List[str]:
return ["water_bottle", "can", "milk_bottle", "carton"]
@pytest.fixture(scope="session")
def od_sample_raw_preds():
device = (
torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu")
)
boxes = [
[109.0, 190.0, 205.0, 408.0],
[340.0, 326.0, 465.0, 549.0],
[214.0, 181.0, 315.0, 460.0],
[215.0, 193.0, 316.0, 471.0],
[109.0, 209.0, 209.0, 420.0],
]
# construct masks
masks = np.zeros((len(boxes), 1, 666, 499), dtype=np.float)
for rect, mask in zip(boxes, masks):
left, top, right, bottom = [int(x) for x in rect]
# first line of the bounding box
mask[:, top, left : (right + 1)] = 0.05
# other lines of the bounding box
mask[:, (top + 1) : (bottom + 1), left : (right + 1)] = 0.7
# construct keypoints
start_points = [[120, 200], [350, 350], [220, 300], [250, 400], [100, 350]]
keypoints = []
for x, y in start_points:
points = []
for i in range(13):
points.append([x + i, y + i, 2])
keypoints.append(points)
return [
{
"boxes": tensor(boxes, device=device, dtype=torch.float),
"labels": tensor(
[3, 3, 3, 2, 1], device=device, dtype=torch.int64
),
"scores": tensor(
[0.9985, 0.9979, 0.9945, 0.1470, 0.0903],
device=device,
dtype=torch.float,
),
"masks": tensor(masks, device=device, dtype=torch.float),
"keypoints": tensor(keypoints, device=device, dtype=torch.float32),
}
]
@pytest.fixture(scope="session")
def od_sample_detection(od_sample_raw_preds, od_detection_mask_dataset):
labels = ["one", "two", "three", "four"]
detections = _extract_od_results(
_apply_threshold(od_sample_raw_preds[0], threshold=0.001),
labels,
od_detection_mask_dataset.im_paths[0],
)
detections["idx"] = 0
del detections["keypoints"]
return detections
@pytest.fixture(scope="session")
def od_sample_keypoint_detection(
od_sample_raw_preds, tiny_od_detection_keypoint_dataset
):
labels = ["one", "two", "three", "four"]
detections = _extract_od_results(
_apply_threshold(od_sample_raw_preds[0], threshold=0.9),
labels,
tiny_od_detection_keypoint_dataset.im_paths[0],
)
detections["idx"] = 0
del detections["masks"]
return detections
@pytest.fixture(scope="session")
def od_detection_dataset(tiny_od_data_path):
""" returns a basic detection dataset. """
return DetectionDataset(tiny_od_data_path)
@pytest.fixture(scope="session")
def od_detection_mask_dataset(tiny_od_mask_data_path):
""" returns a basic detection mask dataset. """
return DetectionDataset(
tiny_od_mask_data_path, mask_dir="segmentation-masks"
)
@pytest.fixture(scope="session")
def tiny_od_detection_keypoint_dataset(tiny_od_keypoint_data_path):
""" returns a basic detection keypoint dataset. """
return DetectionDataset(
tiny_od_keypoint_data_path,
keypoint_meta={
"labels": [
"lid_left_top",
"lid_right_top",
"lid_left_bottom",
"lid_right_bottom",
"left_bottom",
"right_bottom",
],
"skeleton": [
[0, 1],
[0, 2],
[1, 3],
[2, 3],
[2, 4],
[3, 5],
[4, 5],
],
"hflip_inds": [1, 0, 3, 2, 5, 4],
},
)
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detection_learner(od_detection_dataset):
""" returns a basic detection learner that has been trained for one epoch. """
model = get_pretrained_fasterrcnn(
num_classes=len(od_detection_dataset.labels) + 1,
min_size=100,
max_size=200,
rpn_pre_nms_top_n_train=500,
rpn_pre_nms_top_n_test=250,
rpn_post_nms_top_n_train=500,
rpn_post_nms_top_n_test=250,
)
learner = DetectionLearner(od_detection_dataset, model=model)
learner.fit(1)
return learner
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detection_mask_learner(od_detection_mask_dataset):
""" returns a mask detection learner that has been trained for one epoch. """
model = get_pretrained_maskrcnn(
num_classes=len(od_detection_mask_dataset.labels) + 1,
min_size=100,
max_size=200,
rpn_pre_nms_top_n_train=500,
rpn_pre_nms_top_n_test=250,
rpn_post_nms_top_n_train=500,
rpn_post_nms_top_n_test=250,
)
learner = DetectionLearner(od_detection_mask_dataset, model=model)
learner.fit(1)
return learner
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detection_keypoint_learner(tiny_od_detection_keypoint_dataset):
""" returns a keypoint detection learner that has been trained for one epoch. """
model = get_pretrained_keypointrcnn(
num_classes=len(tiny_od_detection_keypoint_dataset.labels) + 1,
num_keypoints=len(
tiny_od_detection_keypoint_dataset.keypoint_meta["labels"]
),
min_size=100,
max_size=200,
rpn_pre_nms_top_n_train=500,
rpn_pre_nms_top_n_test=250,
rpn_post_nms_top_n_train=500,
rpn_post_nms_top_n_test=250,
)
learner = DetectionLearner(tiny_od_detection_keypoint_dataset, model=model)
learner.fit(1, skip_evaluation=True)
return learner
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detection_eval(od_detection_learner):
""" returns the eval results of a detection learner after one epoch of training. """
return od_detection_learner.evaluate()
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detection_mask_eval(od_detection_mask_learner):
""" returns the eval results of a detection learner after one epoch of training. """
return od_detection_mask_learner.evaluate()
@pytest.mark.gpu
@pytest.fixture(scope="session")
def od_detections(od_detection_dataset):
""" returns output of the object detector for a given test set. """
learner = DetectionLearner(od_detection_dataset)
return learner.predict_dl(od_detection_dataset.test_dl, threshold=0)
# ----- AML Settings ----------------------------------------------------------
# TODO i can't find where this function is being used
def pytest_addoption(parser):
parser.addoption(
"--subscription_id",
help="Azure Subscription Id to create resources in",
)
parser.addoption("--resource_group", help="Name of the resource group")
parser.addoption("--workspace_name", help="Name of Azure ML Workspace")
parser.addoption(
"--workspace_region", help="Azure region to create the workspace in"
)
@pytest.fixture
def subscription_id(request):
return request.config.getoption("--subscription_id")
@pytest.fixture
def resource_group(request):
return request.config.getoption("--resource_group")
@pytest.fixture
def workspace_name(request):
return request.config.getoption("--workspace_name")
@pytest.fixture
def workspace_region(request):
return request.config.getoption("--workspace_region")
# @pytest.fixture(scope="session")
# def testing_im_list(tmp_session):
# """ Set of 5 images from the can/ folder of the Fridge Objects dataset
# used to test positive example rank calculations"""
# im_paths = unzip_url(
# Urls.fridge_objects_tiny_path, tmp_session, exist_ok=True
# )
# can_im_paths = os.listdir(os.path.join(im_paths, "can"))
# can_im_paths = [
# os.path.join(im_paths, "can", im_name) for im_name in can_im_paths
# ][0:5]
# return can_im_paths
| 30.667579
| 113
| 0.642073
|
8db1cb3abf1e9af230362a216411d9a31040ec2c
| 1,240
|
py
|
Python
|
enviroment_config/main.py
|
kringsman/exercises_python
|
054581a2ee1710984b04f5f6d288d6dc79e09240
|
[
"MIT"
] | null | null | null |
enviroment_config/main.py
|
kringsman/exercises_python
|
054581a2ee1710984b04f5f6d288d6dc79e09240
|
[
"MIT"
] | 7
|
2020-05-01T09:03:04.000Z
|
2022-03-12T00:31:28.000Z
|
enviroment_config/main.py
|
kringsman/exercises_python
|
054581a2ee1710984b04f5f6d288d6dc79e09240
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Flask, render_template, request, flash, redirect
from contact_model import Contact
app = Flask(__name__)
app.secret_key = 'some_secret'
app.debug = True
@app.route(r'/', methods=['GET'])
def contact_book():
contacts = Contact.query().fetch()
return render_template('contact_book.html', contacts=contacts)
@app.route(r'/add', methods=['GET', 'POST'])
def add_contact():
if request.form:
contact = Contact(name=request.form.get('name'),
phone=request.form.get('phone'),
email=request.form.get('email'))
contact.put()
        flash('Contact added!')
return render_template('add_contact.html')
@app.route(r'/contacts/<uid>', methods=['GET'])
def contact_details(uid):
contact = Contact.get_by_id(int(uid))
if not contact:
return redirect('/', code=301)
return render_template('contact.html', contact=contact)
@app.route(r'/delete', methods=['POST'])
def delete_contact():
contact = Contact.get_by_id(int(request.form.get('uid')))
contact.key.delete()
return redirect('/contacts/{}'.format(contact.key.id()))
if __name__ == '__main__':
app.run()
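# A minimal way to exercise these routes without a browser is Flask's built-in test
# client (illustrative sketch; the Contact model still needs its datastore backend):
#
#   with app.test_client() as client:
#       client.post('/add', data={'name': 'Ana', 'phone': '555-0100', 'email': 'ana@example.com'})
#       page = client.get('/')                    # renders the contact book
#       client.post('/delete', data={'uid': '123'})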
| 25.306122
| 67
| 0.637097
|
c8aabb962cce3ed34d57ca149e07d95967c7c5e3
| 3,264
|
py
|
Python
|
surgame/src/caveGenerate.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | 3
|
2017-04-30T17:44:53.000Z
|
2018-02-03T06:02:11.000Z
|
surgame/src/caveGenerate.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | 10
|
2021-03-18T20:17:19.000Z
|
2022-03-11T23:14:19.000Z
|
surgame/src/caveGenerate.py
|
anokata/pythonPetProjects
|
245c3ff11ae560b17830970061d8d60013948fd7
|
[
"MIT"
] | null | null | null |
import random
WALL = 0
FLOOR = 1
SPACE = 2
PLAYER = 8
TELEPORT = 3
INIT = 4
lastId = 9
DrawChars = {
WALL: 'x',
FLOOR: 'f',
SPACE: ' ',
PLAYER: '~',
TELEPORT: 'T',
INIT: 'o',
}
def addRectBorder(m, w, val=WALL):
for x in (0, w-1):
for y in range(len(m)):
m[x][y] = val
for y in (0, w-1):
for x in range(len(m)):
m[x][y] = val
def floodFill(m, x, y, val, newval):
tofill = list()
tofill.append((x, y))
while len(tofill) != 0:
x, y = tofill[0]
tofill.remove((x, y))
if m[x][y] == val:
m[x][y] = newval
tofill.append((x+1, y))
tofill.append((x-1, y))
tofill.append((x, y+1))
tofill.append((x, y-1))
return m
def stepn(m):
    # One cellular-automaton smoothing pass: a WALL cell becomes FLOOR when more than
    # BL of its 8 neighbours are floor, a FLOOR cell stays floor only with at least DL
    # floor neighbours; everything else is left as WALL in the fresh grid n.
    BL = 4
    DL = 3
    n = init_matrix(len(m))
for x in range(1, len(m) - 2):
for y in range(1, len(m) - 2):
ne = calcNei(m, x, y)
c = m[x][y]
if c == WALL:
if ne > BL:
n[x][y] = FLOOR
else:
if ne >= DL:
n[x][y] = FLOOR
return n
def calcNei(m, x, y):
    # Sum of the 8 neighbouring cell values; with WALL=0 and FLOOR=1 this is simply
    # the number of floor neighbours.
    r = 0
r += m[x-1][y]
r += m[x-1][y-1]
r += m[x-1][y+1]
r += m[x+1][y]
r += m[x+1][y+1]
r += m[x+1][y-1]
r += m[x][y-1]
r += m[x][y+1]
return r
def init_matrix(w, val=WALL):
m = [[val for x in range(w)] for y in range(w)]
return m
def objects_generate(m, TL=4):
for x in range(1, len(m)-1):
for y in range(1, len(m)-1):
ne = calcNei(m, x, y)
if ne < TL and m[x][y] == FLOOR:
m[x][y] = 3
def randomize(m, chance=55):
for x in range(0, len(m)):
for y in range(0, len(m)):
c = random.randint(0,100)
if c > chance:
m[x][y] = FLOOR
return m
def gen(w=100, steps=3):
m = init_matrix(w)
m = randomize(m)
for i in range(steps):
m = stepn(m)
objects_generate(m)
#floodFill(m, 20, 20, m[20][20], 2)
addRectBorder(m, w)
placePlayer(m)
return m
def getRandomFreePoint(m):
x = 0
y = 0
w = len(m)
notFree = True
while notFree:
x = random.randint(2, w-2)
y = random.randint(2, w-2)
n = calcNei(m, x, y)
notFree = n < 7
#notFree = m[x][y] == FLOOR
return (x, y)
def placePlayer(m):
x, y = getRandomFreePoint(m)
m[x][y] = PLAYER
return m
def drawString(m):
r = str()
for x in range(0, len(m)):
for y in range(0, len(m)):
char = DrawChars[m[x][y]]
r += char
r += '\n'
return r
def changeTile(m, old, new):
for x in range(0, len(m)):
for y in range(0, len(m)):
if m[x][y] == old:
m[x][y] = new
def addObjectsRandom(m, obj, count):
global lastId
idd = lastId
DrawChars.update({lastId:obj})
lastId += 1
for i in range(count):
x, y = getRandomFreePoint(m)
m[x][y] = idd
m = gen(30, 5)
changeTile(m, FLOOR, SPACE)
#addObjectsRandom(m, 'G', 10)
floor = init_matrix(30, FLOOR)
if __name__ == '__main__':
print(drawString(m))
#print(drawString(floor))
| 21.615894
| 51
| 0.460478
|
f6f3fe06e05f3a14f4acfd6893e88abc7d3a9284
| 1,574
|
py
|
Python
|
script.py
|
apsknight/crawlresult
|
d314990dcc919a6535503fcc019a0bfe4cca6ff6
|
[
"MIT"
] | 2
|
2020-11-02T22:18:25.000Z
|
2020-11-28T14:51:53.000Z
|
script.py
|
apsknight/crawlresult
|
d314990dcc919a6535503fcc019a0bfe4cca6ff6
|
[
"MIT"
] | null | null | null |
script.py
|
apsknight/crawlresult
|
d314990dcc919a6535503fcc019a0bfe4cca6ff6
|
[
"MIT"
] | null | null | null |
from generator import generate
import json
year = int(raw_input('Enter first two digit of Roll No. : '))
# assert (year >= 14 and year <= 17), 'Entered Year is not correct'
branch = str(raw_input('Enter branch abbreviation : ')).upper()
branches = ['CE', 'CS', 'ME', 'MM', 'EE', 'EC']
assert branch in branches, 'Entered branch abbreviation not correct'
degree = int(raw_input('Enter 1 for single degree and 2 for dual degree : '))
assert degree in [1, 2], 'Entered degree not correct'
lastroll = int(raw_input('Enter last possible Roll No. : '))
# assert lastroll <= 46 and lastroll >= 9, 'lastroll should be less than 46'
firstroll = int(raw_input('Enter first Roll No. : '))
depc = []
for i in range(firstroll, lastroll):
if i in depc:
continue
if (i < 10) :
roll = str(year) + str(branch) + '0' + str(degree) + '00' + str(i)
else :
roll = str(year) + str(branch) + '0' + str(degree) + '0' + str(i)
with open("stres.json") as f:
prev = json.load(f)
if roll in prev:
continue
scheme = 0
choice = [[2000 - 18, 2000 - 17, 2000 - 19, 2000 - 16, 2000 - 20], [2000 - 18, 2000 - 19, 2000 - 17, 2000 - 16, 2000 - 20], [2000 - 17, 2000 - 18, 2000 - 19, 2000 - 16, 2000 - 20], [2000 - 19, 2000 - 18, 2000 - 17, 2000 - 16, 2000 - 20]]
if generate(roll, year + choice[scheme][0]) == False:
if generate(roll, year + choice[scheme][1]) == False:
if generate(roll, year + choice[scheme][2]) == False:
# if generate(roll, year + choice[scheme][3]) == False:
# if generate(roll, year + choice[scheme][4]) == False:
print ("I fail for", roll)
| 39.35
| 238
| 0.627065
|
fad296641260275970ef95a3895efa75e7f1eb16
| 19,850
|
py
|
Python
|
methods/PPO_Hierarchy.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | 1
|
2021-03-19T17:57:51.000Z
|
2021-03-19T17:57:51.000Z
|
methods/PPO_Hierarchy.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | null | null | null |
methods/PPO_Hierarchy.py
|
vanstrn/RL_public
|
0e971e40e063b17918460e19728f95d7924af8db
|
[
"MIT"
] | null | null | null |
"""
To Do:
-Add an optional input for the networks so they can be defined in a main run script.
-Test
-Combine Training Operation
"""
from .method import Method
from .buffer import Trajectory,BatchDivider,MultiBatchDivider
from .AdvantageEstimator import gae
import tensorflow as tf
import numpy as np
from utils.utils import MovingAverage
from networks.network import NetworkBuilder  # assumed import path; NetworkBuilder is referenced below but not imported in this file
def _log(val):
return tf.log(tf.clip_by_value(val, 1e-10, 10.0))
class PPO_Hierarchy(Method):
def __init__(self,sess,settings,netConfigOverride,stateShape,actionSize,nTrajs=1,**kwargs):
"""
Initializes a training method for a neural network.
Parameters
----------
Model : Keras Model Object
A Keras model object with fully defined layers and a call function. See examples in networks module.
sess : Tensorflow Session
Initialized Tensorflow session
stateShape : list
List of integers of the inputs shape size. Ex [39,39,6]
actionSize : int
Output size of the network.
HPs : dict
Dictionary that contains all hyperparameters to be used in the methods training
nTrajs : int (Optional)
Number that specifies the number of trajectories to be created for collecting training data.
scope : str (Optional)
Name of the PPO method. Used to group and differentiate variables between other networks.
Returns
-------
N/A
"""
#Processing inputs
self.actionSize = actionSize
self.sess=sess
self.Model = NetworkBuilder(networkConfig=settings["NetworkConfig"],netConfigOverride=netConfigOverride,actionSize=actionSize)
self.HPs=settings["HPs"]
self.subReward = False
self.UpdateSubpolicies = True
self.nTrajs = nTrajs
self.method = self.HPs["Method"]
#Creating two buffers to separate information between the different levels of the network.
if self.subReward:
self.buffer = [Trajectory(depth=12) for _ in range(nTrajs)]
#[s0,a,r,r_sub,s1,done]+[HL_actions, HL_log_logits, HL_v, flag, critics, logits]
else:
self.buffer = [Trajectory(depth=11) for _ in range(nTrajs)]
#[s0,a,r,s1,done]+[HL_action, HL_log_logits, HL_v, flag, critics, logits]
        scope = kwargs.get("scope", "PPO_Hierarchy")  # optional scope name from kwargs; the default name is an assumption
        with self.sess.as_default(), self.sess.graph.as_default():
            with tf.name_scope(scope):
#Generic placeholders
self.s = tf.placeholder(tf.float32, [None]+stateShape, 'S')
self.a_his = tf.placeholder(tf.int32, [None, ], 'A')
self.td_target_ = tf.placeholder(tf.float32, [None], 'Vtarget')
self.advantage_ = tf.placeholder(shape=[None], dtype=tf.float32, name='adv_hold')
                #Initializing Network I/O
inputs = {"state":self.s}
out = self.Model(inputs)
self.a_prob = out["metaActor"]
self.v = out["metaCritic"]
self.log_logits = out["metaLogLogits"]
self.sub_a_prob = out["subActor"]
self.sub_log_logits = out["subLogLogits"]
self.sub_v = out["subCritic"]
self.nPolicies = len(self.sub_a_prob)
#Placeholder for the Hierarchical Policy
self.old_log_logits_ = tf.placeholder(shape=[None, self.nPolicies], dtype=tf.float32, name='old_logit_hold')
#Placeholder for the Sub-Policies
self.old_log_logits_sub_ = tf.placeholder(shape=[None, actionSize], dtype=tf.float32, name='old_logit_sub_hold')
# Creating the Loss and update calls for the Hierarchical policy
self.hierarchicalLoss = self.CreateLossPPO(self.a_prob,self.td_target_,self.v,self.a_his,self.log_logits,self.old_log_logits_,self.advantage_,self.nPolicies)
variables = self.Model.getHierarchyVariables()
self.hierarchyUpdater = self.CreateUpdater(self.hierarchicalLoss,variables)
                # Creating the loss updaters for the Sub-policies.
self.subpolicyLoss = []
self.subpolicyUpdater = []
for i in range(self.nPolicies):
loss = self.CreateLossPPO(self.sub_a_prob[i],self.td_target_,self.sub_v[i],self.a_his,self.sub_log_logits[i],self.old_log_logits_sub_,self.advantage_,self.actionSize)
self.subpolicyLoss.append(loss)
variables = self.Model.getSubpolicyVariables(i)
self.subpolicyUpdater.append(self.CreateUpdater(loss,variables))
                #Creating Variables for the purpose of logging.
self.SubpolicyDistribution = MovingAverage(1000)
def CreateUpdater(self,loss,variables):
optimizer = tf.keras.optimizers.Adam(self.HPs["LR"])
gradients = optimizer.get_gradients(loss,variables)
return optimizer.apply_gradients(zip(gradients,variables))
def CreateLossPPO(self,a_prob,td_target_,v,a_his,log_logits,old_log_logits_,advantage_,actionSize):
# Entropy
entropy = -tf.reduce_mean(a_prob * _log(a_prob), name='entropy')
# Critic Loss
td_error = td_target_ - v
critic_loss = tf.reduce_mean(tf.square(td_error), name='critic_loss')
# Actor Loss
action_OH = tf.one_hot(a_his, actionSize, dtype=tf.float32)
log_prob = tf.reduce_sum(log_logits * action_OH, 1)
old_log_prob = tf.reduce_sum(old_log_logits_ * action_OH, 1)
# Clipped surrogate function
ratio = tf.exp(log_prob - old_log_prob)
surrogate = ratio * advantage_
clipped_surrogate = tf.clip_by_value(ratio, 1-self.HPs["eps"], 1+self.HPs["eps"]) * advantage_
surrogate_loss = tf.minimum(surrogate, clipped_surrogate, name='surrogate_loss')
actor_loss = -tf.reduce_mean(surrogate_loss, name='actor_loss')
actor_loss = actor_loss - entropy * self.HPs["EntropyBeta"]
loss = actor_loss + critic_loss * self.HPs["CriticBeta"]
return loss
def InitiateEpisode(self):
if self.method == "Greedy":
pass
elif self.method == "Fixed Step":
self.counter = 1
self.nStep = 4
elif self.method == "Constant":
pass
elif self.method == "Confidence":
self.pastActions = [None]*self.nTrajs
elif self.method == "Probabilistic Confidence":
pass
else:
pass
def GetAction(self, state, step,episode=0):
"""
Method to run data through hierarchical network
First run the state through the meta network to select subpolicy to use.
Second run the state through the proper Subpolicy
ToDo: Check if faster to run the entire network and select appropriate subpolicy afterwards. or run only the required bit.
Parameters
----------
state : np.array
            Data with the shape of [N, self.stateShape] where N is the number of samples
Returns
-------
actions : list[int]
List of actions based on NN output.
extraData : list
List of data that is passed to the execution code to be bundled with state data.
"""
#Determine number of steps and whether to initiate confidence based on the length of the Buffer.
if step == 0:
self.InitiateEpisode()
# Run the Meta and Sub-policy Networks
targets = [self.a_prob,self.log_logits,self.v]+self.sub_a_prob+self.sub_log_logits+self.sub_v
res = self.sess.run(targets, {self.s: state})
LL_probs=res[0]
HL_log_logits =res[1]
HL_v = res[2]
sub_probs = res[3:3+self.nPolicies]
sub_log_logits = res[3+self.nPolicies:3+2*self.nPolicies]
sub_v = res[3+2*self.nPolicies:]
if self.method == "Greedy":
HL_actions = np.array([np.random.choice(LL_probs.shape[1], p=prob / sum(prob)) for prob in LL_probs])
flag=[True]*state.shape[0]
elif self.method == "Fixed Step":
if self.counter == self.nStep:
                #Resetting step counter and selecting new option
self.counter = 1
if self.counter == 1:
HL_actions = np.array([np.random.choice(LL_probs.shape[1], p=prob / sum(prob)) for prob in LL_probs])
self.traj_action = HL_actions
flag=[True]*state.shape[0]
else:
HL_actions = self.traj_action
flag=[False]*state.shape[0]
self.counter +=1
elif self.method == "Confidence":
flag = []
HL_actions = []
confids = -np.mean(LL_probs * np.log(LL_probs), axis=1)
for i,confid in enumerate(confids):
if confid < 0.1 or step==0:
action = np.random.choice(LL_probs.shape[1], p=LL_probs[i] / sum(LL_probs[i]))
HL_actions.append(action)
self.pastActions[i] = action
flag.append(True)
else:
HL_actions.append(self.pastActions[i])
flag.append(True)
self.traj_action = HL_actions
elif self.method == "Probabilistic Confidence":
pass
else:
pass
# Run the Subpolicy Network
actions = np.array([np.random.choice(self.actionSize, p=sub_probs[mod][idx] / sum(sub_probs[mod][idx])) for idx, mod in enumerate(HL_actions)])
critics = [sub_v[mod][idx] for idx, mod in enumerate(HL_actions)]
logits = [sub_log_logits[mod][idx] for idx, mod in enumerate(HL_actions)]
return actions, [HL_actions, HL_log_logits, HL_v, flag, critics, logits]
def Update(self,HPs):
"""
        Processes the buffer and backpropagates the losses through the NN.
Parameters
----------
HPs : dict
Hyperparameters for training.
Returns
-------
N/A
"""
samples=0
for i in range(len(self.buffer)):
samples +=len(self.buffer[i])
if samples < self.HPs["BatchSize"]:
return
for traj in range(len(self.buffer)):
td_target, advantage, td_target_hier, advantage_hier,actions_hier,ll_hier,s_hier = self.ProcessBuffer(HPs,traj)
# Updating the Hierarchical Controller
for epoch in range(self.HPs["Epochs"]):
for batch in MultiBatchDivider([s_hier,actions_hier,td_target_hier,advantage_hier,ll_hier],self.HPs["MinibatchSize"]):
feed_dict = {self.s: np.asarray(batch[0]).squeeze(),
self.a_his: np.asarray(batch[1]).squeeze(),
self.td_target_: np.asarray(batch[2]).squeeze(),
self.advantage_: np.reshape(batch[3], [-1]),
self.old_log_logits_: np.asarray(batch[4]).squeeze()}
self.sess.run(self.hierarchyUpdater, feed_dict)
if self.UpdateSubpolicies:
#Collecting the data into different sub-Policies
if self.subReward:
tmp, l1, l2, l3, l4, l5 = (list(t) for t in zip(*sorted(zip(self.buffer[traj][6], self.buffer[traj][0], self.buffer[traj][1], td_target, advantage, self.buffer[traj][10]),key=lambda x: x[0]))) #Sorting by the value in the actions_hier
#dividing at the splits
for subpolicyNum,data in SubpolicyIterator(tmp,[l1, l2, l3, l4, l5]):
#Updating each of the sub-policies.
for epoch in range(self.HPs["Epochs"]):
for batch in MultiBatchDivider(data,self.HPs["MinibatchSize"]):
feed_dict = {self.s: np.asarray(batch[0]).squeeze(),
self.a_his: np.asarray(batch[1]).squeeze(),
self.td_target_: np.asarray(batch[2]).squeeze(),
self.advantage_: np.reshape(batch[3], [-1]),
self.old_log_logits_sub_: np.asarray(batch[4]).squeeze()}
self.sess.run(self.subpolicyUpdater[subpolicyNum], feed_dict)
self.SubpolicyDistribution.extend(np.asarray(self.buffer[traj][6]))
else:
tmp, l1, l2, l3, l4, l5 = (list(t) for t in zip(*sorted(zip(self.buffer[traj][5], self.buffer[traj][0], self.buffer[traj][1], td_target, advantage, self.buffer[traj][10]),key=lambda x: x[0]))) #Sorting by the value in the actions_hier
#dividing at the splits
for subpolicyNum,data in SubpolicyIterator(tmp,[l1, l2, l3, l4, l5]):
#Updating each of the sub-policies.
for epoch in range(self.HPs["Epochs"]):
for batch in MultiBatchDivider(data,self.HPs["MinibatchSize"]):
feed_dict = {self.s: np.asarray(batch[0]).squeeze(),
self.a_his: np.asarray(batch[1]).squeeze(),
self.td_target_: np.asarray(batch[2]).squeeze(),
self.advantage_: np.reshape(batch[3], [-1]),
self.old_log_logits_sub_: np.asarray(batch[4]).squeeze()}
self.sess.run(self.subpolicyUpdater[subpolicyNum], feed_dict)
self.SubpolicyDistribution.extend(np.asarray(self.buffer[traj][5]))
self.ClearTrajectory()
def GetStatistics(self):
stats={}
for i in range(self.nPolicies):
length = len(self.SubpolicyDistribution.tolist())
if length == 0:
length=1
stats["Subpolicy Use/"+str(i)] = self.SubpolicyDistribution.tolist().count(i)/length
return stats
def ProcessBuffer(self,HPs,traj):
"""
        Processes the buffer and backpropagates the losses through the NN.
Parameters
----------
        HPs : dict
Hyperparameters for training.
traj : Trajectory
Data stored by the neural network.
clip : list[bool]
List where the trajectory has finished.
Returns
-------
td_target : list
List Temporal Difference Target for particular states.
advantage : list
List of advantages for particular actions.
"""
#Splitting the buffer into different episodes based on the done tag.
split_loc = [i+1 for i, x in enumerate(self.buffer[traj][4]) if x]
if self.subReward:
            #Stuff that needs to be processed for the Low Level Controllers
reward_lists = np.split(self.buffer[traj][2],split_loc[:-1])
sub_reward_lists = np.split(self.buffer[traj][3],split_loc[:-1])
value_lists = np.split(self.buffer[traj][10],split_loc[:-1])
            #Stuff needed for the hierarchical controller
HL_S_lists = np.split(self.buffer[traj][0],split_loc[:-1])
HL_Critic_lists = np.split(self.buffer[traj][8],split_loc[:-1])
HL_Logits_lists = np.split(self.buffer[traj][7],split_loc[:-1])
HL_action_lists = np.split(self.buffer[traj][6],split_loc[:-1])
HL_flag_lists = np.split(self.buffer[traj][9],split_loc[:-1])
td_target=[]; advantage=[]
td_target_hier=[]; advantage_hier=[]
            ll=[];actions=[];s=[]
for rew,s_rew,value,HL_critic,HL_ll,HL_a,HL_flag,HL_s in zip(reward_lists,sub_reward_lists,value_lists,HL_Critic_lists,HL_Logits_lists,HL_action_lists,HL_flag_lists,HL_S_lists):
# Calculating the per step advantage of each of the different sections
td_target_i, advantage_i = gae(s_rew.reshape(-1).tolist(),value.reshape(-1).tolist(),0,self.HPs["Gamma"],self.HPs["lambda"])
td_target.extend(td_target_i); advantage.extend( advantage_i)
                #Collapsing different trajectory lengths for the hierarchical controller
split_loc_ = [i+1 for i, x in enumerate(HL_flag[:-1]) if x]
rew_hier = [np.sum(l) for l in np.split(rew,split_loc_)]
value_hier = [l[0] for l in np.split(HL_critic,split_loc_)]
actions.extend([l[0] for l in np.split(HL_a,split_loc_)])
ll.extend([l[0] for l in np.split(HL_ll,split_loc_)])
s.extend([l[0] for l in np.split(HL_s,split_loc_)])
#Calculating the td_target and advantage for the hierarchical controller.
td_target_i_, advantage_i_ = gae(np.asarray(rew_hier).reshape(-1).tolist(),np.asarray(value_hier).reshape(-1).tolist(),0,self.HPs["Gamma"],self.HPs["lambda"])
td_target_hier.extend(td_target_i_); advantage_hier.extend( advantage_i_)
            return td_target, advantage, td_target_hier, advantage_hier,actions,ll,s
else:
            #Stuff that needs to be processed for the Low Level Controllers
reward_lists = np.split(self.buffer[traj][2],split_loc[:-1])
value_lists = np.split(self.buffer[traj][9],split_loc[:-1])
            #Stuff needed for the hierarchical controller
HL_S_lists = np.split(self.buffer[traj][0],split_loc[:-1])
HL_Critic_lists = np.split(self.buffer[traj][7],split_loc[:-1])
HL_Logits_lists = np.split(self.buffer[traj][6],split_loc[:-1])
HL_action_lists = np.split(self.buffer[traj][5],split_loc[:-1])
HL_flag_lists = np.split(self.buffer[traj][8],split_loc[:-1])
td_target=[]; advantage=[]
td_target_hier=[]; advantage_hier=[]
ll=[];actions=[];s=[]
for rew,value,HL_critic,HL_ll,HL_a,HL_flag,HL_s in zip(reward_lists,value_lists,HL_Critic_lists,HL_Logits_lists,HL_action_lists,HL_flag_lists,HL_S_lists):
# Calculating the per step advantage of each of the different sections
td_target_i, advantage_i = gae(rew.reshape(-1).tolist(),value.reshape(-1).tolist(),0,self.HPs["Gamma"],self.HPs["lambda"])
td_target.extend(td_target_i); advantage.extend( advantage_i)
                #Collapsing different trajectory lengths for the hierarchical controller
split_loc_ = [i+1 for i, x in enumerate(HL_flag[:-1]) if x]
rew_hier = [np.sum(l) for l in np.split(rew,split_loc_)]
value_hier = [l[0] for l in np.split(HL_critic,split_loc_)]
actions.extend([l[0] for l in np.split(HL_a,split_loc_)])
ll.extend([l[0] for l in np.split(HL_ll,split_loc_)])
s.extend([l[0] for l in np.split(HL_s,split_loc_)])
#Calculating the td_target and advantage for the hierarchical controller.
td_target_i_, advantage_i_ = gae(np.asarray(rew_hier).reshape(-1).tolist(),np.asarray(value_hier).reshape(-1).tolist(),0,self.HPs["Gamma"],self.HPs["lambda"])
td_target_hier.extend(td_target_i_); advantage_hier.extend( advantage_i_)
return td_target, advantage, td_target_hier, advantage_hier,actions,ll,s
@property
def getVars(self):
return self.Model.getVars("PPO_Training")
def SubpolicyIterator(sortingList, dataLists):
    # For each subpolicy id in the sorted key list, yield the matching
    # contiguous slice of every data list, inclusive of the last occurrence.
    keys = np.asarray(sortingList).squeeze().tolist()
    for num in set(keys):
        index_first = keys.index(num)
        index_last = len(keys) - 1 - keys[::-1].index(num)
        res = [dataList[index_first:index_last + 1] for dataList in dataLists]
        yield num, res
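# --- Illustrative sketch (not part of the original module) ---
# A tiny, self-contained example of what SubpolicyIterator yields; the key and
# data values below are arbitrary toy inputs chosen for illustration only.
def _example_subpolicy_iterator():
    keys = [0, 0, 1, 1, 1, 2]
    states = ["s0", "s1", "s2", "s3", "s4", "s5"]
    returns = [10, 11, 12, 13, 14, 15]
    # Sorting the yielded (id, slices) pairs gives:
    # [(0, [['s0', 's1'], [10, 11]]),
    #  (1, [['s2', 's3', 's4'], [12, 13, 14]]),
    #  (2, [['s5'], [15]])]
    return sorted(SubpolicyIterator(keys, [states, returns]))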
| 48.179612
| 254
| 0.595567
|
d006b5b8728c6ce8462144067807d4cb217f47ab
| 2,516
|
py
|
Python
|
2016-2021 Miami University/CSE 564 Algorithms/Coding Assignments/src/hashing.py
|
0x326/academic-code-portfolio
|
8e76628608622b26608de52c56367e75942d4eb5
|
[
"MIT"
] | null | null | null |
2016-2021 Miami University/CSE 564 Algorithms/Coding Assignments/src/hashing.py
|
0x326/academic-code-portfolio
|
8e76628608622b26608de52c56367e75942d4eb5
|
[
"MIT"
] | null | null | null |
2016-2021 Miami University/CSE 564 Algorithms/Coding Assignments/src/hashing.py
|
0x326/academic-code-portfolio
|
8e76628608622b26608de52c56367e75942d4eb5
|
[
"MIT"
] | 1
|
2020-12-11T23:43:09.000Z
|
2020-12-11T23:43:09.000Z
|
#!/usr/bin/env python3
"""
2019-03-01 Hashing Assignment
"""
import logging
from itertools import chain
from typing import NamedTuple, List, Iterator
class Entry(NamedTuple):
present_as_positive_value: bool
present_as_negative_value: bool
class TrivialSet:
def __init__(self, max_range: int):
self.array: List[Entry] = [Entry(present_as_positive_value=False, present_as_negative_value=False)
for _ in range(max_range + 1)]
    def add(self, item):
        # Read and write the slot at abs(item); the sign decides which flag is set.
        present_as_positive_value, present_as_negative_value = self.array[abs(item)]
        if item >= 0:
            present_as_positive_value = True
        else:
            present_as_negative_value = True
        self.array[abs(item)] = Entry(present_as_positive_value, present_as_negative_value)
    def discard(self, item):
        present_as_positive_value, present_as_negative_value = self.array[abs(item)]
        if item >= 0:
            present_as_positive_value = False
        else:
            present_as_negative_value = False
        self.array[abs(item)] = Entry(present_as_positive_value, present_as_negative_value)
def __contains__(self, item):
if item >= 0:
return self.array[abs(item)].present_as_positive_value
else:
return self.array[abs(item)].present_as_negative_value
def __iter__(self) -> Iterator[int]:
negative_items = (-index for index, entry in enumerate(self.array) if entry.present_as_negative_value)
positive_items = (index for index, entry in enumerate(self.array) if entry.present_as_positive_value)
return chain(negative_items, positive_items)
def __str__(self):
return '{%s}' % ', '.join(map(str, iter(self)))
def __repr__(self):
return f'{self.__class__.__name__}({str(self)})'
if __name__ == '__main__':
import argparse
import sys
from random import randint
    parser = argparse.ArgumentParser(description='Demonstrate the TrivialSet direct-address set')
parser.add_argument('--max-range', metavar='MAX', type=int, default=10)
parser.add_argument('--test-count', metavar='MAX', type=int, default=10)
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
trivial_set = TrivialSet(max_range=args.max_range)
for _ in range(args.max_range):
num = randint(-args.max_range, args.max_range)
logging.debug(f'Adding value: {num}')
trivial_set.add(num)
logging.debug(f'Set after add: {trivial_set}')
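    # Illustrative follow-up (not part of the original demo): membership check
    # and printing the final contents; the probe value is arbitrary.
    probe = randint(-args.max_range, args.max_range)
    logging.debug(f'{probe} in set: {probe in trivial_set}')
    logging.debug(f'Final contents: {trivial_set}')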
| 33.546667
| 110
| 0.67806
|
bc11a5764c56d981d53a8b390ec16dda73590284
| 3,305
|
py
|
Python
|
data/external/repositories/120243/tradeshift-text-classification-master/src/xgb_meta_part2_predict.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/120243/tradeshift-text-classification-master/src/xgb_meta_part2_predict.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories/120243/tradeshift-text-classification-master/src/xgb_meta_part2_predict.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
from tool import *
from xgb_classifier import xgb_classifier
import numpy as np
import pickle
def xgb_meta_predict(data_base_dir,data_meta_part2_dir,submission_dir):
test_id=pickle.load(open(data_base_dir+"test_id.p","rb"))
y_all=pickle.load(open(data_base_dir+"y.p","rb"))
    y_part2=y_all[y_all.shape[0]//2:,:]
X_numerical=pickle.load(open(data_base_dir+"X_numerical.p","rb"))
    X_numerical_part2=X_numerical[X_numerical.shape[0]//2:,:]
X_test_numerical=pickle.load(open(data_base_dir+"X_test_numerical.p","rb"))
X_part2_rf=pickle.load(open(data_meta_part2_dir+ "X_meta_part2_rf.p", "rb" ) )
X_test_rf=pickle.load(open(data_meta_part2_dir+ "X_test_meta_rf.p", "rb" ) )
X_part2_svc=pickle.load(open(data_meta_part2_dir+ "X_meta_part2_svc.p", "rb" ) )
X_test_svc=pickle.load(open(data_meta_part2_dir+ "X_test_meta_svc.p", "rb" ) )
X_part2_sgd=pickle.load(open(data_meta_part2_dir+ "X_meta_part2_sgd.p", "rb" ) )
X_test_sgd=pickle.load(open(data_meta_part2_dir+ "X_test_meta_sgd.p", "rb" ) )
X_part2_best_online=pickle.load(open(data_meta_part2_dir+ "X_meta_part2_online.p", "rb" ) )
X_test_best_online=pickle.load(open(data_meta_part2_dir+ "X_test_meta_online.p", "rb" ) )
# private LB 0.0048854
xgb_clf=xgb_classifier(eta=0.09,min_child_weight=6,depth=18,num_round=120,threads=16)
X_xgb_predict = xgb_clf.train_predict_all_labels(np.hstack([X_part2_best_online,X_part2_rf,X_numerical_part2]), y_part2,np.hstack([X_test_best_online, X_test_rf,X_test_numerical]),predict_y14=True)
save_predictions(submission_dir+'xgb-part2-d18-e0.09-min6-tree120.csv.gz', test_id , X_xgb_predict)
# private LB 0.0048763
xgb_clf=xgb_classifier(eta=0.07,min_child_weight=6,depth=20,num_round=150,threads=16)
X_xgb_predict = xgb_clf.train_predict_all_labels(np.hstack([X_part2_best_online,X_part2_rf,X_numerical_part2]), y_part2,np.hstack([X_test_best_online, X_test_rf,X_test_numerical]),predict_y14=True)
save_predictions(submission_dir+'xgb-part2-d20-e0.07-min6-tree150.csv.gz', test_id , X_xgb_predict)
# private LB 0.0048978
xgb_clf=xgb_classifier(eta=0.09,min_child_weight=6,depth=18,num_round=100,threads=16)
X_xgb_predict = xgb_clf.train_predict_all_labels(np.hstack([X_part2_best_online,X_part2_rf,X_part2_svc,X_numerical_part2]), y_part2,np.hstack([X_test_best_online, X_test_rf,X_test_svc,X_test_numerical]),predict_y14=True)
save_predictions(submission_dir+'xgb-part2-d18-svc-e0.09-min6-tree100.csv.gz', test_id , X_xgb_predict)
# private LB 0.0050270
xgb_clf=xgb_classifier(eta=0.1,min_child_weight=6,depth=20,num_round=110,threads=16)
X_xgb_predict = xgb_clf.train_predict_all_labels(np.hstack([X_part2_best_online,X_part2_rf,X_part2_svc,X_part2_sgd]), y_part2,np.hstack([X_test_best_online, X_test_rf,X_test_svc,X_test_sgd]),predict_y14=True)
save_predictions(submission_dir+'xgb-part2-d20-e0.1-min6-tree110-metaonly.csv.gz', test_id , X_xgb_predict)
import sys
if __name__ == "__main__":
data_base_dir=sys.argv[1]
data_meta_part2_dir=sys.argv[2]
submission_dir=sys.argv[3]
xgb_meta_predict(data_base_dir,data_meta_part2_dir,submission_dir)
| 47.898551
| 224
| 0.751589
|
cc30e164aa6a637649c95da8f0d93a8abcecfd01
| 1,513
|
py
|
Python
|
django/privat_gos_sait/about_company/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
django/privat_gos_sait/about_company/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
django/privat_gos_sait/about_company/views.py
|
Netromnik/python
|
630a9df63b1cade9af38de07bb9cd0c3b8694c93
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.shortcuts import get_object_or_404 ,redirect
from markdown import markdown
from .models import about_company,item_block,list_menu
from django.http import HttpResponse
from django.views.generic import ListView
from django.views.generic.detail import SingleObjectMixin
from blog.models import Post
def my_rederect(request):
return redirect("/news/")
from itertools import chain
# Create your views here.
class My_list_views(ListView):
model=list_menu
template_name="about_company/index.html"
def content_render(request):
response = HttpResponse()
items_block= item_block.objects.all()
response.write(render(request,'about_company/block-img.html',context={'name':request,'block':items_block}).content)
lists_menu= list_menu.objects.all()
response.write(render(request,'about_company/glob_meny.html',context={'name':request,'list_menu':lists_menu}).content)
return HttpResponse(render(request,'about_company/index.html',context={'post':str(response.content.decode('utf-8'))}).content)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['test']=Post.objects.all()
return context
def DetailView(request,slug):
model=list_menu
template_name="about_company/post.html"
my_object = get_object_or_404(model, slug=slug)
return render(request,template_name,context=({'post':my_object}))
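# --- Illustrative sketch (not part of the original module) ---
# A hypothetical URLconf wiring for the views above; the route paths and names
# are assumptions chosen for illustration only.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('', My_list_views.as_view(), name='about-index'),
        path('redirect/', my_rederect, name='about-redirect'),
        path('<slug:slug>/', DetailView, name='about-detail'),
    ]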
| 39.815789
| 135
| 0.733642
|
b1c3b6ca95a9e404e7ae46ace7a1c737c87e383d
| 2,703
|
py
|
Python
|
disposable_email_checker/validators.py
|
jheld/DisposableEmailChecker
|
eca98a377865164ec2eef4d348916764fb7d7326
|
[
"BSD-3-Clause"
] | 2
|
2020-05-16T14:37:33.000Z
|
2022-03-18T11:38:29.000Z
|
disposable_email_checker/validators.py
|
jheld/DisposableEmailChecker
|
eca98a377865164ec2eef4d348916764fb7d7326
|
[
"BSD-3-Clause"
] | null | null | null |
disposable_email_checker/validators.py
|
jheld/DisposableEmailChecker
|
eca98a377865164ec2eef4d348916764fb7d7326
|
[
"BSD-3-Clause"
] | 2
|
2020-05-15T10:21:37.000Z
|
2021-12-13T15:44:45.000Z
|
# -*- coding: utf-8 -*-
import re
from django.conf import settings
from django.utils.encoding import force_text
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from bdea.client import BDEAClient
from django.urls import get_callable
class DisposableEmailChecker(object):
"""
Check if an email is from a disposable email service
"""
message = _("Blocked email provider.")
code = "invalid"
whitelist = []
def __init__(self, message=None, code=None, whitelist=None):
if message is not None:
self.message = message
elif hasattr(settings, "BDEA_MESSAGE"):
self.message = getattr(settings, "BDEA_MESSAGE")
if code is not None:
self.code = code
if whitelist is not None:
self.whitelist = whitelist
self.emails = self._load_emails()
self.BDEA_APIKEY = getattr(settings, "BDEA_APIKEY", None)
self.BDEA_TIMEOUT = getattr(settings, "BDEA_TIMEOUT", 5)
def __call__(self, value):
value = force_text(value)
# Catch invalid emails before we check if they're disposable
try:
validators.validate_email(value)
except ValidationError:
return
user_part, domain_part = value.rsplit("@", 1)
if domain_part not in self.whitelist:
if self.BDEA_APIKEY: # Validate using block-disposable-email.com
client = BDEAClient(
self.BDEA_APIKEY, timeout=self.BDEA_TIMEOUT
)
response = client.get_domain_status(domain_part)
if response.status() and response.is_disposable():
raise ValidationError(self.message, code=self.code)
"""
This will run if we are not using BDEA, we're out of BDEA credits,
there was an error contacting BDEA's servers or we did not get a
hit on BDEA. Basically always check using local list as a backup
"""
for email_group in self.chunk(self.emails, 20):
regex = "(.*" + "$)|(.*".join(email_group) + "$)"
if re.match(regex, value):
raise ValidationError(self.message, code=self.code)
def _load_emails(self):
loader = getattr(
settings,
"DEC_LOADER",
"disposable_email_checker.emails.email_domain_loader",
)
return get_callable(loader)()
def chunk(self, emails, n):
return (emails[i : i + n] for i in range(0, len(emails), n))
validate_disposable_email = DisposableEmailChecker()
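# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming a standard Django form, of how the module-level
# validator instance above can be attached to an email field; the form and
# field names are hypothetical.
def _example_signup_form():
    from django import forms
    class SignupForm(forms.Form):
        email = forms.EmailField(validators=[validate_disposable_email])
    return SignupForm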
| 32.963415
| 78
| 0.617832
|
8d050b77929daf8f663de4b769a02d447ddf518f
| 19,116
|
py
|
Python
|
venv/lib/python2.7/site-packages/cffi/model.py
|
deathping1994/gmail-sendmail-api
|
5f6cadf2a5a6a9d30581726647ca638661a040f5
|
[
"Apache-2.0"
] | 5
|
2015-10-11T04:36:12.000Z
|
2018-12-02T17:42:28.000Z
|
venv/lib/python2.7/site-packages/cffi/model.py
|
deathping1994/gmail-sendmail-api
|
5f6cadf2a5a6a9d30581726647ca638661a040f5
|
[
"Apache-2.0"
] | 2
|
2015-10-15T07:18:12.000Z
|
2015-10-15T07:23:48.000Z
|
venv/lib/python2.7/site-packages/cffi/model.py
|
deathping1994/gmail-sendmail-api
|
5f6cadf2a5a6a9d30581726647ca638661a040f5
|
[
"Apache-2.0"
] | 1
|
2020-11-01T20:40:01.000Z
|
2020-11-01T20:40:01.000Z
|
import types
import weakref
from .lock import allocate_lock
class BaseTypeByIdentity(object):
is_array_type = False
is_raw_function = False
def get_c_name(self, replace_with='', context='a C file'):
result = self.c_name_with_marker
assert result.count('&') == 1
# some logic duplication with ffi.getctype()... :-(
replace_with = replace_with.strip()
if replace_with:
if replace_with.startswith('*') and '&[' in result:
replace_with = '(%s)' % replace_with
elif not replace_with[0] in '[(':
replace_with = ' ' + replace_with
result = result.replace('&', replace_with)
if '$' in result:
from .ffiplatform import VerificationError
raise VerificationError(
"cannot generate '%s' in %s: unknown type name"
% (self._get_c_name(), context))
return result
def _get_c_name(self):
return self.c_name_with_marker.replace('&', '')
def has_c_name(self):
return '$' not in self._get_c_name()
def is_integer_type(self):
return False
def get_cached_btype(self, ffi, finishlist, can_delay=False):
try:
BType = ffi._cached_btypes[self]
except KeyError:
BType = self.build_backend_type(ffi, finishlist)
BType2 = ffi._cached_btypes.setdefault(self, BType)
assert BType2 is BType
return BType
def __repr__(self):
return '<%s>' % (self._get_c_name(),)
def _get_items(self):
return [(name, getattr(self, name)) for name in self._attrs_]
class BaseType(BaseTypeByIdentity):
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._get_items() == other._get_items())
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash((self.__class__, tuple(self._get_items())))
class VoidType(BaseType):
_attrs_ = ()
def __init__(self):
self.c_name_with_marker = 'void&'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_void_type')
void_type = VoidType()
class BasePrimitiveType(BaseType):
pass
class PrimitiveType(BasePrimitiveType):
_attrs_ = ('name',)
ALL_PRIMITIVE_TYPES = {
'char': 'c',
'short': 'i',
'int': 'i',
'long': 'i',
'long long': 'i',
'signed char': 'i',
'unsigned char': 'i',
'unsigned short': 'i',
'unsigned int': 'i',
'unsigned long': 'i',
'unsigned long long': 'i',
'float': 'f',
'double': 'f',
'long double': 'f',
'_Bool': 'i',
# the following types are not primitive in the C sense
'wchar_t': 'c',
'int8_t': 'i',
'uint8_t': 'i',
'int16_t': 'i',
'uint16_t': 'i',
'int32_t': 'i',
'uint32_t': 'i',
'int64_t': 'i',
'uint64_t': 'i',
'int_least8_t': 'i',
'uint_least8_t': 'i',
'int_least16_t': 'i',
'uint_least16_t': 'i',
'int_least32_t': 'i',
'uint_least32_t': 'i',
'int_least64_t': 'i',
'uint_least64_t': 'i',
'int_fast8_t': 'i',
'uint_fast8_t': 'i',
'int_fast16_t': 'i',
'uint_fast16_t': 'i',
'int_fast32_t': 'i',
'uint_fast32_t': 'i',
'int_fast64_t': 'i',
'uint_fast64_t': 'i',
'intptr_t': 'i',
'uintptr_t': 'i',
'intmax_t': 'i',
'uintmax_t': 'i',
'ptrdiff_t': 'i',
'size_t': 'i',
'ssize_t': 'i',
}
def __init__(self, name):
assert name in self.ALL_PRIMITIVE_TYPES
self.name = name
self.c_name_with_marker = name + '&'
def is_char_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'c'
def is_integer_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'i'
def is_float_type(self):
return self.ALL_PRIMITIVE_TYPES[self.name] == 'f'
def build_backend_type(self, ffi, finishlist):
return global_cache(self, ffi, 'new_primitive_type', self.name)
class UnknownIntegerType(BasePrimitiveType):
_attrs_ = ('name',)
def __init__(self, name):
self.name = name
self.c_name_with_marker = name + '&'
def is_integer_type(self):
return True # for now
def build_backend_type(self, ffi, finishlist):
raise NotImplementedError("integer type '%s' can only be used after "
"compilation" % self.name)
class BaseFunctionType(BaseType):
_attrs_ = ('args', 'result', 'ellipsis')
def __init__(self, args, result, ellipsis):
self.args = args
self.result = result
self.ellipsis = ellipsis
#
reprargs = [arg._get_c_name() for arg in self.args]
if self.ellipsis:
reprargs.append('...')
reprargs = reprargs or ['void']
replace_with = self._base_pattern % (', '.join(reprargs),)
self.c_name_with_marker = (
self.result.c_name_with_marker.replace('&', replace_with))
class RawFunctionType(BaseFunctionType):
# Corresponds to a C type like 'int(int)', which is the C type of
# a function, but not a pointer-to-function. The backend has no
# notion of such a type; it's used temporarily by parsing.
_base_pattern = '(&)(%s)'
is_raw_function = True
def build_backend_type(self, ffi, finishlist):
from . import api
raise api.CDefError("cannot render the type %r: it is a function "
"type, not a pointer-to-function type" % (self,))
def as_function_pointer(self):
return FunctionPtrType(self.args, self.result, self.ellipsis)
class FunctionPtrType(BaseFunctionType):
_base_pattern = '(*&)(%s)'
def build_backend_type(self, ffi, finishlist):
result = self.result.get_cached_btype(ffi, finishlist)
args = []
for tp in self.args:
args.append(tp.get_cached_btype(ffi, finishlist))
return global_cache(self, ffi, 'new_function_type',
tuple(args), result, self.ellipsis)
def as_raw_function(self):
return RawFunctionType(self.args, self.result, self.ellipsis)
class PointerType(BaseType):
_attrs_ = ('totype',)
_base_pattern = " *&"
_base_pattern_array = "(*&)"
def __init__(self, totype):
self.totype = totype
if totype.is_array_type:
extra = self._base_pattern_array
else:
extra = self._base_pattern
self.c_name_with_marker = totype.c_name_with_marker.replace('&', extra)
def build_backend_type(self, ffi, finishlist):
BItem = self.totype.get_cached_btype(ffi, finishlist, can_delay=True)
return global_cache(self, ffi, 'new_pointer_type', BItem)
voidp_type = PointerType(void_type)
class ConstPointerType(PointerType):
_base_pattern = " const *&"
_base_pattern_array = "(const *&)"
const_voidp_type = ConstPointerType(void_type)
class NamedPointerType(PointerType):
_attrs_ = ('totype', 'name')
def __init__(self, totype, name):
PointerType.__init__(self, totype)
self.name = name
self.c_name_with_marker = name + '&'
class ArrayType(BaseType):
_attrs_ = ('item', 'length')
is_array_type = True
def __init__(self, item, length):
self.item = item
self.length = length
#
if length is None:
brackets = '&[]'
elif length == '...':
brackets = '&[/*...*/]'
else:
brackets = '&[%s]' % length
self.c_name_with_marker = (
self.item.c_name_with_marker.replace('&', brackets))
def resolve_length(self, newlength):
return ArrayType(self.item, newlength)
def build_backend_type(self, ffi, finishlist):
if self.length == '...':
from . import api
raise api.CDefError("cannot render the type %r: unknown length" %
(self,))
self.item.get_cached_btype(ffi, finishlist) # force the item BType
BPtrItem = PointerType(self.item).get_cached_btype(ffi, finishlist)
return global_cache(self, ffi, 'new_array_type', BPtrItem, self.length)
char_array_type = ArrayType(PrimitiveType('char'), None)
class StructOrUnionOrEnum(BaseTypeByIdentity):
_attrs_ = ('name',)
forcename = None
def build_c_name_with_marker(self):
name = self.forcename or '%s %s' % (self.kind, self.name)
self.c_name_with_marker = name + '&'
def force_the_name(self, forcename):
self.forcename = forcename
self.build_c_name_with_marker()
def get_official_name(self):
assert self.c_name_with_marker.endswith('&')
return self.c_name_with_marker[:-1]
class StructOrUnion(StructOrUnionOrEnum):
fixedlayout = None
completed = 0
partial = False
packed = False
def __init__(self, name, fldnames, fldtypes, fldbitsize):
self.name = name
self.fldnames = fldnames
self.fldtypes = fldtypes
self.fldbitsize = fldbitsize
self.build_c_name_with_marker()
def has_anonymous_struct_fields(self):
if self.fldtypes is None:
return False
for name, type in zip(self.fldnames, self.fldtypes):
if name == '' and isinstance(type, StructOrUnion):
return True
return False
def enumfields(self):
for name, type, bitsize in zip(self.fldnames, self.fldtypes,
self.fldbitsize):
if name == '' and isinstance(type, StructOrUnion):
# nested anonymous struct/union
for result in type.enumfields():
yield result
else:
yield (name, type, bitsize)
def force_flatten(self):
# force the struct or union to have a declaration that lists
# directly all fields returned by enumfields(), flattening
# nested anonymous structs/unions.
names = []
types = []
bitsizes = []
for name, type, bitsize in self.enumfields():
names.append(name)
types.append(type)
bitsizes.append(bitsize)
self.fldnames = tuple(names)
self.fldtypes = tuple(types)
self.fldbitsize = tuple(bitsizes)
def get_cached_btype(self, ffi, finishlist, can_delay=False):
BType = StructOrUnionOrEnum.get_cached_btype(self, ffi, finishlist,
can_delay)
if not can_delay:
self.finish_backend_type(ffi, finishlist)
return BType
def finish_backend_type(self, ffi, finishlist):
if self.completed:
if self.completed != 2:
raise NotImplementedError("recursive structure declaration "
"for '%s'" % (self.name,))
return
BType = ffi._cached_btypes[self]
#
self.completed = 1
#
if self.fldtypes is None:
pass # not completing it: it's an opaque struct
#
elif self.fixedlayout is None:
fldtypes = [tp.get_cached_btype(ffi, finishlist)
for tp in self.fldtypes]
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize))
sflags = 0
if self.packed:
sflags = 8 # SF_PACKED
ffi._backend.complete_struct_or_union(BType, lst, self,
-1, -1, sflags)
#
else:
fldtypes = []
fieldofs, fieldsize, totalsize, totalalignment = self.fixedlayout
for i in range(len(self.fldnames)):
fsize = fieldsize[i]
ftype = self.fldtypes[i]
#
if isinstance(ftype, ArrayType) and ftype.length == '...':
# fix the length to match the total size
BItemType = ftype.item.get_cached_btype(ffi, finishlist)
nlen, nrest = divmod(fsize, ffi.sizeof(BItemType))
if nrest != 0:
self._verification_error(
"field '%s.%s' has a bogus size?" % (
self.name, self.fldnames[i] or '{}'))
ftype = ftype.resolve_length(nlen)
self.fldtypes = (self.fldtypes[:i] + (ftype,) +
self.fldtypes[i+1:])
#
BFieldType = ftype.get_cached_btype(ffi, finishlist)
if isinstance(ftype, ArrayType) and ftype.length is None:
assert fsize == 0
else:
bitemsize = ffi.sizeof(BFieldType)
if bitemsize != fsize:
self._verification_error(
"field '%s.%s' is declared as %d bytes, but is "
"really %d bytes" % (self.name,
self.fldnames[i] or '{}',
bitemsize, fsize))
fldtypes.append(BFieldType)
#
lst = list(zip(self.fldnames, fldtypes, self.fldbitsize, fieldofs))
ffi._backend.complete_struct_or_union(BType, lst, self,
totalsize, totalalignment)
self.completed = 2
def _verification_error(self, msg):
from .ffiplatform import VerificationError
raise VerificationError(msg)
def check_not_partial(self):
if self.partial and self.fixedlayout is None:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
finishlist.append(self)
#
return global_cache(self, ffi, 'new_%s_type' % self.kind,
self.get_official_name(), key=self)
class StructType(StructOrUnion):
kind = 'struct'
class UnionType(StructOrUnion):
kind = 'union'
class EnumType(StructOrUnionOrEnum):
kind = 'enum'
partial = False
partial_resolved = False
def __init__(self, name, enumerators, enumvalues, baseinttype=None):
self.name = name
self.enumerators = enumerators
self.enumvalues = enumvalues
self.baseinttype = baseinttype
self.build_c_name_with_marker()
def force_the_name(self, forcename):
StructOrUnionOrEnum.force_the_name(self, forcename)
if self.forcename is None:
name = self.get_official_name()
self.forcename = '$' + name.replace(' ', '_')
def check_not_partial(self):
if self.partial and not self.partial_resolved:
from . import ffiplatform
raise ffiplatform.VerificationMissing(self._get_c_name())
def build_backend_type(self, ffi, finishlist):
self.check_not_partial()
base_btype = self.build_baseinttype(ffi, finishlist)
return global_cache(self, ffi, 'new_enum_type',
self.get_official_name(),
self.enumerators, self.enumvalues,
base_btype, key=self)
def build_baseinttype(self, ffi, finishlist):
if self.baseinttype is not None:
return self.baseinttype.get_cached_btype(ffi, finishlist)
#
if self.enumvalues:
smallest_value = min(self.enumvalues)
largest_value = max(self.enumvalues)
else:
smallest_value = 0
largest_value = 0
if smallest_value < 0: # needs a signed type
sign = 1
candidate1 = PrimitiveType("int")
candidate2 = PrimitiveType("long")
else:
sign = 0
candidate1 = PrimitiveType("unsigned int")
candidate2 = PrimitiveType("unsigned long")
btype1 = candidate1.get_cached_btype(ffi, finishlist)
btype2 = candidate2.get_cached_btype(ffi, finishlist)
size1 = ffi.sizeof(btype1)
size2 = ffi.sizeof(btype2)
if (smallest_value >= ((-1) << (8*size1-1)) and
largest_value < (1 << (8*size1-sign))):
return btype1
if (smallest_value >= ((-1) << (8*size2-1)) and
largest_value < (1 << (8*size2-sign))):
return btype2
raise api.CDefError("%s values don't all fit into either 'long' "
"or 'unsigned long'" % self._get_c_name())
def unknown_type(name, structname=None):
if structname is None:
structname = '$%s' % name
tp = StructType(structname, None, None, None)
tp.force_the_name(name)
tp.origin = "unknown_type"
return tp
def unknown_ptr_type(name, structname=None):
if structname is None:
structname = '$$%s' % name
tp = StructType(structname, None, None, None)
return NamedPointerType(tp, name)
global_lock = allocate_lock()
def global_cache(srctype, ffi, funcname, *args, **kwds):
key = kwds.pop('key', (funcname, args))
assert not kwds
try:
return ffi._backend.__typecache[key]
except KeyError:
pass
except AttributeError:
# initialize the __typecache attribute, either at the module level
# if ffi._backend is a module, or at the class level if ffi._backend
# is some instance.
if isinstance(ffi._backend, types.ModuleType):
ffi._backend.__typecache = weakref.WeakValueDictionary()
else:
type(ffi._backend).__typecache = weakref.WeakValueDictionary()
try:
res = getattr(ffi._backend, funcname)(*args)
except NotImplementedError as e:
raise NotImplementedError("%s: %r: %s" % (funcname, srctype, e))
# note that setdefault() on WeakValueDictionary is not atomic
# and contains a rare bug (http://bugs.python.org/issue19542);
# we have to use a lock and do it ourselves
cache = ffi._backend.__typecache
with global_lock:
res1 = cache.get(key)
if res1 is None:
cache[key] = res
return res
else:
return res1
def pointer_cache(ffi, BType):
return global_cache('?', ffi, 'new_pointer_type', BType)
def attach_exception_info(e, name):
if e.args and type(e.args[0]) is str:
e.args = ('%s: %s' % (name, e.args[0]),) + e.args[1:]
| 34.443243
| 79
| 0.565129
|
01e960254b27f019287800280a88057de6a758c2
| 1,660
|
py
|
Python
|
dog-breed-identification/utils.py
|
rishabhiitbhu/kaggle
|
bdcfd406737b82bb7ba6e993bc481cb3a6975296
|
[
"MIT"
] | 1
|
2017-11-12T18:22:51.000Z
|
2017-11-12T18:22:51.000Z
|
dog-breed-identification/utils.py
|
rishabhiitbhu/kaggle
|
bdcfd406737b82bb7ba6e993bc481cb3a6975296
|
[
"MIT"
] | null | null | null |
dog-breed-identification/utils.py
|
rishabhiitbhu/kaggle
|
bdcfd406737b82bb7ba6e993bc481cb3a6975296
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
label_data = pd.read_csv('labels.csv')
train_label, dev_label = train_test_split(label_data, test_size=0.30, random_state=0)
unique_labels = pd.unique(label_data['breed'])
def load_data(type_):
label_data = train_label if type_=='train' else dev_label
X = np.zeros(([label_data.shape[0], 387, 443, 3]), dtype='float32')
Y = np.zeros(([120, label_data.shape[0]]), dtype='float32')
for i, id_ in enumerate(label_data['id']):
file = 'train/' + id_ + '.jpg'
X[i] = mpimg.imread(file)
        breed = label_data.loc[label_data['id']==id_]['breed'].values[0]
Y[np.where(unique_labels==breed)[0][0], i] = 1
return (X, Y)
def load_test_data():
X = np.zeros(([10357, 387, 443, 3]), dtype='float32')
for i, filename in enumerate(os.listdir('test')):
file = os.path.join('test', filename)
X[i] = mpimg.imread(file)
return X
def plot_training(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1,)
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.legend(['train', 'val'], loc='upper left')
plt.title(' accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.legend(['train', 'val'], loc='upper left')
plt.title('loss')
plt.show()
#if __name__=='main':
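# --- Illustrative sketch (not part of the original script) ---
# plot_training expects a Keras History-like object; the stand-in below only
# mimics the .history dict so the plotting code can be exercised without
# training a model. The numbers are made up.
class _FakeHistory:
    history = {'acc': [0.5, 0.7, 0.8], 'val_acc': [0.4, 0.6, 0.7],
               'loss': [1.2, 0.8, 0.5], 'val_loss': [1.4, 1.0, 0.8]}
# plot_training(_FakeHistory()) # uncomment to draw both panels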
| 30.181818
| 85
| 0.640964
|
3a1f06baef78856de2d3fbebbcc9f5d9b069368f
| 4,256
|
py
|
Python
|
src/gretel_client/transformers/data_transform_pipeline.py
|
franccesco/gretel-python-client
|
fd20dee07eba9657262edc902779142bf32c5b7c
|
[
"Apache-2.0"
] | null | null | null |
src/gretel_client/transformers/data_transform_pipeline.py
|
franccesco/gretel-python-client
|
fd20dee07eba9657262edc902779142bf32c5b7c
|
[
"Apache-2.0"
] | null | null | null |
src/gretel_client/transformers/data_transform_pipeline.py
|
franccesco/gretel-python-client
|
fd20dee07eba9657262edc902779142bf32c5b7c
|
[
"Apache-2.0"
] | null | null | null |
"""This module exposes the primary class that contains all ``DataPath`` objects and is responsible for
running all transformations.
"""
import collections
from typing import TYPE_CHECKING
try:
import pandas as pd
except ImportError:
pd = None
from gretel_client.transformers.data_pipeline import DataPipeline
from gretel_client.readers import DataFrameReader
if TYPE_CHECKING:
from pandas import DataFrame as DataFrameT
else:
DataFrameT = None
GRETEL_ID = "gretel_id"
FIELDS = "fields"
METADATA = "metadata"
RECORD = "record"
DATA = "data"
RECORD_KEYS = [RECORD, DATA]
class DataTransformPipeline(DataPipeline):
"""This class is a container for data paths describing a records transformations.
It constructs a data pipeline from a list of ``DataPath`` objects and is used to process records
based on the order of the data path list.
Args:
        data_paths: A list of ``DataPath`` instances
"""
def transform_record(self, payload: dict):
(
data_fields,
record_key,
_,
gretel_id,
) = DataTransformPipeline._get_data_and_schema(payload)
if not data_fields:
raise ValueError("Record does not seem to contain data.")
xform_payload_record = {}
xform_payload_metadata_fields = {}
data_path_list = self._build_datapath_list(data_fields)
meta_fields = payload.get(METADATA, {}).get(FIELDS, {})
for data_path in data_path_list:
field = data_path.input_field
value = data_fields.get(field)
meta = meta_fields.get(field, {})
meta[GRETEL_ID] = gretel_id
for transformation in data_path.get_data_path_transformations():
if transformation.labels:
if meta:
value, meta = transformation.transform_entities(value, meta)
if not value:
break
else:
for key, field_ref in transformation.field_ref_dict.items():
if isinstance(field_ref.field_name, list):
transformation.field_ref_dict[key].value = [
xform_payload_record.get(field_name)
or data_fields.get(field_name)
for field_name in field_ref.field_name
]
else:
transformation.field_ref_dict[
key
].value = xform_payload_record.get(
field_ref.field_name
) or data_fields.get(
field_ref.field_name
)
if transformation.__class__.__name__ == "Drop":
break
field_value = transformation.transform_field(field, value, meta)
if not field_value:
break
else:
value = field_value.get(field)
else:
xform_payload_record[data_path.output_field] = value
if meta and field in meta_fields.keys():
xform_payload_metadata_fields[data_path.output_field] = meta
xform_payload_record = collections.OrderedDict(
sorted([(k, v) for k, v in xform_payload_record.items()])
)
return DataTransformPipeline._build_return_record(
dict(xform_payload_record),
record_key,
xform_payload_metadata_fields,
gretel_id,
)
def transform_df(self, df: DataFrameT) -> DataFrameT:
"""Helper method that can consume a DataFrame and iterate over each record
as a dictionary, then run the transform pipeline on each record.
"""
if pd is None:
raise RuntimeError("Pandas must be installed to use this feature!")
records = []
reader = DataFrameReader(df)
for row in reader:
records.append(
self.transform_record(row)
)
return pd.DataFrame(records)
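# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example, assuming ``pipeline`` was already constructed elsewhere
# from DataPath objects, showing that transform_df simply runs the record
# transform over every row of a DataFrame; the column name is hypothetical.
def _example_transform_df(pipeline: "DataTransformPipeline") -> "DataFrameT":
    if pd is None:
        raise RuntimeError("Pandas must be installed to run this example!")
    source_df = pd.DataFrame([{"user_email": "user@example.com"}])
    return pipeline.transform_df(source_df)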
| 37.008696
| 102
| 0.570254
|
c40cd53cc57a699ae79d8b764f58105e6f5d7930
| 18,852
|
py
|
Python
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/sync_job_service_report_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 24
|
2018-06-22T14:13:23.000Z
|
2022-03-23T01:21:26.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/sync_job_service_report_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 46
|
2018-04-30T13:28:22.000Z
|
2022-03-21T21:11:07.000Z
|
isi_sdk_8_2_0/isi_sdk_8_2_0/models/sync_job_service_report_item.py
|
mohitjain97/isilon_sdk_python
|
a371f438f542568edb8cda35e929e6b300b1177c
|
[
"Unlicense"
] | 29
|
2018-06-19T00:14:04.000Z
|
2022-02-08T17:51:19.000Z
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SyncJobServiceReportItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'component': 'str',
'directory': 'str',
'end_time': 'int',
'error_msg': 'list[str]',
'filter': 'list[str]',
'handlers_failed': 'int',
'handlers_skipped': 'int',
'handlers_transferred': 'int',
'records_failed': 'int',
'records_skipped': 'int',
'records_transferred': 'int',
'start_time': 'int',
'status': 'str'
}
attribute_map = {
'component': 'component',
'directory': 'directory',
'end_time': 'end_time',
'error_msg': 'error_msg',
'filter': 'filter',
'handlers_failed': 'handlers_failed',
'handlers_skipped': 'handlers_skipped',
'handlers_transferred': 'handlers_transferred',
'records_failed': 'records_failed',
'records_skipped': 'records_skipped',
'records_transferred': 'records_transferred',
'start_time': 'start_time',
'status': 'status'
}
def __init__(self, component=None, directory=None, end_time=None, error_msg=None, filter=None, handlers_failed=None, handlers_skipped=None, handlers_transferred=None, records_failed=None, records_skipped=None, records_transferred=None, start_time=None, status=None): # noqa: E501
"""SyncJobServiceReportItem - a model defined in Swagger""" # noqa: E501
self._component = None
self._directory = None
self._end_time = None
self._error_msg = None
self._filter = None
self._handlers_failed = None
self._handlers_skipped = None
self._handlers_transferred = None
self._records_failed = None
self._records_skipped = None
self._records_transferred = None
self._start_time = None
self._status = None
self.discriminator = None
if component is not None:
self.component = component
if directory is not None:
self.directory = directory
if end_time is not None:
self.end_time = end_time
if error_msg is not None:
self.error_msg = error_msg
if filter is not None:
self.filter = filter
if handlers_failed is not None:
self.handlers_failed = handlers_failed
if handlers_skipped is not None:
self.handlers_skipped = handlers_skipped
if handlers_transferred is not None:
self.handlers_transferred = handlers_transferred
if records_failed is not None:
self.records_failed = records_failed
if records_skipped is not None:
self.records_skipped = records_skipped
if records_transferred is not None:
self.records_transferred = records_transferred
if start_time is not None:
self.start_time = start_time
if status is not None:
self.status = status
@property
def component(self):
"""Gets the component of this SyncJobServiceReportItem. # noqa: E501
The component that was processed. # noqa: E501
:return: The component of this SyncJobServiceReportItem. # noqa: E501
:rtype: str
"""
return self._component
@component.setter
def component(self, component):
"""Sets the component of this SyncJobServiceReportItem.
The component that was processed. # noqa: E501
:param component: The component of this SyncJobServiceReportItem. # noqa: E501
:type: str
"""
if component is not None and len(component) > 255:
raise ValueError("Invalid value for `component`, length must be less than or equal to `255`") # noqa: E501
if component is not None and len(component) < 0:
raise ValueError("Invalid value for `component`, length must be greater than or equal to `0`") # noqa: E501
self._component = component
@property
def directory(self):
"""Gets the directory of this SyncJobServiceReportItem. # noqa: E501
The directory of the service export. # noqa: E501
:return: The directory of this SyncJobServiceReportItem. # noqa: E501
:rtype: str
"""
return self._directory
@directory.setter
def directory(self, directory):
"""Sets the directory of this SyncJobServiceReportItem.
The directory of the service export. # noqa: E501
:param directory: The directory of this SyncJobServiceReportItem. # noqa: E501
:type: str
"""
if directory is not None and len(directory) > 255:
raise ValueError("Invalid value for `directory`, length must be less than or equal to `255`") # noqa: E501
if directory is not None and len(directory) < 0:
raise ValueError("Invalid value for `directory`, length must be greater than or equal to `0`") # noqa: E501
self._directory = directory
@property
def end_time(self):
"""Gets the end_time of this SyncJobServiceReportItem. # noqa: E501
The time the job ended this component. # noqa: E501
:return: The end_time of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this SyncJobServiceReportItem.
The time the job ended this component. # noqa: E501
:param end_time: The end_time of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if end_time is not None and end_time > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `end_time`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if end_time is not None and end_time < 0: # noqa: E501
raise ValueError("Invalid value for `end_time`, must be a value greater than or equal to `0`") # noqa: E501
self._end_time = end_time
@property
def error_msg(self):
"""Gets the error_msg of this SyncJobServiceReportItem. # noqa: E501
A list of error messages generated while exporting components. # noqa: E501
:return: The error_msg of this SyncJobServiceReportItem. # noqa: E501
:rtype: list[str]
"""
return self._error_msg
@error_msg.setter
def error_msg(self, error_msg):
"""Sets the error_msg of this SyncJobServiceReportItem.
A list of error messages generated while exporting components. # noqa: E501
:param error_msg: The error_msg of this SyncJobServiceReportItem. # noqa: E501
:type: list[str]
"""
self._error_msg = error_msg
@property
def filter(self):
"""Gets the filter of this SyncJobServiceReportItem. # noqa: E501
A list of path-based filters for exporting components. # noqa: E501
:return: The filter of this SyncJobServiceReportItem. # noqa: E501
:rtype: list[str]
"""
return self._filter
@filter.setter
def filter(self, filter):
"""Sets the filter of this SyncJobServiceReportItem.
A list of path-based filters for exporting components. # noqa: E501
:param filter: The filter of this SyncJobServiceReportItem. # noqa: E501
:type: list[str]
"""
self._filter = filter
@property
def handlers_failed(self):
"""Gets the handlers_failed of this SyncJobServiceReportItem. # noqa: E501
The number of handlers failed during export. # noqa: E501
:return: The handlers_failed of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._handlers_failed
@handlers_failed.setter
def handlers_failed(self, handlers_failed):
"""Sets the handlers_failed of this SyncJobServiceReportItem.
The number of handlers failed during export. # noqa: E501
:param handlers_failed: The handlers_failed of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if handlers_failed is not None and handlers_failed > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `handlers_failed`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if handlers_failed is not None and handlers_failed < 0: # noqa: E501
raise ValueError("Invalid value for `handlers_failed`, must be a value greater than or equal to `0`") # noqa: E501
self._handlers_failed = handlers_failed
@property
def handlers_skipped(self):
"""Gets the handlers_skipped of this SyncJobServiceReportItem. # noqa: E501
The number of handlers skipped during export. # noqa: E501
:return: The handlers_skipped of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._handlers_skipped
@handlers_skipped.setter
def handlers_skipped(self, handlers_skipped):
"""Sets the handlers_skipped of this SyncJobServiceReportItem.
The number of handlers skipped during export. # noqa: E501
:param handlers_skipped: The handlers_skipped of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if handlers_skipped is not None and handlers_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `handlers_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if handlers_skipped is not None and handlers_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `handlers_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._handlers_skipped = handlers_skipped
@property
def handlers_transferred(self):
"""Gets the handlers_transferred of this SyncJobServiceReportItem. # noqa: E501
The number of handlers exported. # noqa: E501
:return: The handlers_transferred of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._handlers_transferred
@handlers_transferred.setter
def handlers_transferred(self, handlers_transferred):
"""Sets the handlers_transferred of this SyncJobServiceReportItem.
The number of handlers exported. # noqa: E501
:param handlers_transferred: The handlers_transferred of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if handlers_transferred is not None and handlers_transferred > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `handlers_transferred`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if handlers_transferred is not None and handlers_transferred < 0: # noqa: E501
raise ValueError("Invalid value for `handlers_transferred`, must be a value greater than or equal to `0`") # noqa: E501
self._handlers_transferred = handlers_transferred
@property
def records_failed(self):
"""Gets the records_failed of this SyncJobServiceReportItem. # noqa: E501
The number of records failed during export. # noqa: E501
:return: The records_failed of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._records_failed
@records_failed.setter
def records_failed(self, records_failed):
"""Sets the records_failed of this SyncJobServiceReportItem.
The number of records failed during export. # noqa: E501
:param records_failed: The records_failed of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if records_failed is not None and records_failed > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `records_failed`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if records_failed is not None and records_failed < 0: # noqa: E501
raise ValueError("Invalid value for `records_failed`, must be a value greater than or equal to `0`") # noqa: E501
self._records_failed = records_failed
@property
def records_skipped(self):
"""Gets the records_skipped of this SyncJobServiceReportItem. # noqa: E501
The number of records skipped during export. # noqa: E501
:return: The records_skipped of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._records_skipped
@records_skipped.setter
def records_skipped(self, records_skipped):
"""Sets the records_skipped of this SyncJobServiceReportItem.
The number of records skipped during export. # noqa: E501
:param records_skipped: The records_skipped of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if records_skipped is not None and records_skipped > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `records_skipped`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if records_skipped is not None and records_skipped < 0: # noqa: E501
raise ValueError("Invalid value for `records_skipped`, must be a value greater than or equal to `0`") # noqa: E501
self._records_skipped = records_skipped
@property
def records_transferred(self):
"""Gets the records_transferred of this SyncJobServiceReportItem. # noqa: E501
The number of records exported. # noqa: E501
:return: The records_transferred of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._records_transferred
@records_transferred.setter
def records_transferred(self, records_transferred):
"""Sets the records_transferred of this SyncJobServiceReportItem.
The number of records exported. # noqa: E501
:param records_transferred: The records_transferred of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if records_transferred is not None and records_transferred > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `records_transferred`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if records_transferred is not None and records_transferred < 0: # noqa: E501
raise ValueError("Invalid value for `records_transferred`, must be a value greater than or equal to `0`") # noqa: E501
self._records_transferred = records_transferred
@property
def start_time(self):
"""Gets the start_time of this SyncJobServiceReportItem. # noqa: E501
The time the job began this component. # noqa: E501
:return: The start_time of this SyncJobServiceReportItem. # noqa: E501
:rtype: int
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this SyncJobServiceReportItem.
The time the job began this component. # noqa: E501
:param start_time: The start_time of this SyncJobServiceReportItem. # noqa: E501
:type: int
"""
if start_time is not None and start_time > 9223372036854775807: # noqa: E501
raise ValueError("Invalid value for `start_time`, must be a value less than or equal to `9223372036854775807`") # noqa: E501
if start_time is not None and start_time < 0: # noqa: E501
raise ValueError("Invalid value for `start_time`, must be a value greater than or equal to `0`") # noqa: E501
self._start_time = start_time
@property
def status(self):
"""Gets the status of this SyncJobServiceReportItem. # noqa: E501
The current status of export for this component. # noqa: E501
:return: The status of this SyncJobServiceReportItem. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this SyncJobServiceReportItem.
The current status of export for this component. # noqa: E501
:param status: The status of this SyncJobServiceReportItem. # noqa: E501
:type: str
"""
if status is not None and len(status) > 255:
raise ValueError("Invalid value for `status`, length must be less than or equal to `255`") # noqa: E501
if status is not None and len(status) < 0:
raise ValueError("Invalid value for `status`, length must be greater than or equal to `0`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SyncJobServiceReportItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 38.084848
| 284
| 0.644918
|
742cb308b3cb02c117f8ae7ec3d096b4e82449c2
| 17,954
|
bzl
|
Python
|
tensorflow/compiler/aot/tfcompile.bzl
|
so5462/tensorflow
|
0f9acc15ba56ecf01a79889d29295310559c681a
|
[
"Apache-2.0"
] | 10
|
2021-04-29T16:31:02.000Z
|
2021-08-10T13:17:55.000Z
|
tensorflow/compiler/aot/tfcompile.bzl
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 88
|
2020-11-24T08:18:10.000Z
|
2022-03-25T20:28:30.000Z
|
tensorflow/compiler/aot/tfcompile.bzl
|
sseung0703/tensorflow
|
be084bd7a4dd241eb781fc704f57bcacc5c9b6dd
|
[
"Apache-2.0"
] | 9
|
2020-11-06T22:50:15.000Z
|
2021-12-30T01:45:55.000Z
|
"""Build macro that compiles a TensorFlow graph into a cc_library.
To use from your BUILD file, add the following line to load the macro:
load("//tensorflow/compiler/aot:tfcompile.bzl", "tf_library")
Then call the macro like this:
tf_library(
name = "test_graph_tfmatmul",
config = "test_graph_tfmatmul.config.pbtxt",
cpp_class = "MatMulComp",
graph = ":test_graph_tfmatmul.pb",
)
"""
load(
"//tensorflow:tensorflow.bzl",
"if_android",
"tf_cc_test",
"tf_copts",
)
load("//tensorflow:tensorflow.bzl", "tfcompile_target_cpu")
def tf_library(
name,
graph,
config,
debug_info = None,
freeze_checkpoint = None,
freeze_saver = None,
cpp_class = None,
gen_test = True,
gen_benchmark = True,
visibility = None,
testonly = None,
tfcompile_flags = None,
tfcompile_tool = "//tensorflow/compiler/aot:tfcompile",
include_standard_runtime_deps = True,
enable_xla_hlo_profiling = False,
enable_tracemes = False,
mlir_components = "None",
deps = None,
tags = []):
"""Runs tfcompile to compile a TensorFlow graph into executable code with fast
math enabled on cpu.
Given an invocation of tf_library(name="foo", ...), generates the following
build targets:
foo: A cc_library containing the generated header and
computation.
foo_test: A cc_test with simple tests and benchmarks. Only created if
gen_test=True.
foo_benchmark: A cc_binary that runs a minimal-dependency benchmark,
useful for mobile devices or other platforms that can't
compile the full test libraries. Only created if
gen_benchmark=True.
The output header is called <name>.h.
Args:
name: The name of the build rule.
graph: The TensorFlow GraphDef to compile. If the file ends in '.pbtxt'
it is expected to be in the human-readable proto text format, otherwise
it is expected to be in the proto binary format.
config: File containing tensorflow.tf2xla.Config proto. If the file ends
in '.pbtxt' it is expected to be in the human-readable proto text
format, otherwise it is expected to be in the proto binary format.
freeze_checkpoint: If provided, run freeze_graph with this checkpoint to
convert variables into constants.
freeze_saver: If provided, run freeze_graph with this saver, in SaverDef
binary form, to convert variables into constants.
cpp_class: The name of the generated C++ class, wrapping the generated
function. The syntax of this flag is
[[<optional_namespace>::],...]<class_name>. This mirrors the C++ syntax
for referring to a class, where multiple namespaces may precede the
class name, separated by double-colons. The class will be generated in
the given namespace(s), or if no namespaces are given, within the global
namespace.
gen_test: If True, also generate a cc_test rule that builds a simple
test and benchmark.
gen_benchmark: If True, also generate a binary with a simple benchmark.
Unlike the output of gen_test, this benchmark can be run on android.
visibility: Bazel build visibility.
testonly: Bazel testonly attribute.
tfcompile_flags: Extra flags to pass to tfcompile to control compilation.
tfcompile_tool: The tfcompile binary. A non-default can be passed to
use a tfcompile built with extra dependencies.
include_standard_runtime_deps: If True, the standard list of
kernel/runtime deps is added to deps. If False, deps must contain the
full set of deps needed by the generated library.
enable_xla_hlo_profiling: Enable XLA HLO profiling in the generated
program, and emit metadata that lets us pretty-print the gathered
profile counters.
enable_tracemes: Tell tfcompile to generate calls to
TraceMe::Activity{Start|End} around HLO instructions that can be used by
Xprof to construct profiler timelines.
mlir_components: When the value is "None", no components use MLIR. When
the value is "Bridge", use MLIR to translate GraphDef to HLO.
      deps: a list of deps to include on the build rules for the generated
        library, added to the standard deps if include_standard_runtime_deps
        is True.
tags: tags to apply to subsidiary build rules.
"""
if not cpp_class:
fail("cpp_class must be specified")
tfcompile_graph = graph
if freeze_checkpoint or freeze_saver:
if not freeze_checkpoint:
fail("freeze_checkpoint must be specified when freeze_saver is " +
"specified")
freeze_name = "freeze_" + name
freeze_file = freeze_name + ".pb"
# First run tfcompile to generate the list of out_nodes.
#
# Here and below, we set CUDA_VISIBLE_DEVICES='' to prevent the code we
# launch from using any GPUs which might be present. This is important
# because builds may run concurrently with tests, and tests need to be
# able to assume that they have control of the full GPU.
out_nodes_file = "out_nodes_" + freeze_name
native.genrule(
name = ("gen_" + out_nodes_file),
srcs = [config],
outs = [out_nodes_file],
cmd = ("CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --config=$(location " + config + ")" +
" --dump_fetch_nodes > $@"),
exec_tools = [tfcompile_tool],
# Run tfcompile on the build host, rather than forge, since it's
# typically way faster on the local machine.
local = 1,
tags = tags,
)
# Now run freeze_graph to convert variables into constants.
freeze_args = (
" --input_graph=$(location " + graph + ")" +
" --checkpoint_version=1" +
" --input_binary=" + str(not graph.endswith(".pbtxt")) +
" --input_checkpoint=$(location " + freeze_checkpoint + ")" +
" --output_graph=$(location " + freeze_file + ")" +
" --output_node_names=$$(<$(location " + out_nodes_file +
"))"
)
freeze_saver_srcs = []
if freeze_saver:
freeze_args += " --input_saver=$(location " + freeze_saver + ")"
freeze_saver_srcs += [freeze_saver]
native.genrule(
name = freeze_name,
srcs = [
graph,
freeze_checkpoint,
out_nodes_file,
] + freeze_saver_srcs,
outs = [freeze_file],
cmd = (
"CUDA_VISIBLE_DEVICES='' " +
"$(location " +
"//tensorflow/python/tools:freeze_graph)" +
freeze_args
),
exec_tools = ["//tensorflow/python/tools:freeze_graph"],
tags = tags,
)
tfcompile_graph = freeze_file
# Rule that runs tfcompile to produce the header and object file.
header_file = name + ".h"
metadata_object_file = name + "_tfcompile_metadata.o"
function_object_file = name + "_tfcompile_function.o"
    # The XLA backends morph kernel name prefixes __ that are not in the form of
    # __xla_.
ep = ("__xla_" + native.package_name() + "__" + name).replace("/", "_")
if type(tfcompile_flags) == type(""):
flags = tfcompile_flags
else:
flags = " ".join([
"'" + arg.replace("'", "'\\''") + "'"
for arg in (tfcompile_flags or [])
])
# Do this before we append the `select` into `flags`, because doing so
# transforms `flags` into a variable of type `select`, and we can't call
# `find` on such an object.
need_xla_data_proto = flags and flags.find("--gen_program_shape") != -1
target_cpu = tfcompile_target_cpu()
extra_flags = "--target_cpu=" + target_cpu + " " if target_cpu else " "
flags = extra_flags + flags
if enable_xla_hlo_profiling:
profiling_flag = "--xla_hlo_profile"
else:
profiling_flag = ""
if enable_tracemes:
traceme_flag = "--xla_cpu_enable_xprof_traceme=true"
else:
traceme_flag = "--xla_cpu_enable_xprof_traceme=false"
mlir_flag = "--mlir_components=" + mlir_components
srcs = [tfcompile_graph, config]
debug_info_flag = ""
if debug_info:
srcs.append(debug_info)
debug_info_flag = " --debug_info=$(location " + debug_info + ")"
default_fast_math_xla_flags = ("XLA_FLAGS='" +
"--xla_cpu_enable_fast_math=true " +
"--xla_cpu_fast_math_honor_nans=false " +
"--xla_cpu_fast_math_honor_infs=false " +
"--xla_cpu_fast_math_honor_functions=false " +
"--xla_cpu_fast_math_honor_division=false " +
"--xla_cpu_enable_fast_min_max=true " +
"$${XLA_FLAGS:-}' ")
native.genrule(
name = ("gen_" + name),
srcs = srcs,
outs = [
header_file,
metadata_object_file,
function_object_file,
],
cmd = (
default_fast_math_xla_flags +
"CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --graph=$(location " + tfcompile_graph + ")" +
debug_info_flag +
" --config=$(location " + config + ")" +
" --entry_point=" + ep +
" --cpp_class=" + cpp_class +
" --target_triple=" + target_llvm_triple() +
" --out_header=$(@D)/" + header_file +
" --out_metadata_object=$(@D)/" + metadata_object_file +
" --out_function_object=$(@D)/" + function_object_file +
" " + flags + " " + profiling_flag + " " + mlir_flag + " " + traceme_flag
),
exec_tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
# Run tfcompile on the build host since it's typically faster on the
# local machine.
#
# Note that setting the local=1 attribute on a *test target* causes the
# test infrastructure to skip that test. However this is a genrule, not
# a test target, and runs with --strategy=Genrule=forced_forge, meaning
# the local=1 attribute is ignored, and the genrule is still run.
#
# https://www.bazel.io/versions/master/docs/be/general.html#genrule
local = 1,
tags = tags,
)
# Rule that runs tfcompile to produce the SessionModule proto, useful for
# debugging. TODO(b/64813587): Once the SessionModule proto is
# deterministic, move this into the main rule above.
session_module_pb = name + "_session_module.pb"
native.genrule(
name = (name + "_session_module"),
srcs = srcs,
outs = [
session_module_pb,
],
cmd = (
default_fast_math_xla_flags +
"CUDA_VISIBLE_DEVICES='' " +
"$(location " + tfcompile_tool + ")" +
" --graph=$(location " + tfcompile_graph + ")" +
debug_info_flag +
" --config=$(location " + config + ")" +
" --entry_point=" + ep +
" --cpp_class=" + cpp_class +
" --target_triple=" + target_llvm_triple() +
" --out_session_module=$(@D)/" + session_module_pb +
" " + flags
),
exec_tools = [tfcompile_tool],
visibility = visibility,
testonly = testonly,
local = 1,
tags = tags,
)
# The cc_library rule packaging up the header and object file, and needed
# kernel implementations.
native.cc_library(
name = name,
srcs = [function_object_file, metadata_object_file],
hdrs = [header_file],
visibility = visibility,
testonly = testonly,
deps = [
# These deps are required by all tf_library targets even if
# include_standard_runtime_deps is False. Without them, the
# generated code will fail to compile.
"//tensorflow/compiler/tf2xla:xla_compiled_cpu_function",
"//tensorflow/core:framework_lite",
] + (need_xla_data_proto and [
# If we're generating the program shape, we must depend on the
# proto.
"//tensorflow/compiler/xla:xla_data_proto_cc",
] or []) + (enable_xla_hlo_profiling and [
"//tensorflow/compiler/xla/service:hlo_profile_printer_data_cc",
] or []) + (include_standard_runtime_deps and [
# TODO(cwhipkey): only depend on kernel code that the model actually
# needed.
"//tensorflow/compiler/xla/service/cpu:runtime_conv2d",
"//tensorflow/compiler/xla/service/cpu:runtime_key_value_sort",
"//tensorflow/compiler/xla/service/cpu:runtime_matmul",
"//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_conv2d",
"//tensorflow/compiler/xla/service/cpu:runtime_single_threaded_matmul",
"//third_party/eigen3",
] or []) + (deps or []),
tags = tags,
)
# Variables used for gen_test and gen_benchmark.
cpp_class_split = cpp_class.rsplit("::", 2)
if len(cpp_class_split) == 1:
no_ns_name = cpp_class_split[0]
else:
no_ns_name = cpp_class_split[1]
sed_replace = (
"-e \"s|{{TFCOMPILE_HEADER}}|$(location " + header_file + ")|g\" " +
"-e \"s|{{TFCOMPILE_CPP_CLASS}}|" + cpp_class + "|g\" " +
"-e \"s|{{TFCOMPILE_NAME}}|" + no_ns_name + "|g\" "
)
if gen_test:
test_name = name + "_test"
test_file = test_name + ".cc"
# Rule to rewrite test.cc to produce the test_file.
native.genrule(
name = ("gen_" + test_name),
testonly = 1,
srcs = [
"//tensorflow/compiler/aot:test.cc",
header_file,
],
outs = [test_file],
cmd = (
"sed " + sed_replace +
" $(location //tensorflow/compiler/aot:test.cc) " +
"> $(OUTS)"
),
tags = tags,
)
# The cc_test rule for the generated code. To ensure that this works
# reliably across build configurations, we must use tf_cc_test instead
# of native.cc_test. This is related to how we build
# //tensorflow/core:lib -- see the note in
# tensorflow/core/BUILD for more details.
tf_cc_test(
name = test_name,
srcs = [test_file],
deps = [
":" + name,
"//tensorflow/compiler/aot:tf_library_test_main",
"//tensorflow/compiler/xla:executable_run_options",
"//third_party/eigen3",
"//tensorflow/core:lib",
"//tensorflow/core:test",
],
tags = tags,
)
if gen_benchmark:
benchmark_name = name + "_benchmark"
benchmark_file = benchmark_name + ".cc"
benchmark_main = ("//tensorflow/compiler/aot:" +
"benchmark_main.template")
# Rule to rewrite benchmark.cc to produce the benchmark_file.
native.genrule(
name = ("gen_" + benchmark_name),
srcs = [
benchmark_main,
header_file,
],
testonly = testonly,
outs = [benchmark_file],
cmd = ("sed " + sed_replace +
" $(location " + benchmark_main + ") " +
"> $(OUTS)"),
tags = tags,
)
# The cc_benchmark rule for the generated code. This does not need the
# tf_cc_binary since we (by deliberate design) do not depend on
# //tensorflow/core:lib.
#
# Note: to get smaller size on android for comparison, compile with:
# --copt=-fvisibility=hidden
# --copt=-D_LIBCPP_TYPE_VIS=_LIBCPP_HIDDEN
# --copt=-D_LIBCPP_EXCEPTION_ABI=_LIBCPP_HIDDEN
native.cc_binary(
name = benchmark_name,
srcs = [benchmark_file],
testonly = testonly,
copts = tf_copts(),
linkopts = if_android(["-pie", "-s"]),
deps = [
":" + name,
"//tensorflow/compiler/aot:benchmark",
"//tensorflow/compiler/xla:executable_run_options",
"//third_party/eigen3",
] + if_android([
"//tensorflow/compiler/aot:benchmark_extra_android",
]),
tags = tags,
)
def target_llvm_triple():
"""Returns the target LLVM triple to be used for compiling the target."""
# TODO(toddw): Add target_triple for other targets. For details see:
# http://llvm.org/docs/doxygen/html/Triple_8h_source.html
return select({
"//tensorflow:android_armeabi": "armv5-none-android",
"//tensorflow:android_arm": "armv7-none-android",
"//tensorflow:android_arm64": "aarch64-none-android",
"//tensorflow:android_x86": "i686-none-android",
"//tensorflow:ios": "arm64-none-ios",
"//tensorflow:ios_x86_64": "x86_64-apple-ios",
"//tensorflow:linux_ppc64le": "ppc64le-ibm-linux-gnu",
"//tensorflow:macos": "x86_64-none-darwin",
"//tensorflow:windows": "x86_64-none-windows",
"//tensorflow:linux_s390x": "systemz-none-linux-gnu",
"//conditions:default": "x86_64-pc-linux",
})
| 40.804545
| 85
| 0.58104
|
10b6cb3d2da7857a2b193c7e1c4a88244ac992e8
| 18,609
|
py
|
Python
|
model.py
|
18wh1a0590/NLP-Text-Generation
|
1a4115875d6898d7491953cffded6153cb9ff221
|
[
"MIT"
] | 25
|
2021-06-12T11:38:07.000Z
|
2022-03-17T13:32:02.000Z
|
model.py
|
18wh1a0590/NLP-Text-Generation
|
1a4115875d6898d7491953cffded6153cb9ff221
|
[
"MIT"
] | 3
|
2021-07-18T19:20:41.000Z
|
2022-03-09T09:35:37.000Z
|
model.py
|
18wh1a0590/NLP-Text-Generation
|
1a4115875d6898d7491953cffded6153cb9ff221
|
[
"MIT"
] | 6
|
2021-07-27T15:08:32.000Z
|
2022-02-18T19:01:35.000Z
|
import torch
from torch.functional import norm
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
import math
from itertools import combinations
from torch.nn.init import xavier_normal_
from torch.nn.modules.activation import MultiheadAttention
from torch.autograd import Variable
import torchvision.models as models
from utils import extract_class_indices, cos_sim
from einops import rearrange
class CNN_FSHead(nn.Module):
"""
Base class which handles a few-shot method. Contains a resnet backbone which computes features.
"""
def __init__(self, args):
super(CNN_FSHead, self).__init__()
self.train()
self.args = args
last_layer_idx = -1
if self.args.backbone == "resnet18":
backbone = models.resnet18(pretrained=True)
elif self.args.backbone == "resnet34":
backbone = models.resnet34(pretrained=True)
elif self.args.backbone == "resnet50":
backbone = models.resnet50(pretrained=True)
if self.args.pretrained_backbone is not None:
checkpoint = torch.load(self.args.pretrained_backbone)
backbone.load_state_dict(checkpoint)
self.backbone = nn.Sequential(*list(backbone.children())[:last_layer_idx])
def get_feats(self, support_images, target_images):
"""
Takes in images from the support set and query video and returns CNN features.
"""
support_features = self.backbone(support_images).squeeze()
target_features = self.backbone(target_images).squeeze()
dim = int(support_features.shape[1])
support_features = support_features.reshape(-1, self.args.seq_len, dim)
target_features = target_features.reshape(-1, self.args.seq_len, dim)
return support_features, target_features
def forward(self, support_images, support_labels, target_images):
"""
Should return a dict containing logits which are required for computing accuracy. Dict can also contain
other info needed to compute the loss. E.g. inter class distances.
"""
raise NotImplementedError
def distribute_model(self):
"""
Use to split the backbone evenly over all GPUs. Modify if you have other components
"""
if self.args.num_gpus > 1:
self.backbone.cuda(0)
self.backbone = torch.nn.DataParallel(self.backbone, device_ids=[i for i in range(0, self.args.num_gpus)])
def loss(self, task_dict, model_dict):
"""
        Takes in the task dict containing labels etc.
Takes in the model output dict, which contains "logits", as well as any other info needed to compute the loss.
Default is cross entropy loss.
"""
return F.cross_entropy(model_dict["logits"], task_dict["target_labels"].long())
class PositionalEncoding(nn.Module):
"""
Positional encoding from the Transformer paper.
"""
def __init__(self, d_model, dropout, max_len=5000, pe_scale_factor=0.1):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pe_scale_factor = pe_scale_factor
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term) * self.pe_scale_factor
pe[:, 1::2] = torch.cos(position * div_term) * self.pe_scale_factor
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)], requires_grad=False)
return self.dropout(x)
class TemporalCrossTransformer(nn.Module):
"""
A temporal cross transformer for a single tuple cardinality. E.g. pairs or triples.
"""
def __init__(self, args, temporal_set_size=3):
super(TemporalCrossTransformer, self).__init__()
self.args = args
self.temporal_set_size = temporal_set_size
max_len = int(self.args.seq_len * 1.5)
self.pe = PositionalEncoding(self.args.trans_linear_in_dim, self.args.trans_dropout, max_len=max_len)
self.k_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()
self.v_linear = nn.Linear(self.args.trans_linear_in_dim * temporal_set_size, self.args.trans_linear_out_dim)#.cuda()
self.norm_k = nn.LayerNorm(self.args.trans_linear_out_dim)
self.norm_v = nn.LayerNorm(self.args.trans_linear_out_dim)
self.class_softmax = torch.nn.Softmax(dim=1)
# generate all tuples
frame_idxs = [i for i in range(self.args.seq_len)]
frame_combinations = combinations(frame_idxs, temporal_set_size)
self.tuples = nn.ParameterList([nn.Parameter(torch.tensor(comb), requires_grad=False) for comb in frame_combinations])
self.tuples_len = len(self.tuples)
def forward(self, support_set, support_labels, queries):
n_queries = queries.shape[0]
n_support = support_set.shape[0]
# static pe
support_set = self.pe(support_set)
queries = self.pe(queries)
# construct new queries and support set made of tuples of images after pe
s = [torch.index_select(support_set, -2, p).reshape(n_support, -1) for p in self.tuples]
q = [torch.index_select(queries, -2, p).reshape(n_queries, -1) for p in self.tuples]
support_set = torch.stack(s, dim=-2)
queries = torch.stack(q, dim=-2)
# apply linear maps
support_set_ks = self.k_linear(support_set)
queries_ks = self.k_linear(queries)
support_set_vs = self.v_linear(support_set)
queries_vs = self.v_linear(queries)
# apply norms where necessary
mh_support_set_ks = self.norm_k(support_set_ks)
mh_queries_ks = self.norm_k(queries_ks)
mh_support_set_vs = support_set_vs
mh_queries_vs = queries_vs
unique_labels = torch.unique(support_labels)
# init tensor to hold distances between every support tuple and every target tuple
all_distances_tensor = torch.zeros(n_queries, self.args.way, device=queries.device)
for label_idx, c in enumerate(unique_labels):
# select keys and values for just this class
class_k = torch.index_select(mh_support_set_ks, 0, extract_class_indices(support_labels, c))
class_v = torch.index_select(mh_support_set_vs, 0, extract_class_indices(support_labels, c))
k_bs = class_k.shape[0]
class_scores = torch.matmul(mh_queries_ks.unsqueeze(1), class_k.transpose(-2,-1)) / math.sqrt(self.args.trans_linear_out_dim)
# reshape etc. to apply a softmax for each query tuple
class_scores = class_scores.permute(0,2,1,3)
class_scores = class_scores.reshape(n_queries, self.tuples_len, -1)
class_scores = [self.class_softmax(class_scores[i]) for i in range(n_queries)]
class_scores = torch.cat(class_scores)
class_scores = class_scores.reshape(n_queries, self.tuples_len, -1, self.tuples_len)
class_scores = class_scores.permute(0,2,1,3)
# get query specific class prototype
query_prototype = torch.matmul(class_scores, class_v)
query_prototype = torch.sum(query_prototype, dim=1)
# calculate distances from queries to query-specific class prototypes
diff = mh_queries_vs - query_prototype
norm_sq = torch.norm(diff, dim=[-2,-1])**2
distance = torch.div(norm_sq, self.tuples_len)
# multiply by -1 to get logits
distance = distance * -1
c_idx = c.long()
all_distances_tensor[:,c_idx] = distance
return_dict = {'logits': all_distances_tensor}
return return_dict
class CNN_TRX(CNN_FSHead):
"""
Backbone connected to Temporal Cross Transformers of multiple cardinalities.
"""
def __init__(self, args):
super(CNN_TRX, self).__init__(args)
#fill default args
self.args.trans_linear_out_dim = 1152
self.args.temp_set = [2,3]
self.args.trans_dropout = 0.1
self.transformers = nn.ModuleList([TemporalCrossTransformer(args, s) for s in args.temp_set])
def forward(self, support_images, support_labels, target_images):
support_features, target_features = self.get_feats(support_images, target_images)
all_logits = [t(support_features, support_labels, target_features)['logits'] for t in self.transformers]
all_logits = torch.stack(all_logits, dim=-1)
sample_logits = all_logits
sample_logits = torch.mean(sample_logits, dim=[-1])
return_dict = {'logits': sample_logits}
return return_dict
def distribute_model(self):
"""
Distributes the CNNs over multiple GPUs. Leaves TRX on GPU 0.
:return: Nothing
"""
if self.args.num_gpus > 1:
self.backbone.cuda(0)
self.backbone = torch.nn.DataParallel(self.backbone, device_ids=[i for i in range(0, self.args.num_gpus)])
self.transformers.cuda(0)
def OTAM_cum_dist(dists, lbda=0.1):
"""
    Calculates the OTAM distances for sequences in one direction (e.g. query to support).
    :input: Tensor with frame distance scores of shape [n_queries, n_support, query_seq_len, support_seq_len]
    TODO: clean up if possible - currently messy to work with pt1.8. Possibly due to stack operation?
"""
dists = F.pad(dists, (1,1), 'constant', 0)
cum_dists = torch.zeros(dists.shape, device=dists.device)
# top row
for m in range(1, dists.shape[3]):
# cum_dists[:,:,0,m] = dists[:,:,0,m] - lbda * torch.log( torch.exp(- cum_dists[:,:,0,m-1]))
# paper does continuous relaxation of the cum_dists entry, but it trains faster without, so using the simpler version for now:
cum_dists[:,:,0,m] = dists[:,:,0,m] + cum_dists[:,:,0,m-1]
# remaining rows
for l in range(1,dists.shape[2]):
#first non-zero column
cum_dists[:,:,l,1] = dists[:,:,l,1] - lbda * torch.log( torch.exp(- cum_dists[:,:,l-1,0] / lbda) + torch.exp(- cum_dists[:,:,l-1,1] / lbda) + torch.exp(- cum_dists[:,:,l,0] / lbda) )
#middle columns
for m in range(2,dists.shape[3]-1):
cum_dists[:,:,l,m] = dists[:,:,l,m] - lbda * torch.log( torch.exp(- cum_dists[:,:,l-1,m-1] / lbda) + torch.exp(- cum_dists[:,:,l,m-1] / lbda ) )
#last column
#cum_dists[:,:,l,-1] = dists[:,:,l,-1] - lbda * torch.log( torch.exp(- cum_dists[:,:,l-1,-2] / lbda) + torch.exp(- cum_dists[:,:,l,-2] / lbda) )
cum_dists[:,:,l,-1] = dists[:,:,l,-1] - lbda * torch.log( torch.exp(- cum_dists[:,:,l-1,-2] / lbda) + torch.exp(- cum_dists[:,:,l-1,-1] / lbda) + torch.exp(- cum_dists[:,:,l,-2] / lbda) )
return cum_dists[:,:,-1,-1]
class CNN_OTAM(CNN_FSHead):
"""
OTAM with a CNN backbone.
"""
def __init__(self, args):
super(CNN_OTAM, self).__init__(args)
def forward(self, support_images, support_labels, target_images):
support_features, target_features = self.get_feats(support_images, target_images)
unique_labels = torch.unique(support_labels)
n_queries = target_features.shape[0]
n_support = support_features.shape[0]
support_features = rearrange(support_features, 'b s d -> (b s) d')
target_features = rearrange(target_features, 'b s d -> (b s) d')
frame_sim = cos_sim(target_features, support_features)
frame_dists = 1 - frame_sim
dists = rearrange(frame_dists, '(tb ts) (sb ss) -> tb sb ts ss', tb = n_queries, sb = n_support)
# calculate query -> support and support -> query
cum_dists = OTAM_cum_dist(dists) + OTAM_cum_dist(rearrange(dists, 'tb sb ts ss -> tb sb ss ts'))
class_dists = [torch.mean(torch.index_select(cum_dists, 1, extract_class_indices(support_labels, c)), dim=1) for c in unique_labels]
class_dists = torch.stack(class_dists)
class_dists = rearrange(class_dists, 'c q -> q c')
return_dict = {'logits': - class_dists}
return return_dict
def loss(self, task_dict, model_dict):
return F.cross_entropy(model_dict["logits"], task_dict["target_labels"].long())
class CNN_TSN(CNN_FSHead):
"""
TSN with a CNN backbone.
Either cosine similarity or negative norm squared distance.
Use mean distance from query to class videos.
"""
def __init__(self, args):
super(CNN_TSN, self).__init__(args)
self.norm_sq_dist = False
def forward(self, support_images, support_labels, target_images):
support_features, target_features = self.get_feats(support_images, target_images)
unique_labels = torch.unique(support_labels)
support_features = torch.mean(support_features, dim=1)
target_features = torch.mean(target_features, dim=1)
if self.norm_sq_dist:
class_prototypes = [torch.mean(torch.index_select(support_features, 0, extract_class_indices(support_labels, c)), dim=0) for c in unique_labels]
class_prototypes = torch.stack(class_prototypes)
diffs = [target_features - class_prototypes[i] for i in unique_labels]
diffs = torch.stack(diffs)
norm_sq = torch.norm(diffs, dim=[-1])**2
distance = - rearrange(norm_sq, 'c q -> q c')
return_dict = {'logits': distance}
else:
class_sim = cos_sim(target_features, support_features)
class_sim = [torch.mean(torch.index_select(class_sim, 1, extract_class_indices(support_labels, c)), dim=1) for c in unique_labels]
class_sim = torch.stack(class_sim)
class_sim = rearrange(class_sim, 'c q -> q c')
return_dict = {'logits': class_sim}
return return_dict
class CNN_PAL(CNN_FSHead):
"""
PAL with a CNN backbone. Cosine similarity as distance measure.
"""
def __init__(self, args):
super(CNN_PAL, self).__init__(args)
self.mha = MultiheadAttention(embed_dim=self.args.trans_linear_in_dim, num_heads=1, dropout=0)
self.cos_sim = torch.nn.CosineSimilarity()
self.loss_lambda = 1
def forward(self, support_images, support_labels, target_images):
support_features, target_features = self.get_feats(support_images, target_images)
unique_labels = torch.unique(support_labels)
support_features = torch.mean(support_features, dim=1)
target_features = torch.mean(target_features, dim=1)
support_features = rearrange(support_features, 'n d -> n 1 d')
target_features = rearrange(target_features, 'n d -> n 1 d')
support_features = support_features + self.mha(support_features, support_features, support_features)[0]
target_features = target_features + self.mha(target_features, support_features, support_features)[0]
support_features = rearrange(support_features, 'b 1 d -> b d')
target_features = rearrange(target_features, 'b 1 d -> b d')
prototypes = [torch.mean(torch.index_select(support_features, 0, extract_class_indices(support_labels, c)), dim=0) for c in unique_labels]
prototypes = torch.stack(prototypes)
q_s_sim = cos_sim(target_features, prototypes)
return_dict = {'logits': q_s_sim}
return return_dict
def loss(self, task_dict, model_dict):
"""
Computes cross entropy loss on the logits, and the additional loss between the queries and their correct classes.
"""
q_s_sim = model_dict["logits"]
l_meta = F.cross_entropy(q_s_sim, task_dict["target_labels"].long())
pcc_q_s_sim = q_s_sim
pcc_q_s_sim = torch.sigmoid(q_s_sim)
unique_labels = torch.unique(task_dict["support_labels"])
total_q_c_sim = torch.sum(pcc_q_s_sim, dim=0) + 0.1
q_c_sim = [torch.sum(torch.index_select(pcc_q_s_sim, 0, extract_class_indices(task_dict["target_labels"], c)), dim=0) for c in unique_labels]
q_c_sim = torch.stack(q_c_sim)
q_c_sim = torch.diagonal(q_c_sim)
q_c_sim = torch.div(q_c_sim, total_q_c_sim)
l_pcc = - torch.mean(torch.log(q_c_sim))
return l_meta + self.loss_lambda * l_pcc
if __name__ == "__main__":
class ArgsObject(object):
def __init__(self):
self.trans_linear_in_dim = 512
self.trans_linear_out_dim = 128
self.way = 4
self.shot = 3
self.query_per_class = 2
self.trans_dropout = 0.1
self.seq_len = 5
self.img_size = 84
self.backbone = "resnet18"
self.num_gpus = 1
self.temp_set = [2,3]
self.pretrained_backbone = None
args = ArgsObject()
torch.manual_seed(0)
device = 'cpu'
# device = 'cuda:0'
# model = CNN_TRX(args).to(device)
model = CNN_OTAM(args).to(device)
# model = CNN_TSN(args).to(device)
# model = CNN_PAL(args).to(device)
support_imgs = torch.rand(args.way * args.shot * args.seq_len,3, args.img_size, args.img_size).to(device)
target_imgs = torch.rand(args.way * args.query_per_class * args.seq_len ,3, args.img_size, args.img_size).to(device)
support_labels = torch.tensor([n for n in range(args.way)] * args.shot).to(device)
target_labels = torch.tensor([n for n in range(args.way)] * args.query_per_class).to(device)
print("Support images input shape: {}".format(support_imgs.shape))
print("Target images input shape: {}".format(target_imgs.shape))
print("Support labels input shape: {}".format(support_imgs.shape))
task_dict = {}
task_dict["support_set"] = support_imgs
task_dict["support_labels"] = support_labels
task_dict["target_set"] = target_imgs
task_dict["target_labels"] = target_labels
model_dict = model(support_imgs, support_labels, target_imgs)
print("Model returns the distances from each query to each class prototype. Use these as logits. Shape: {}".format(model_dict['logits'].shape))
loss = model.loss(task_dict, model_dict)
| 40.542484
| 195
| 0.653501
|
441b2a22730130d816811a6f411973bc2e8a43aa
| 43
|
py
|
Python
|
Python_Codes_for_BJ/stage03 for문 사용하기/N 찍기.py
|
ch96an/BaekJoonSolution
|
25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33
|
[
"MIT"
] | null | null | null |
Python_Codes_for_BJ/stage03 for문 사용하기/N 찍기.py
|
ch96an/BaekJoonSolution
|
25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33
|
[
"MIT"
] | null | null | null |
Python_Codes_for_BJ/stage03 for문 사용하기/N 찍기.py
|
ch96an/BaekJoonSolution
|
25594fda5ba1c0c4d26ff0828ec8dcf2f6572d33
|
[
"MIT"
] | null | null | null |
for i in range(1,int(input())+1):
print(i)
| 21.5
| 33
| 0.627907
|
238ee3a7c703fe6e5d3d7c31bfbfbc24bdafbc5d
| 13,505
|
py
|
Python
|
tests/flow/test_function_calls.py
|
TatianaJin/RedisGraph
|
a03a79f95f12d1376d848b3963d4a729019b6b0b
|
[
"MIT",
"Ruby",
"ISC",
"BSD-3-Clause"
] | 1
|
2021-01-26T21:02:32.000Z
|
2021-01-26T21:02:32.000Z
|
tests/flow/test_function_calls.py
|
danitseitlin/RedisGraph
|
6e7c5cc2c06574292b88f905d271e83c95b7ee3b
|
[
"MIT",
"Ruby",
"ISC",
"BSD-3-Clause"
] | null | null | null |
tests/flow/test_function_calls.py
|
danitseitlin/RedisGraph
|
6e7c5cc2c06574292b88f905d271e83c95b7ee3b
|
[
"MIT",
"Ruby",
"ISC",
"BSD-3-Clause"
] | null | null | null |
import redis
import os
import sys
from RLTest import Env
from base import FlowTestsBase
from redisgraph import Graph, Node, Edge
graph = None
redis_con = None
people = ["Roi", "Alon", "Ailon", "Boaz"]
class testFunctionCallsFlow(FlowTestsBase):
def __init__(self):
self.env = Env()
global graph
global redis_con
redis_con = self.env.getConnection()
graph = Graph("G", redis_con)
self.populate_graph()
def populate_graph(self):
global graph
nodes = {}
# Create entities
for idx, p in enumerate(people):
node = Node(label="person", properties={"name": p, "val": idx})
graph.add_node(node)
nodes[p] = node
# Fully connected graph
for src in nodes:
for dest in nodes:
if src != dest:
edge = Edge(nodes[src], "know", nodes[dest])
graph.add_edge(edge)
for src in nodes:
for dest in nodes:
if src != dest:
edge = Edge(nodes[src], "works_with", nodes[dest])
graph.add_edge(edge)
graph.commit()
query = """MATCH (a)-[:know]->(b) CREATE (a)-[:know]->(b)"""
graph.query(query)
def expect_type_error(self, query):
try:
graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
# Expecting a type error.
self.env.assertIn("Type mismatch", e.message)
def expect_error(self, query, expected_err_msg):
try:
graph.query(query)
assert(False)
except redis.exceptions.ResponseError as e:
            # Expecting the specified error message.
self.env.assertIn(expected_err_msg, e.message)
# Validate capturing of errors prior to query execution.
def test01_compile_time_errors(self):
query = """RETURN toUpper(5)"""
self.expect_type_error(query)
query = """RETURN 'a' * 2"""
self.expect_type_error(query)
query = """RETURN max(1 + min(2))"""
self.expect_error(query, "Can't use aggregate functions inside of aggregate functions")
def test02_boolean_comparisons(self):
query = """RETURN true = 5"""
actual_result = graph.query(query)
expected_result = [[False]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """RETURN true <> 'str'"""
actual_result = graph.query(query)
expected_result = [[True]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """RETURN 'anything' <> NULL"""
actual_result = graph.query(query)
expected_result = [[None]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """RETURN 'anything' = NULL"""
actual_result = graph.query(query)
expected_result = [[None]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """RETURN 10 >= 1.5"""
actual_result = graph.query(query)
expected_result = [[True]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """RETURN -1 < 1"""
actual_result = graph.query(query)
expected_result = [[True]]
self.env.assertEquals(actual_result.result_set, expected_result)
def test03_boolean_errors(self):
query = """RETURN 'str' < 5.5"""
self.expect_type_error(query)
query = """RETURN true > 5"""
self.expect_type_error(query)
query = """MATCH (a) RETURN a < 'anything' LIMIT 1"""
self.expect_type_error(query)
def test04_entity_functions(self):
query = "RETURN ID(5)"
self.expect_type_error(query)
query = "MATCH (a) RETURN ID(a) ORDER BY ID(a) LIMIT 3"
actual_result = graph.query(query)
expected_result = [[0], [1], [2]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "MATCH (a)-[e]->() RETURN ID(e) ORDER BY ID(e) LIMIT 3"
actual_result = graph.query(query)
expected_result = [[0], [1], [2]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "RETURN EXISTS(null)"
actual_result = graph.query(query)
expected_result = [[False]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "RETURN EXISTS('anything')"
actual_result = graph.query(query)
expected_result = [[True]]
self.env.assertEquals(actual_result.result_set, expected_result)
def test07_nonmap_errors(self):
query = """MATCH (a) WITH a.name AS scalar RETURN scalar.name"""
self.expect_type_error(query)
def test08_apply_all_function(self):
query = "MATCH () RETURN COUNT(*)"
actual_result = graph.query(query)
expected_result = [[4]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "UNWIND [1, 2] AS a RETURN COUNT(*)"
actual_result = graph.query(query)
expected_result = [[2]]
self.env.assertEquals(actual_result.result_set, expected_result)
def test09_static_aggregation(self):
query = "RETURN count(*)"
actual_result = graph.query(query)
expected_result = [[1]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "RETURN max(2)"
actual_result = graph.query(query)
expected_result = [[2]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = "RETURN min(3)"
actual_result = graph.query(query)
expected_result = [[3]]
self.env.assertEquals(actual_result.result_set, expected_result)
def test10_modulo_inputs(self):
# Validate modulo with integer inputs.
query = "RETURN 5 % 2"
actual_result = graph.query(query)
expected_result = [[1]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Validate modulo with a floating-point dividend.
query = "RETURN 5.5 % 2"
actual_result = graph.query(query)
expected_result = [[1.5]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Validate modulo with a floating-point divisor.
query = "RETURN 5 % 2.5"
actual_result = graph.query(query)
expected_result = [[0]]
self.env.assertEquals(actual_result.result_set, expected_result)
        # Validate modulo with both a floating-point dividend and a floating-point divisor.
query = "RETURN 5.5 % 2.5"
actual_result = graph.query(query)
expected_result = [[0.5]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Validate modulo with negative integer inputs.
query = "RETURN -5 % -2"
actual_result = graph.query(query)
expected_result = [[-1]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Validate modulo with negative floating-point inputs.
query = "RETURN -5.5 % -2.5"
actual_result = graph.query(query)
expected_result = [[-0.5]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Aggregate functions should handle null inputs appropriately.
def test11_null_aggregate_function_inputs(self):
# SUM should sum all non-null inputs.
query = """UNWIND [1, NULL, 3] AS a RETURN sum(a)"""
actual_result = graph.query(query)
expected_result = [[4]]
self.env.assertEquals(actual_result.result_set, expected_result)
# SUM should return 0 given a fully NULL input.
query = """WITH NULL AS a RETURN sum(a)"""
actual_result = graph.query(query)
expected_result = [[0]]
self.env.assertEquals(actual_result.result_set, expected_result)
# COUNT should count all non-null inputs.
query = """UNWIND [1, NULL, 3] AS a RETURN count(a)"""
actual_result = graph.query(query)
expected_result = [[2]]
self.env.assertEquals(actual_result.result_set, expected_result)
# COUNT should return 0 given a fully NULL input.
query = """WITH NULL AS a RETURN count(a)"""
actual_result = graph.query(query)
expected_result = [[0]]
self.env.assertEquals(actual_result.result_set, expected_result)
# COLLECT should ignore null inputs.
query = """UNWIND [1, NULL, 3] AS a RETURN collect(a)"""
actual_result = graph.query(query)
expected_result = [[[1, 3]]]
self.env.assertEquals(actual_result.result_set, expected_result)
# COLLECT should return an empty array on all null inputs.
query = """WITH NULL AS a RETURN collect(a)"""
actual_result = graph.query(query)
expected_result = [[[]]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Verify that nested functions that perform heap allocations return properly.
def test12_nested_heap_functions(self):
query = """MATCH p = (n) WITH head(nodes(p)) AS node RETURN node.name ORDER BY node.name"""
actual_result = graph.query(query)
expected_result = [['Ailon'],
['Alon'],
['Boaz'],
['Roi']]
self.env.assertEquals(actual_result.result_set, expected_result)
# CASE...WHEN statements should properly handle NULL, false, and true evaluations.
def test13_case_when_inputs(self):
# Simple case form: single value evaluation.
query = """UNWIND [NULL, true, false] AS v RETURN v, CASE v WHEN true THEN v END"""
actual_result = graph.query(query)
expected_result = [[None, None],
[True, True],
[False, None]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """UNWIND [NULL, true, false] AS v RETURN v, CASE v WHEN true THEN v WHEN false THEN v END"""
actual_result = graph.query(query)
expected_result = [[None, None],
[True, True],
[False, False]]
self.env.assertEquals(actual_result.result_set, expected_result)
# Generic case form: evaluation for each case.
query = """UNWIND [NULL, true, false] AS v RETURN v, CASE WHEN v THEN v END"""
actual_result = graph.query(query)
# Only the true value should return non-NULL.
expected_result = [[None, None],
[True, True],
[False, None]]
self.env.assertEquals(actual_result.result_set, expected_result)
query = """UNWIND [NULL, true, false] AS v RETURN v, CASE WHEN v IS NOT NULL THEN v END"""
actual_result = graph.query(query)
# The true and false values should both return non-NULL.
expected_result = [[None, None],
[True, True],
[False, False]]
self.env.assertEquals(actual_result.result_set, expected_result)
# CASE...WHEN statements should manage allocated values properly.
def test14_case_when_memory_management(self):
# Simple case form: single value evaluation.
query = """WITH 'A' AS a WITH CASE a WHEN 'A' THEN toString(a) END AS key RETURN toLower(key)"""
actual_result = graph.query(query)
expected_result = [['a']]
self.env.assertEquals(actual_result.result_set, expected_result)
# Generic case form: evaluation for each case.
query = """WITH 'A' AS a WITH CASE WHEN true THEN toString(a) END AS key RETURN toLower(key)"""
actual_result = graph.query(query)
expected_result = [['a']]
self.env.assertEquals(actual_result.result_set, expected_result)
def test15_aggregate_error_handling(self):
functions = ["avg",
"collect",
"count",
"max",
"min",
"sum",
"percentileDisc",
"percentileCont",
"stDev"]
# Test all functions for invalid argument counts.
for function in functions:
query = """UNWIND range(0, 10) AS val RETURN %s(val, val, val)""" % (function)
self.expect_error(query, "Received 3 arguments")
# Test numeric functions for invalid input types.
numeric_functions = ["avg",
"sum",
"stDev"]
for function in numeric_functions:
query = """UNWIND ['a', 'b', 'c'] AS val RETURN %s(val)""" % (function)
self.expect_type_error(query)
# Test invalid numeric input for percentile function.
query = """UNWIND range(0, 10) AS val RETURN percentileDisc(val, -1)"""
self.expect_error(query, "must be a number in the range 0.0 to 1.0")
# startNode and endNode calls should return the appropriate nodes.
def test16_edge_endpoints(self):
query = """MATCH (a)-[e]->(b) RETURN a.name, startNode(e).name, b.name, endNode(e).name"""
actual_result = graph.query(query)
for row in actual_result.result_set:
self.env.assertEquals(row[0], row[1])
self.env.assertEquals(row[2], row[3])
| 39.955621
| 109
| 0.606886
|
a620a35ba2096f9ff489fd28d358ca7a4712e612
| 305
|
py
|
Python
|
data/multilingual/Latn.NDS/Serif_12/pdf_to_json_test_Latn.NDS_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | 1
|
2021-09-19T19:47:35.000Z
|
2021-09-19T19:47:35.000Z
|
data/multilingual/Latn.NDS/Serif_12/pdf_to_json_test_Latn.NDS_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
data/multilingual/Latn.NDS/Serif_12/pdf_to_json_test_Latn.NDS_Serif_12.py
|
antoinecarme/pdf_to_json_tests
|
d57a024fde862e698d916a1178f285883d7a3b2f
|
[
"BSD-3-Clause"
] | null | null | null |
import pdf_to_json as p2j
import json
url = "file:data/multilingual/Latn.NDS/Serif_12/udhr_Latn.NDS_Serif_12.pdf"
lConverter = p2j.pdf_to_json.pdf_to_json_converter()
lConverter.mImageHashOnly = True
lDict = lConverter.convert(url)
print(json.dumps(lDict, indent=4, ensure_ascii=False, sort_keys=True))
| 30.5
| 75
| 0.813115
|
190fc37d153e9a196b62ac804907ed1e180bd8a6
| 72,149
|
py
|
Python
|
show/muxcable.py
|
Yakiv-Huryk/sonic-utilities
|
a8a7edbb4a0bc1c743dc9f963767520188b593a9
|
[
"Apache-2.0"
] | null | null | null |
show/muxcable.py
|
Yakiv-Huryk/sonic-utilities
|
a8a7edbb4a0bc1c743dc9f963767520188b593a9
|
[
"Apache-2.0"
] | null | null | null |
show/muxcable.py
|
Yakiv-Huryk/sonic-utilities
|
a8a7edbb4a0bc1c743dc9f963767520188b593a9
|
[
"Apache-2.0"
] | null | null | null |
import json
import sys
import time
import click
import re
import utilities_common.cli as clicommon
from natsort import natsorted
from collections import OrderedDict
from operator import itemgetter
from sonic_py_common import multi_asic
from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
from swsscommon import swsscommon
from tabulate import tabulate
from utilities_common import platform_sfputil_helper
platform_sfputil = None
REDIS_TIMEOUT_MSECS = 0
SELECT_TIMEOUT = 1000
# The empty namespace refers to linux host namespace.
EMPTY_NAMESPACE = ''
CONFIG_SUCCESSFUL = 0
CONFIG_FAIL = 1
EXIT_FAIL = 1
EXIT_SUCCESS = 0
STATUS_FAIL = 1
STATUS_SUCCESSFUL = 0
VENDOR_NAME = "Credo"
VENDOR_MODEL_REGEX = re.compile(r"CAC\w{3}321P2P\w{2}MS")
def db_connect(db_name, namespace=EMPTY_NAMESPACE):
return swsscommon.DBConnector(db_name, REDIS_TIMEOUT_MSECS, True, namespace)
def delete_all_keys_in_db_table(db_type, table_name):
redis_db = {}
table = {}
table_keys = {}
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
redis_db[asic_id] = db_connect(db_type, namespace)
table[asic_id] = swsscommon.Table(redis_db[asic_id], table_name)
table_keys[asic_id] = table[asic_id].getKeys()
for key in table_keys[asic_id]:
table[asic_id]._del(key)
target_dict = { "NIC":"0",
"TORA":"1",
"TORB":"2",
"LOCAL":"3"}
def parse_target(target):
return target_dict.get(target, None)
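# For example, parse_target("TORA") returns "1", while an unknown target returns None.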
def check_port_in_mux_cable_table(port):
per_npu_configdb = {}
mux_tbl_cfg_db = {}
port_mux_tbl_keys = {}
    # Getting all front end ASIC namespaces and the corresponding config and state DB connectors
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
# TO-DO replace the macros with correct swsscommon names
per_npu_configdb[asic_id] = ConfigDBConnector(use_unix_socket_path=False, namespace=namespace)
per_npu_configdb[asic_id].connect()
mux_tbl_cfg_db[asic_id] = per_npu_configdb[asic_id].get_table("MUX_CABLE")
port_mux_tbl_keys[asic_id] = mux_tbl_cfg_db[asic_id].keys()
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant retrieve mux cable table entries".format(port))
return False
if port in port_mux_tbl_keys[asic_index]:
return True
return False
def get_response_for_version(port, mux_info_dict):
state_db = {}
xcvrd_show_fw_res_tbl = {}
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = db_connect("STATE_DB", namespace)
xcvrd_show_fw_res_tbl[asic_id] = swsscommon.Table(state_db[asic_id], "XCVRD_SHOW_FW_RES")
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
click.echo("ERR: This is not a valid port, valid ports ({})".format(", ".join(logical_port_list)))
rc = EXIT_FAIL
res_dict[1] = rc
return mux_info_dict
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port))
rc = CONFIG_FAIL
res_dict[1] = rc
return mux_info_dict
(status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
res_dir = dict(fvp)
mux_info_dict["version_nic_active"] = res_dir.get("version_nic_active", None)
mux_info_dict["version_nic_inactive"] = res_dir.get("version_nic_inactive", None)
mux_info_dict["version_nic_next"] = res_dir.get("version_nic_next", None)
mux_info_dict["version_peer_active"] = res_dir.get("version_peer_active", None)
mux_info_dict["version_peer_inactive"] = res_dir.get("version_peer_inactive", None)
mux_info_dict["version_peer_next"] = res_dir.get("version_peer_next", None)
mux_info_dict["version_self_active"] = res_dir.get("version_self_active", None)
mux_info_dict["version_self_inactive"] = res_dir.get("version_self_inactive", None)
mux_info_dict["version_self_next"] = res_dir.get("version_self_next", None)
return mux_info_dict
def get_event_logs(port, res_dict, mux_info_dict):
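    """Copy the event log entries published by xcvrd for this port (XCVRD_EVENT_LOG_RES)
    into mux_info_dict and return it.
    """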
state_db = {}
xcvrd_show_fw_res_tbl = {}
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = db_connect("STATE_DB", namespace)
xcvrd_show_fw_res_tbl[asic_id] = swsscommon.Table(state_db[asic_id], "XCVRD_EVENT_LOG_RES")
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
click.echo("ERR: This is not a valid port, valid ports ({})".format(", ".join(logical_port_list)))
rc = EXIT_FAIL
res_dict[1] = rc
return mux_info_dict
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port))
rc = CONFIG_FAIL
res_dict[1] = rc
return mux_info_dict
(status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
res_dir = dict(fvp)
for key, value in res_dir.items():
        mux_info_dict[key] = value
return mux_info_dict
def get_result(port, res_dict, cmd, result, table_name):
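    """Fetch the per-port result fields published by xcvrd in table_name (STATE_DB)
    and return them as a plain dict.
    """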
state_db = {}
xcvrd_show_fw_res_tbl = {}
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = db_connect("STATE_DB", namespace)
xcvrd_show_fw_res_tbl[asic_id] = swsscommon.Table(state_db[asic_id], table_name)
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
click.echo("ERR: This is not a valid port, valid ports ({})".format(", ".join(logical_port_list)))
rc = EXIT_FAIL
res_dict[1] = rc
return result
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port))
rc = CONFIG_FAIL
res_dict[1] = rc
return result
(status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
res_dir = dict(fvp)
return res_dir
def update_and_get_response_for_xcvr_cmd(cmd_name, rsp_name, exp_rsp, cmd_table_name, cmd_arg_table_name, rsp_table_name, port, cmd_timeout_secs, param_dict=None, arg=None):
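    """Issue an xcvrd command for a port and wait for its response.

    Clears stale response keys, writes cmd_name/arg (plus optional param_dict
    entries) to the command tables in APPL_DB, then waits up to cmd_timeout_secs
    for xcvrd to publish rsp_name in rsp_table_name (STATE_DB).
    Returns res_dict with res_dict[0] = status code and res_dict[1] = response value.
    """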
res_dict = {}
state_db, appl_db = {}, {}
firmware_rsp_tbl, firmware_rsp_tbl_keys = {}, {}
firmware_rsp_sub_tbl = {}
firmware_cmd_tbl = {}
firmware_cmd_arg_tbl = {}
CMD_TIMEOUT_SECS = cmd_timeout_secs
time_start = time.time()
sel = swsscommon.Select()
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = db_connect("STATE_DB", namespace)
appl_db[asic_id] = db_connect("APPL_DB", namespace)
firmware_cmd_tbl[asic_id] = swsscommon.Table(appl_db[asic_id], cmd_table_name)
firmware_rsp_sub_tbl[asic_id] = swsscommon.SubscriberStateTable(state_db[asic_id], rsp_table_name)
firmware_rsp_tbl[asic_id] = swsscommon.Table(state_db[asic_id], rsp_table_name)
if cmd_arg_table_name is not None:
firmware_cmd_arg_tbl[asic_id] = swsscommon.Table(appl_db[asic_id], cmd_arg_table_name)
firmware_rsp_tbl_keys[asic_id] = firmware_rsp_tbl[asic_id].getKeys()
for key in firmware_rsp_tbl_keys[asic_id]:
firmware_rsp_tbl[asic_id]._del(key)
sel.addSelectable(firmware_rsp_sub_tbl[asic_id])
rc = CONFIG_FAIL
res_dict[0] = CONFIG_FAIL
res_dict[1] = 'unknown'
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
click.echo("ERR: This is not a valid port, valid ports ({})".format(", ".join(logical_port_list)))
res_dict[0] = rc
return res_dict
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant perform firmware cmd".format(port))
res_dict[0] = rc
return res_dict
if arg is None:
cmd_arg = "null"
else:
cmd_arg = str(arg)
if param_dict is not None:
for key, value in param_dict.items():
fvs = swsscommon.FieldValuePairs([(str(key), str(value))])
firmware_cmd_arg_tbl[asic_index].set(port, fvs)
fvs = swsscommon.FieldValuePairs([(cmd_name, cmd_arg)])
firmware_cmd_tbl[asic_index].set(port, fvs)
    # Wait for xcvrd to publish a response in rsp_table_name (STATE_DB), up to CMD_TIMEOUT_SECS
while True:
# Use timeout to prevent ignoring the signals we want to handle
# in signal_handler() (e.g. SIGTERM for graceful shutdown)
(state, selectableObj) = sel.select(SELECT_TIMEOUT)
time_now = time.time()
time_diff = time_now - time_start
if time_diff >= CMD_TIMEOUT_SECS:
return res_dict
if state == swsscommon.Select.TIMEOUT:
# Do not flood log when select times out
continue
if state != swsscommon.Select.OBJECT:
click.echo("sel.select() did not return swsscommon.Select.OBJECT for sonic_y_cable updates")
continue
# Get the redisselect object from selectable object
redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(
selectableObj)
# Get the corresponding namespace from redisselect db connector object
namespace = redisSelectObj.getDbConnector().getNamespace()
asic_index = multi_asic.get_asic_index_from_namespace(namespace)
(port_m, op_m, fvp_m) = firmware_rsp_sub_tbl[asic_index].pop()
if not port_m:
click.echo("Did not receive a port response {}".format(port))
res_dict[1] = 'unknown'
res_dict[0] = CONFIG_FAIL
firmware_rsp_tbl[asic_index]._del(port)
break
if port_m != port:
res_dict[1] = 'unknown'
res_dict[0] = CONFIG_FAIL
firmware_rsp_tbl[asic_index]._del(port)
continue
if fvp_m:
fvp_dict = dict(fvp_m)
if rsp_name in fvp_dict:
                # check whether xcvrd published the expected response field
result = fvp_dict[rsp_name]
res_dict[1] = result
res_dict[0] = 0
else:
res_dict[1] = 'unknown'
res_dict[0] = CONFIG_FAIL
firmware_rsp_tbl[asic_index]._del(port)
break
else:
res_dict[1] = 'unknown'
res_dict[0] = CONFIG_FAIL
firmware_rsp_tbl[asic_index]._del(port)
break
delete_all_keys_in_db_table("STATE_DB", rsp_table_name)
return res_dict
# 'muxcable' command ("show muxcable")
#
@click.group(name='muxcable', cls=clicommon.AliasedGroup)
def muxcable():
"""SONiC command line - 'show muxcable' command"""
global platform_sfputil
# Load platform-specific sfputil class
platform_sfputil_helper.load_platform_sfputil()
# Load port info
platform_sfputil_helper.platform_sfputil_read_porttab_mappings()
platform_sfputil = platform_sfputil_helper.platform_sfputil
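# Example invocations handled by this group (port names below are illustrative
# and platform dependent):
#   show muxcable status Ethernet0
#   show muxcable config --json
#   show muxcable firmware version Ethernet0 --active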
def get_value_for_key_in_dict(mdict, port, key, table_name):
value = mdict.get(key, None)
if value is None:
click.echo("could not retrieve key {} value for port {} inside table {}".format(key, port, table_name))
sys.exit(STATUS_FAIL)
return value
def get_value_for_key_in_config_tbl(config_db, port, key, table):
info_dict = {}
info_dict = config_db.get_entry(table, port)
if info_dict is None:
click.echo("could not retrieve key {} value for port {} inside table {}".format(key, port, table))
sys.exit(STATUS_FAIL)
value = get_value_for_key_in_dict(info_dict, port, key, table)
return value
def get_switch_name(config_db):
info_dict = {}
info_dict = config_db.get_entry("DEVICE_METADATA", "localhost")
#click.echo("{} ".format(info_dict))
switch_name = get_value_for_key_in_dict(info_dict, "localhost", "peer_switch", "DEVICE_METADATA")
if switch_name is not None:
return switch_name
else:
click.echo("could not retreive switch name")
sys.exit(STATUS_FAIL)
def create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port):
res_dict = {}
status_value = get_value_for_key_in_dict(muxcable_info_dict[asic_index], port, "state", "MUX_CABLE_TABLE")
port_name = platform_sfputil_helper.get_interface_alias(port, db)
port_status_dict["MUX_CABLE"][port_name] = {}
port_status_dict["MUX_CABLE"][port_name]["STATUS"] = status_value
health_value = get_value_for_key_in_dict(muxcable_health_dict[asic_index], port, "state", "MUX_LINKMGR_TABLE")
port_status_dict["MUX_CABLE"][port_name]["HEALTH"] = health_value
res_dict = get_hwmode_mux_direction_port(db, port)
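    # Compare the hardware-reported mux direction with the state DB value:
    # "absent" if the cable is not present, "consistent" if they match, otherwise "inconsistent".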
if res_dict[2] == "False":
hwstatus = "absent"
elif res_dict[1] == "not Y-Cable port":
hwstatus = "not Y-Cable port"
elif res_dict[1] == status_value:
hwstatus = "consistent"
else:
hwstatus = "inconsistent"
port_status_dict["MUX_CABLE"][port_name]["HWSTATUS"] = hwstatus
last_switch_end_time = ""
if "linkmgrd_switch_standby_end" in muxcable_metrics_dict[asic_index]:
last_switch_end_time = muxcable_metrics_dict[asic_index].get("linkmgrd_switch_standby_end")
elif "linkmgrd_switch_active_end" in muxcable_metrics_dict[asic_index]:
last_switch_end_time = muxcable_metrics_dict[asic_index].get("linkmgrd_switch_active_end")
port_status_dict["MUX_CABLE"][port_name]["LAST_SWITCHOVER_TIME"] = last_switch_end_time
def create_table_dump_per_port_status(db, print_data, muxcable_info_dict, muxcable_health_dict, muxcable_metrics_dict, asic_index, port):
print_port_data = []
res_dict = {}
res_dict = get_hwmode_mux_direction_port(db, port)
status_value = get_value_for_key_in_dict(muxcable_info_dict[asic_index], port, "state", "MUX_CABLE_TABLE")
#status_value = get_value_for_key_in_tbl(y_cable_asic_table, port, "status")
health_value = get_value_for_key_in_dict(muxcable_health_dict[asic_index], port, "state", "MUX_LINKMGR_TABLE")
last_switch_end_time = ""
if "linkmgrd_switch_standby_end" in muxcable_metrics_dict[asic_index]:
last_switch_end_time = muxcable_metrics_dict[asic_index].get("linkmgrd_switch_standby_end")
elif "linkmgrd_switch_active_end" in muxcable_metrics_dict[asic_index]:
last_switch_end_time = muxcable_metrics_dict[asic_index].get("linkmgrd_switch_active_end")
port_name = platform_sfputil_helper.get_interface_alias(port, db)
print_port_data.append(port_name)
print_port_data.append(status_value)
print_port_data.append(health_value)
if res_dict[2] == "False":
hwstatus = "absent"
elif res_dict[1] == "not Y-Cable port":
hwstatus = "not Y-Cable port"
elif res_dict[1] == status_value:
hwstatus = "consistent"
else:
hwstatus = "inconsistent"
print_port_data.append(hwstatus)
print_port_data.append(last_switch_end_time)
print_data.append(print_port_data)
def create_table_dump_per_port_config(db, print_data, per_npu_configdb, asic_id, port):
port_list = []
port_name = platform_sfputil_helper.get_interface_alias(port, db)
port_list.append(port_name)
state_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "state", "MUX_CABLE")
port_list.append(state_value)
ipv4_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "server_ipv4", "MUX_CABLE")
port_list.append(ipv4_value)
ipv6_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "server_ipv6", "MUX_CABLE")
port_list.append(ipv6_value)
print_data.append(port_list)
def create_json_dump_per_port_config(db, port_status_dict, per_npu_configdb, asic_id, port):
state_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "state", "MUX_CABLE")
port_name = platform_sfputil_helper.get_interface_alias(port, db)
port_status_dict["MUX_CABLE"]["PORTS"][port_name] = {"STATE": state_value}
port_status_dict["MUX_CABLE"]["PORTS"][port_name]["SERVER"] = {}
ipv4_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "server_ipv4", "MUX_CABLE")
port_status_dict["MUX_CABLE"]["PORTS"][port_name]["SERVER"]["IPv4"] = ipv4_value
ipv6_value = get_value_for_key_in_config_tbl(per_npu_configdb[asic_id], port, "server_ipv6", "MUX_CABLE")
port_status_dict["MUX_CABLE"]["PORTS"][port_name]["SERVER"]["IPv6"] = ipv6_value
@muxcable.command()
@click.argument('port', required=False, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def status(db, port, json_output):
"""Show muxcable status information"""
port = platform_sfputil_helper.get_interface_name(port, db)
port_table_keys = {}
port_health_table_keys = {}
port_metrics_table_keys = {}
per_npu_statedb = {}
muxcable_info_dict = {}
muxcable_health_dict = {}
muxcable_metrics_dict = {}
    # Get all front-end ASIC namespaces and their corresponding state DB connectors
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
per_npu_statedb[asic_id] = SonicV2Connector(use_unix_socket_path=False, namespace=namespace)
per_npu_statedb[asic_id].connect(per_npu_statedb[asic_id].STATE_DB)
port_table_keys[asic_id] = per_npu_statedb[asic_id].keys(
per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|*')
port_health_table_keys[asic_id] = per_npu_statedb[asic_id].keys(
per_npu_statedb[asic_id].STATE_DB, 'MUX_LINKMGR_TABLE|*')
port_metrics_table_keys[asic_id] = per_npu_statedb[asic_id].keys(
per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|*')
if port is not None:
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name))
sys.exit(STATUS_FAIL)
muxcable_info_dict[asic_index] = per_npu_statedb[asic_index].get_all(
per_npu_statedb[asic_index].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port))
muxcable_health_dict[asic_index] = per_npu_statedb[asic_index].get_all(
per_npu_statedb[asic_index].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port))
muxcable_metrics_dict[asic_index] = per_npu_statedb[asic_index].get_all(
per_npu_statedb[asic_index].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port))
if muxcable_info_dict[asic_index] is not None:
logical_key = "MUX_CABLE_TABLE|{}".format(port)
logical_health_key = "MUX_LINKMGR_TABLE|{}".format(port)
logical_metrics_key = "MUX_METRICS_TABLE|{}".format(port)
if logical_key in port_table_keys[asic_index] and logical_health_key in port_health_table_keys[asic_index]:
if logical_metrics_key not in port_metrics_table_keys[asic_index]:
muxcable_metrics_dict[asic_index] = {}
if json_output:
port_status_dict = {}
port_status_dict["MUX_CABLE"] = {}
create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict,
muxcable_health_dict, muxcable_metrics_dict, asic_index, port)
click.echo("{}".format(json.dumps(port_status_dict, indent=4)))
sys.exit(STATUS_SUCCESSFUL)
else:
print_data = []
create_table_dump_per_port_status(db, print_data, muxcable_info_dict,
muxcable_health_dict, muxcable_metrics_dict, asic_index, port)
headers = ['PORT', 'STATUS', 'HEALTH', 'HWSTATUS', 'LAST_SWITCHOVER_TIME']
click.echo(tabulate(print_data, headers=headers))
sys.exit(STATUS_SUCCESSFUL)
else:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("this is not a valid port present on mux_cable".format(port_name))
sys.exit(STATUS_FAIL)
else:
click.echo("there is not a valid asic table for this asic_index".format(asic_index))
sys.exit(STATUS_FAIL)
else:
if json_output:
port_status_dict = {}
port_status_dict["MUX_CABLE"] = {}
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
for key in natsorted(port_table_keys[asic_id]):
port = key.split("|")[1]
muxcable_info_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port))
muxcable_health_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port))
muxcable_metrics_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port))
if not muxcable_metrics_dict[asic_id]:
muxcable_metrics_dict[asic_id] = {}
create_json_dump_per_port_status(db, port_status_dict, muxcable_info_dict,
muxcable_health_dict, muxcable_metrics_dict, asic_id, port)
click.echo("{}".format(json.dumps(port_status_dict, indent=4)))
else:
print_data = []
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
for key in natsorted(port_table_keys[asic_id]):
port = key.split("|")[1]
muxcable_health_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_LINKMGR_TABLE|{}'.format(port))
muxcable_info_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_CABLE_TABLE|{}'.format(port))
muxcable_metrics_dict[asic_id] = per_npu_statedb[asic_id].get_all(
per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port))
if not muxcable_metrics_dict[asic_id]:
muxcable_metrics_dict[asic_id] = {}
create_table_dump_per_port_status(db, print_data, muxcable_info_dict,
muxcable_health_dict, muxcable_metrics_dict, asic_id, port)
headers = ['PORT', 'STATUS', 'HEALTH', 'HWSTATUS','LAST_SWITCHOVER_TIME']
click.echo(tabulate(print_data, headers=headers))
sys.exit(STATUS_SUCCESSFUL)
@muxcable.command()
@click.argument('port', required=False, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def config(db, port, json_output):
"""Show muxcable config information"""
port = platform_sfputil_helper.get_interface_name(port, db)
port_mux_tbl_keys = {}
asic_start_idx = None
per_npu_configdb = {}
mux_tbl_cfg_db = {}
peer_switch_tbl_cfg_db = {}
    # Get all front-end ASIC namespaces and their corresponding config DB connectors
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
if asic_start_idx is None:
asic_start_idx = asic_id
# TO-DO replace the macros with correct swsscommon names
#config_db[asic_id] = swsscommon.DBConnector("CONFIG_DB", REDIS_TIMEOUT_MSECS, True, namespace)
#mux_tbl_cfg_db[asic_id] = swsscommon.Table(config_db[asic_id], swsscommon.CFG_MUX_CABLE_TABLE_NAME)
per_npu_configdb[asic_id] = ConfigDBConnector(use_unix_socket_path=False, namespace=namespace)
per_npu_configdb[asic_id].connect()
mux_tbl_cfg_db[asic_id] = per_npu_configdb[asic_id].get_table("MUX_CABLE")
peer_switch_tbl_cfg_db[asic_id] = per_npu_configdb[asic_id].get_table("PEER_SWITCH")
#peer_switch_tbl_cfg_db[asic_id] = swsscommon.Table(config_db[asic_id], swsscommon.CFG_PEER_SWITCH_TABLE_NAME)
port_mux_tbl_keys[asic_id] = mux_tbl_cfg_db[asic_id].keys()
if port is not None:
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name))
sys.exit(CONFIG_FAIL)
port_status_dict = {}
port_status_dict["MUX_CABLE"] = {}
port_status_dict["MUX_CABLE"]["PEER_TOR"] = {}
peer_switch_value = None
switch_name = get_switch_name(per_npu_configdb[asic_start_idx])
if asic_start_idx is not None:
peer_switch_value = get_value_for_key_in_config_tbl(
per_npu_configdb[asic_start_idx], switch_name, "address_ipv4", "PEER_SWITCH")
port_status_dict["MUX_CABLE"]["PEER_TOR"] = peer_switch_value
if port_mux_tbl_keys[asic_id] is not None:
if port in port_mux_tbl_keys[asic_id]:
if json_output:
port_status_dict["MUX_CABLE"] = {}
port_status_dict["MUX_CABLE"]["PORTS"] = {}
create_json_dump_per_port_config(db, port_status_dict, per_npu_configdb, asic_id, port)
click.echo("{}".format(json.dumps(port_status_dict, indent=4)))
sys.exit(CONFIG_SUCCESSFUL)
else:
print_data = []
print_peer_tor = []
create_table_dump_per_port_config(db, print_data, per_npu_configdb, asic_id, port)
headers = ['SWITCH_NAME', 'PEER_TOR']
peer_tor_data = []
peer_tor_data.append(switch_name)
peer_tor_data.append(peer_switch_value)
print_peer_tor.append(peer_tor_data)
click.echo(tabulate(print_peer_tor, headers=headers))
headers = ['port', 'state', 'ipv4', 'ipv6']
click.echo(tabulate(print_data, headers=headers))
sys.exit(CONFIG_SUCCESSFUL)
else:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("this is not a valid port present on mux_cable".format(port_name))
sys.exit(CONFIG_FAIL)
else:
click.echo("there is not a valid asic table for this asic_index".format(asic_index))
sys.exit(CONFIG_FAIL)
else:
port_status_dict = {}
port_status_dict["MUX_CABLE"] = {}
port_status_dict["MUX_CABLE"]["PEER_TOR"] = {}
peer_switch_value = None
switch_name = get_switch_name(per_npu_configdb[asic_start_idx])
if asic_start_idx is not None:
peer_switch_value = get_value_for_key_in_config_tbl(
per_npu_configdb[asic_start_idx], switch_name, "address_ipv4", "PEER_SWITCH")
port_status_dict["MUX_CABLE"]["PEER_TOR"] = peer_switch_value
if json_output:
port_status_dict["MUX_CABLE"]["PORTS"] = {}
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
for port in natsorted(port_mux_tbl_keys[asic_id]):
create_json_dump_per_port_config(db, port_status_dict, per_npu_configdb, asic_id, port)
click.echo("{}".format(json.dumps(port_status_dict, indent=4)))
else:
print_data = []
print_peer_tor = []
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
for port in natsorted(port_mux_tbl_keys[asic_id]):
create_table_dump_per_port_config(db, print_data, per_npu_configdb, asic_id, port)
headers = ['SWITCH_NAME', 'PEER_TOR']
peer_tor_data = []
peer_tor_data.append(switch_name)
peer_tor_data.append(peer_switch_value)
print_peer_tor.append(peer_tor_data)
click.echo(tabulate(print_peer_tor, headers=headers))
headers = ['port', 'state', 'ipv4', 'ipv6']
click.echo(tabulate(print_data, headers=headers))
sys.exit(CONFIG_SUCCESSFUL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.argument('target', metavar='<target> NIC TORA TORB LOCAL', required=True, default=None, type=click.Choice(["NIC", "TORA", "TORB", "LOCAL"]))
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def berinfo(db, port, target, json_output):
"""Show muxcable BER (bit error rate) information"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
param_dict = {}
target = parse_target(target)
param_dict["target"] = target
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "ber")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for ber value".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.argument('target', metavar='<target> NIC TORA TORB LOCAL', required=True, default=None, type=click.Choice(["NIC", "TORA", "TORB", "LOCAL"]))
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def eyeinfo(db, port, target, json_output):
"""Show muxcable eye information in mv"""
    port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
param_dict = {}
target = parse_target(target)
param_dict["target"] = target
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "eye")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for ber value".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.argument('target', metavar='<target> NIC TORA TORB LOCAL', required=True, default=None, type=click.Choice(["NIC", "TORA", "TORB", "LOCAL"]))
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def fecstatistics(db, port, target, json_output):
"""Show muxcable fec layer statistics information, target NIC TORA TORB"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
param_dict = {}
target = parse_target(target)
param_dict["target"] = target
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "fec_stats")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for ber value".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.argument('target', metavar='<target> NIC TORA TORB LOCAL', required=True, default=None, type=click.Choice(["NIC", "TORA", "TORB", "LOCAL"]))
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def pcsstatistics(db, port, target, json_output):
"""Show muxcable pcs layer statistics information"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
param_dict = {}
target = parse_target(target)
param_dict["target"] = target
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 10, param_dict, "pcs_stats")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for pcs statistics".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.argument('option', required=False, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def debugdumpregisters(db, port, option, json_output):
"""Show muxcable debug deump registers information, preagreed by vendors"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
param_dict = {}
param_dict["option"] = option
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", "XCVRD_GET_BER_CMD_ARG", "XCVRD_GET_BER_RSP", port, 100, param_dict, "debug_dump")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD_ARG")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for debug dump registers".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def alivecablestatus(db, port, json_output):
"""Show muxcable alive information """
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
if port is not None:
res_dict = {}
result = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_ber", "status", "True", "XCVRD_GET_BER_CMD", None, "XCVRD_GET_BER_RSP", port, 10, None, "cable_alive")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_BER_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_BER_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_BER_RES")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for cable alive status".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', required=True, default=None)
@clicommon.pass_db
def cableinfo(db, port):
"""Show muxcable cable information"""
port = platform_sfputil_helper.get_interface_name(port, db)
if platform_sfputil is not None:
physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port)
if not isinstance(physical_port_list, list):
click.echo("ERR: Unable to get a port on muxcable port")
sys.exit(EXIT_FAIL)
if len(physical_port_list) != 1:
click.echo("ERR: Unable to get a single port on muxcable")
sys.exit(EXIT_FAIL)
physical_port = physical_port_list[0]
import sonic_y_cable.y_cable
part_num = sonic_y_cable.y_cable.get_part_number(physical_port)
if part_num == False or part_num == -1:
click.echo("ERR: Unable to get cable info part number")
sys.exit(EXIT_FAIL)
vendor = sonic_y_cable.y_cable.get_vendor(physical_port)
if vendor == False or vendor == -1:
click.echo("ERR: Unable to get cable info vendor name")
sys.exit(EXIT_FAIL)
headers = ['Vendor', 'Model']
body = [[vendor, part_num]]
click.echo(tabulate(body, headers=headers))
def get_hwmode_mux_direction_port(db, port):
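    """Probe the mux direction directly from the cable hardware via xcvrd.

    Returns res_dict with [0] = status code, [1] = mux direction (or an error
    string such as "not Y-Cable port") and [2] = cable presence.
    """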
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
res_dict = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict[2] = "unknown"
result = {}
if port is not None:
res_dict = update_and_get_response_for_xcvr_cmd(
"state", "state", "True", "XCVRD_SHOW_HWMODE_DIR_CMD", "XCVRD_SHOW_HWMODE_DIR_RES", "XCVRD_SHOW_HWMODE_DIR_RSP", port, 1, None, "probe")
result = get_result(port, res_dict, "muxdirection" , result, "XCVRD_SHOW_HWMODE_DIR_RES")
res_dict[2] = result.get("presence","unknown")
return res_dict
@muxcable.group(cls=clicommon.AbbreviationGroup)
def hwmode():
"""Shows the muxcable hardware information directly"""
pass
@hwmode.command()
@click.argument('port', metavar='<port_name>', required=False, default=None)
@clicommon.pass_db
def muxdirection(db, port):
"""Shows the current direction of the muxcable {active/standy}"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
if port is not None:
if check_port_in_mux_cable_table(port) == False:
click.echo("Not Y-cable port")
return CONFIG_FAIL
res_dict = get_hwmode_mux_direction_port(db, port)
body = []
temp_list = []
headers = ['Port', 'Direction', 'Presence']
port = platform_sfputil_helper.get_interface_alias(port, db)
temp_list.append(port)
temp_list.append(res_dict[1])
temp_list.append(res_dict[2])
body.append(temp_list)
rc = res_dict[0]
click.echo(tabulate(body, headers=headers))
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
return rc
else:
logical_port_list = platform_sfputil_helper.get_logical_list()
rc_exit = True
body = []
for port in logical_port_list:
if platform_sfputil is not None:
physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port)
if not isinstance(physical_port_list, list):
continue
if len(physical_port_list) != 1:
continue
if not check_port_in_mux_cable_table(port):
continue
physical_port = physical_port_list[0]
logical_port_list_for_physical_port = platform_sfputil_helper.get_physical_to_logical()
logical_port_list_per_port = logical_port_list_for_physical_port.get(physical_port, None)
""" This check is required for checking whether or not this logical port is the one which is
actually mapped to physical port and by convention it is always the first port.
TODO: this should be removed with more logic to check which logical port maps to actual physical port
being used"""
if port != logical_port_list_per_port[0]:
continue
temp_list = []
res_dict = get_hwmode_mux_direction_port(db, port)
port = platform_sfputil_helper.get_interface_alias(port, db)
temp_list.append(port)
temp_list.append(res_dict[1])
temp_list.append(res_dict[2])
body.append(temp_list)
rc = res_dict[0]
if rc != 0:
rc_exit = False
headers = ['Port', 'Direction', 'Presence']
click.echo(tabulate(body, headers=headers))
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_DIR_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_DIR_RES")
if rc_exit == False:
sys.exit(EXIT_FAIL)
@hwmode.command()
@click.argument('port', metavar='<port_name>', required=False, default=None)
@clicommon.pass_db
def switchmode(db, port):
"""Shows the current switching mode of the muxcable {auto/manual}"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_SWMODE_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_SWMODE_RSP")
if port is not None:
if check_port_in_mux_cable_table(port) == False:
click.echo("Not Y-cable port")
return CONFIG_FAIL
res_dict = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe")
body = []
temp_list = []
headers = ['Port', 'Switching']
port = platform_sfputil_helper.get_interface_alias(port, db)
temp_list.append(port)
temp_list.append(res_dict[1])
body.append(temp_list)
rc = res_dict[0]
click.echo(tabulate(body, headers=headers))
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_SWMODE_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_SWMODE_RSP")
return rc
else:
logical_port_list = platform_sfputil_helper.get_logical_list()
rc_exit = True
body = []
for port in logical_port_list:
if platform_sfputil is not None:
physical_port_list = platform_sfputil_helper.logical_port_name_to_physical_port_list(port)
if not isinstance(physical_port_list, list):
continue
if len(physical_port_list) != 1:
continue
if not check_port_in_mux_cable_table(port):
continue
physical_port = physical_port_list[0]
logical_port_list_for_physical_port = platform_sfputil_helper.get_physical_to_logical()
logical_port_list_per_port = logical_port_list_for_physical_port.get(physical_port, None)
""" This check is required for checking whether or not this logical port is the one which is
actually mapped to physical port and by convention it is always the first port.
TODO: this should be removed with more logic to check which logical port maps to actual physical port
being used"""
if port != logical_port_list_per_port[0]:
continue
temp_list = []
res_dict = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"state", "state", "True", "XCVRD_SHOW_HWMODE_SWMODE_CMD", None, "XCVRD_SHOW_HWMODE_SWMODE_RSP", port, 1, None, "probe")
port = platform_sfputil_helper.get_interface_alias(port, db)
temp_list.append(port)
temp_list.append(res_dict[1])
            rc = res_dict[0]
if rc != 0:
rc_exit = False
body.append(temp_list)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_HWMODE_SWMODE_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_HWMODE_SWMODE_RSP")
headers = ['Port', 'Switching']
click.echo(tabulate(body, headers=headers))
if rc_exit == False:
sys.exit(EXIT_FAIL)
def get_single_port_firmware_version(port, res_dict, mux_info_dict):
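    """Send a 'firmware_version probe' request to xcvrd for the port, wait for the
    response, and fill mux_info_dict with the versions published in XCVRD_SHOW_FW_RES.
    res_dict[0] carries the response status and res_dict[1] an exit code.
    """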
state_db, appl_db = {}, {}
xcvrd_show_fw_rsp_sts_tbl_keys = {}
xcvrd_show_fw_rsp_sts_tbl = {}
xcvrd_show_fw_rsp_tbl = {}
xcvrd_show_fw_cmd_tbl, xcvrd_show_fw_res_tbl = {}, {}
sel = swsscommon.Select()
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
state_db[asic_id] = db_connect("STATE_DB", namespace)
appl_db[asic_id] = db_connect("APPL_DB", namespace)
xcvrd_show_fw_cmd_tbl[asic_id] = swsscommon.Table(appl_db[asic_id], "XCVRD_SHOW_FW_CMD")
xcvrd_show_fw_rsp_tbl[asic_id] = swsscommon.SubscriberStateTable(state_db[asic_id], "XCVRD_SHOW_FW_RSP")
xcvrd_show_fw_rsp_sts_tbl[asic_id] = swsscommon.Table(state_db[asic_id], "XCVRD_SHOW_FW_RSP")
xcvrd_show_fw_res_tbl[asic_id] = swsscommon.Table(state_db[asic_id], "XCVRD_SHOW_FW_RES")
xcvrd_show_fw_rsp_sts_tbl_keys[asic_id] = xcvrd_show_fw_rsp_sts_tbl[asic_id].getKeys()
for key in xcvrd_show_fw_rsp_sts_tbl_keys[asic_id]:
xcvrd_show_fw_rsp_sts_tbl[asic_id]._del(key)
sel.addSelectable(xcvrd_show_fw_rsp_tbl[asic_id])
rc = 0
res_dict[0] = 'unknown'
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
click.echo("ERR: This is not a valid port, valid ports ({})".format(", ".join(logical_port_list)))
rc = EXIT_FAIL
res_dict[1] = rc
return
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port))
rc = CONFIG_FAIL
res_dict[1] = rc
return
fvs = swsscommon.FieldValuePairs([('firmware_version', 'probe')])
xcvrd_show_fw_cmd_tbl[asic_index].set(port, fvs)
    # Listen indefinitely for a response on the XCVRD_SHOW_FW_RSP table in the State DB
while True:
# Use timeout to prevent ignoring the signals we want to handle
# in signal_handler() (e.g. SIGTERM for graceful shutdown)
(state, selectableObj) = sel.select(SELECT_TIMEOUT)
if state == swsscommon.Select.TIMEOUT:
# Do not flood log when select times out
continue
if state != swsscommon.Select.OBJECT:
click.echo("sel.select() did not return swsscommon.Select.OBJECT for sonic_y_cable updates")
continue
# Get the redisselect object from selectable object
redisSelectObj = swsscommon.CastSelectableToRedisSelectObj(
selectableObj)
# Get the corresponding namespace from redisselect db connector object
namespace = redisSelectObj.getDbConnector().getNamespace()
asic_index = multi_asic.get_asic_index_from_namespace(namespace)
(port_m, op_m, fvp_m) = xcvrd_show_fw_rsp_tbl[asic_index].pop()
if not port_m:
click.echo("Did not receive a port response {}".format(port))
res_dict[0] = 'False'
res_dict[1] = EXIT_FAIL
xcvrd_show_fw_rsp_sts_tbl[asic_index]._del(port)
break
if port_m != port:
res_dict[0] = 'False'
res_dict[1] = EXIT_FAIL
xcvrd_show_fw_rsp_sts_tbl[asic_index]._del(port)
continue
if fvp_m:
fvp_dict = dict(fvp_m)
if "status" in fvp_dict:
# check if xcvrd got a probe command
state = fvp_dict["status"]
res_dict[0] = state
res_dict[1] = EXIT_FAIL
xcvrd_show_fw_rsp_sts_tbl[asic_index]._del(port)
(status, fvp) = xcvrd_show_fw_res_tbl[asic_index].get(port)
res_dir = dict(fvp)
mux_info_dict["version_nic_active"] = res_dir.get("version_nic_active", None)
mux_info_dict["version_nic_inactive"] = res_dir.get("version_nic_inactive", None)
mux_info_dict["version_nic_next"] = res_dir.get("version_nic_next", None)
mux_info_dict["version_peer_active"] = res_dir.get("version_peer_active", None)
mux_info_dict["version_peer_inactive"] = res_dir.get("version_peer_inactive", None)
mux_info_dict["version_peer_next"] = res_dir.get("version_peer_next", None)
mux_info_dict["version_self_active"] = res_dir.get("version_self_active", None)
mux_info_dict["version_self_inactive"] = res_dir.get("version_self_inactive", None)
mux_info_dict["version_self_next"] = res_dir.get("version_self_next", None)
break
else:
res_dict[0] = 'False'
res_dict[1] = EXIT_FAIL
xcvrd_show_fw_rsp_sts_tbl[asic_index]._del(port)
break
else:
res_dict[0] = 'False'
res_dict[1] = EXIT_FAIL
xcvrd_show_fw_rsp_sts_tbl[asic_index]._del(port)
break
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RES")
return
@muxcable.group(cls=clicommon.AbbreviationGroup)
def firmware():
"""Show muxcable firmware command"""
pass
@firmware.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--active', 'active', required=False, is_flag=True, type=click.BOOL, help="display the firmware version of only active bank within MCU's")
@clicommon.pass_db
def version(db, port, active):
"""Show muxcable firmware version"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_DOWN_FW_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_DOWN_FW_RSP")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_SHOW_FW_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RES")
if port is not None:
res_dict = {}
mux_info_dict, mux_info_active_dict = {}, {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
mux_info_dict["version_nic_active"] = "N/A"
mux_info_dict["version_nic_inactive"] = "N/A"
mux_info_dict["version_nic_next"] = "N/A"
mux_info_dict["version_peer_active"] = "N/A"
mux_info_dict["version_peer_inactive"] = "N/A"
mux_info_dict["version_peer_next"] = "N/A"
mux_info_dict["version_self_active"] = "N/A"
mux_info_dict["version_self_inactive"] = "N/A"
mux_info_dict["version_self_next"] = "N/A"
res_dict = update_and_get_response_for_xcvr_cmd(
"firmware_version", "status", "True", "XCVRD_SHOW_FW_CMD", None, "XCVRD_SHOW_FW_RSP", port, 20, None, "probe")
if res_dict[1] == "True":
mux_info_dict = get_response_for_version(port, mux_info_dict)
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_SHOW_FW_RES")
if active is True:
for key in mux_info_dict:
if key.endswith("_active"):
mux_info_active_dict[key] = mux_info_dict[key]
click.echo("{}".format(json.dumps(mux_info_active_dict, indent=4)))
else:
click.echo("{}".format(json.dumps(mux_info_dict, indent=4)))
else:
port_name = platform_sfputil_helper.get_interface_name(port, db)
click.echo("Did not get a valid Port for mux firmware version".format(port_name))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def metrics(db, port, json_output):
"""Show muxcable metrics <port>"""
port = platform_sfputil_helper.get_interface_name(port, db)
metrics_table_keys = {}
per_npu_statedb = {}
metrics_dict = {}
    # Get all front-end ASIC namespaces and their corresponding state DB connectors
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
# replace these with correct macros
per_npu_statedb[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=False, namespace=namespace)
per_npu_statedb[asic_id].connect(per_npu_statedb[asic_id].STATE_DB)
metrics_table_keys[asic_id] = per_npu_statedb[asic_id].keys(
per_npu_statedb[asic_id].STATE_DB, 'MUX_METRICS_TABLE|*')
if port is not None:
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo(("ERR: Not a valid logical port for muxcable firmware {}".format(port_name)))
sys.exit(CONFIG_FAIL)
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("Got invalid asic index for port {}, cant retreive mux status".format(port_name))
metrics_dict[asic_index] = per_npu_statedb[asic_index].get_all(
per_npu_statedb[asic_index].STATE_DB, 'MUX_METRICS_TABLE|{}'.format(port))
ordered_dict = OrderedDict(sorted(metrics_dict[asic_index].items(), key=itemgetter(1)))
if json_output:
click.echo("{}".format(json.dumps(ordered_dict, indent=4)))
else:
print_data = []
for key, val in ordered_dict.items():
print_port_data = []
port = platform_sfputil_helper.get_interface_alias(port, db)
print_port_data.append(port)
print_port_data.append(key)
print_port_data.append(val)
print_data.append(print_port_data)
headers = ['PORT', 'EVENT', 'TIME']
click.echo(tabulate(print_data, headers=headers))
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def event_log(db, port, json_output):
"""Show muxcable event log <port>"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_EVENT_LOG_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_EVENT_LOG_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_EVENT_LOG_RES")
if port is not None:
res_dict = {}
result = {}
mux_info_dict = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"show_event", "status", "True", "XCVRD_EVENT_LOG_CMD", None, "XCVRD_EVENT_LOG_RSP", port, 1000, None, "probe")
if res_dict[1] == "True":
result = get_event_logs(port, res_dict, mux_info_dict)
delete_all_keys_in_db_table("STATE_DB", "XCVRD_EVENT_LOG_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_EVENT_LOG_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_EVENT_LOG_CMD")
port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for event log".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def get_fec_anlt_speed(db, port, json_output):
"""Show muxcable configurations for fec anlt speed <port>"""
port = platform_sfputil_helper.get_interface_name(port, db)
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_FEC_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_FEC_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_FEC_RES")
if port is not None:
res_dict = {}
result = {}
res_dict[0] = CONFIG_FAIL
res_dict[1] = "unknown"
res_dict = update_and_get_response_for_xcvr_cmd(
"get_fec", "status", "True", "XCVRD_GET_FEC_CMD", None, "XCVRD_GET_FEC_RSP", port, 10, None, "probe")
if res_dict[1] == "True":
result = get_result(port, res_dict, "fec" , result, "XCVRD_GET_FEC_RES")
delete_all_keys_in_db_table("APPL_DB", "XCVRD_GET_FEC_CMD")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_FEC_RSP")
delete_all_keys_in_db_table("STATE_DB", "XCVRD_GET_FEC_RES")
        port = platform_sfputil_helper.get_interface_alias(port, db)
if json_output:
click.echo("{}".format(json.dumps(result, indent=4)))
else:
headers = ['PORT', 'ATTR', 'VALUE']
res = [[port]+[key] + [val] for key, val in result.items()]
click.echo(tabulate(res, headers=headers))
else:
click.echo("Did not get a valid Port for fec value speed anlt".format(port))
sys.exit(CONFIG_FAIL)
@muxcable.command()
@click.argument('port', metavar='<port_name>', required=True, default=None)
@click.option('--json', 'json_output', required=False, is_flag=True, type=click.BOOL, help="display the output in json format")
@clicommon.pass_db
def packetloss(db, port, json_output):
"""show muxcable packetloss <port>"""
port = platform_sfputil_helper.get_interface_name(port, db)
pckloss_table_keys = {}
per_npu_statedb = {}
pckloss_dict = {}
namespaces = multi_asic.get_front_end_namespaces()
for namespace in namespaces:
asic_id = multi_asic.get_asic_index_from_namespace(namespace)
per_npu_statedb[asic_id] = swsscommon.SonicV2Connector(use_unix_socket_path=True, namespace=namespace)
per_npu_statedb[asic_id].connect(per_npu_statedb[asic_id].STATE_DB)
pckloss_table_keys[asic_id] = per_npu_statedb[asic_id].keys(
per_npu_statedb[asic_id].STATE_DB, 'LINK_PROBE_STATS|*')
if port is not None:
logical_port_list = platform_sfputil_helper.get_logical_list()
if port not in logical_port_list:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo(("ERR: Not a valid logical port for muxcable firmware {}".format(port_name)))
sys.exit(CONFIG_FAIL)
asic_index = None
if platform_sfputil is not None:
asic_index = platform_sfputil_helper.get_asic_id_for_logical_port(port)
if asic_index is None:
# TODO this import is only for unit test purposes, and should be removed once sonic_platform_base
# is fully mocked
import sonic_platform_base.sonic_sfp.sfputilhelper
asic_index = sonic_platform_base.sonic_sfp.sfputilhelper.SfpUtilHelper().get_asic_id_for_logical_port(port)
if asic_index is None:
port_name = platform_sfputil_helper.get_interface_alias(port, db)
click.echo("Got invalid asic index for port {}, cant retreive pck loss info".format(port_name))
pckloss_dict[asic_index] = per_npu_statedb[asic_index].get_all(
per_npu_statedb[asic_index].STATE_DB, 'LINK_PROBE_STATS|{}'.format(port))
ordered_dict = OrderedDict(sorted(pckloss_dict[asic_index].items(), key=itemgetter(1)))
if json_output:
click.echo("{}".format(json.dumps(ordered_dict, indent=4)))
else:
print_count = []
print_event = []
for key, val in ordered_dict.items():
print_port_data = []
port = platform_sfputil_helper.get_interface_alias(port, db)
print_port_data.append(port)
print_port_data.append(key)
print_port_data.append(val)
if "count" in key:
print_count.append(print_port_data)
else:
print_event.append(print_port_data)
count_headers = ['PORT', 'COUNT', 'VALUE']
event_headers = ['PORT', 'EVENT', 'TIME']
click.echo(tabulate(print_count, headers=count_headers))
click.echo(tabulate(print_event, headers=event_headers))
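# Example invocations, assuming these commands are wired into the usual
# "show muxcable" CLI group of sonic-utilities; the port name Ethernet0 is
# illustrative only:
#   show muxcable packetloss Ethernet0
#   show muxcable packetloss Ethernet0 --json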
| 42.192398
| 174
| 0.673648
|
0d744718f04029fb305ecf660b16088f097f8355
| 1,531
|
py
|
Python
|
problem_solving/warmup/diagonal_difference/aut_tests.py
|
hugolribeiro/hackerrank_exercises
|
d2757b24479c26ec39e01091e3a15e8980e97864
|
[
"MIT"
] | null | null | null |
problem_solving/warmup/diagonal_difference/aut_tests.py
|
hugolribeiro/hackerrank_exercises
|
d2757b24479c26ec39e01091e3a15e8980e97864
|
[
"MIT"
] | null | null | null |
problem_solving/warmup/diagonal_difference/aut_tests.py
|
hugolribeiro/hackerrank_exercises
|
d2757b24479c26ec39e01091e3a15e8980e97864
|
[
"MIT"
] | null | null | null |
from main import diagonalDifference
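# Test 1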
arr1 = [[11, 2, 4],
[4, 5, 6],
[10, 8, -12]]
result1 = diagonalDifference(arr1)
answer1 = 15
if result1 == answer1:
print(f'Test number 1 \nExpected Output: {answer1}\nThe output: {result1} \n\033[0;32;1mTest OK\033[m')
else:
print(f'Test number 1 \nExpected Output: {answer1}\nThe output: {result1} \n\033[0;31;1mWRONG\033[m')
print('#' * 20)
# Test 2
arr2 = [[6, 7, 8],
[5, 2, 12],
[10, 9, -10]]
result2 = diagonalDifference(arr2)
answer2 = 22
if result2 == answer2:
print(f'Test number 2 \nExpected Output: {answer2}\nThe output: {result2} \n\033[0;32;1mTest OK\033[m')
else:
print(f'Test number 2 \nExpected Output: {answer2}\nThe output: {result2} \n\033[0;31;1mWRONG\033[m')
print('#' * 20)
# Test 3
arr3 = [[-5, 23, 4],
[8, 15, 6],
[23, 0, 5]]
result3 = diagonalDifference(arr3)
answer3 = 27
if result3 == answer3:
print(f'Test number 3 \nExpected Output: {answer3}\nThe output: {result3} \n\033[0;32;1mTest OK\033[m')
else:
print(f'Test number 3 \nExpected Output: {answer3}\nThe output: {result3} \n\033[0;31;1mWRONG\033[m')
print('#' * 20)
# Test 4
arr4 = [[36, 23, -4],
[41, -50, 6],
[10, -15, 7]]
result4 = diagonalDifference(arr4)
answer4 = 37
if result4 == answer4:
print(f'Test number 4 \nExpected Output: {answer4}\nThe output: {result4} \n\033[0;32;1mTest OK\033[m')
else:
print(f'Test number 4 \nExpected Output: {answer4}\nThe output: {result4} \n\033[0;31;1mWRONG\033[m')
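# For context, a reference sketch of the imported diagonalDifference, inferred
# from the expected outputs above (the absolute difference between the two
# diagonal sums of a square matrix); the real implementation in main.py may differ:
def _diagonal_difference_reference(arr):
    n = len(arr)
    primary = sum(arr[i][i] for i in range(n))            # top-left to bottom-right
    secondary = sum(arr[i][n - 1 - i] for i in range(n))  # top-right to bottom-left
    return abs(primary - secondary)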
| 30.019608
| 107
| 0.629001
|
8f0de34e577655e7cbab08143fb27ed8741f34cf
| 845
|
py
|
Python
|
yaya/cmdlineparser.py
|
libyoung/yaya
|
4df4b360ce90bab0e3a62a4ef482c38739b25695
|
[
"MIT"
] | 1
|
2017-09-25T06:34:02.000Z
|
2017-09-25T06:34:02.000Z
|
yaya/cmdlineparser.py
|
libyoung/yaya
|
4df4b360ce90bab0e3a62a4ef482c38739b25695
|
[
"MIT"
] | null | null | null |
yaya/cmdlineparser.py
|
libyoung/yaya
|
4df4b360ce90bab0e3a62a4ef482c38739b25695
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##############################
# Create Time: 20170427
# Author: liuyang
# Email: czyang.liu@jrdcom.com
# Content: Flow Object
##############################
import argparse
from argparse import ArgumentDefaultsHelpFormatter
class CmdLineParser(object):
def __init__(self):
self.parser = argparse.ArgumentParser(description='Argument Parser for MTBF Handler',
formatter_class=ArgumentDefaultsHelpFormatter)
self.parser.add_argument('-loop', help='specify the current test loop ')
self.parser.add_argument('-name', help='specify the current text module name')
self.parser.add_argument('-mode', help='specify test mode')
def parse(self, input):
return self.parser.parse_args(input)
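# A minimal usage sketch for CmdLineParser; the argument values below are
# illustrative only and not taken from the original project:
if __name__ == '__main__':
    cmd_parser = CmdLineParser()
    args = cmd_parser.parse(['-loop', '1', '-name', 'camera', '-mode', 'auto'])
    print(args.loop, args.name, args.mode)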
| 38.409091
| 93
| 0.615385
|
f8547657492a4e26961c0de783a01fcc6b24ff30
| 11,206
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/databoxedge/latest/share.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/databoxedge/latest/share.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/databoxedge/latest/share.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Share']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:databoxedge:Share'.""", DeprecationWarning)
class Share(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:databoxedge:Share'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents a share on the Data Box Edge/Gateway device.
Latest API Version: 2020-09-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
:param pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']] azure_container_info: Azure container mapping for the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
:param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
:param pulumi.Input[str] description: Description for the share.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
:param pulumi.Input[str] name: The share name.
:param pulumi.Input[pulumi.InputType['RefreshDetailsArgs']] refresh_details: Details of the refresh job on this share.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
pulumi.log.warn("Share is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:databoxedge:Share'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
if access_protocol is None and not opts.urn:
raise TypeError("Missing required property 'access_protocol'")
__props__['access_protocol'] = access_protocol
__props__['azure_container_info'] = azure_container_info
__props__['client_access_rights'] = client_access_rights
__props__['data_policy'] = data_policy
__props__['description'] = description
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__['device_name'] = device_name
if monitoring_status is None and not opts.urn:
raise TypeError("Missing required property 'monitoring_status'")
__props__['monitoring_status'] = monitoring_status
__props__['name'] = name
__props__['refresh_details'] = refresh_details
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if share_status is None and not opts.urn:
raise TypeError("Missing required property 'share_status'")
__props__['share_status'] = share_status
__props__['user_access_rights'] = user_access_rights
__props__['share_mappings'] = None
__props__['system_data'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Share")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Share, __self__).__init__(
'azure-nextgen:databoxedge/latest:Share',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
"""
Get an existing Share resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Share(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Output[str]:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
"""
List of IP addresses and corresponding access rights on the share(required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> pulumi.Output[Optional[str]]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Share on ASE device
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
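# A minimal provisioning sketch for the Share resource defined above; every
# value (resource name, device name, resource group, protocol and status
# strings) is a placeholder and must be replaced with real deployment settings:
#
#     import pulumi_azure_nextgen.databoxedge.latest as databoxedge
#
#     share = databoxedge.Share("example-share",
#         access_protocol="SMB",
#         device_name="example-device",
#         monitoring_status="Enabled",
#         resource_group_name="example-rg",
#         share_status="OK")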
| 47.084034
| 512
| 0.663127
|
dd505f44b116ba0e69cb475c389a18c0b9a2ee9e
| 56
|
py
|
Python
|
cartpy/__init__.py
|
lucascr91/cartpy
|
b7e6302d8b4b5406b5f4304f4d05f17e7d2ac552
|
[
"MIT"
] | 1
|
2020-11-21T03:46:21.000Z
|
2020-11-21T03:46:21.000Z
|
cartpy/__init__.py
|
lucascr91/cartpy
|
b7e6302d8b4b5406b5f4304f4d05f17e7d2ac552
|
[
"MIT"
] | null | null | null |
cartpy/__init__.py
|
lucascr91/cartpy
|
b7e6302d8b4b5406b5f4304f4d05f17e7d2ac552
|
[
"MIT"
] | null | null | null |
from cartpy.cartpy import Municipio, Year, name_to_code
| 28
| 55
| 0.839286
|
14b8b663f07d6ef1a102a6e0338661b3a89f0152
| 7,694
|
py
|
Python
|
seahub/dingtalk/views.py
|
Jonatino/seahub
|
0bff0c1104dd52bb0d09daa6bc241e63dffe2b23
|
[
"Apache-2.0"
] | 101
|
2021-05-16T06:00:03.000Z
|
2021-12-01T02:02:29.000Z
|
seahub/dingtalk/views.py
|
Jonatino/seahub
|
0bff0c1104dd52bb0d09daa6bc241e63dffe2b23
|
[
"Apache-2.0"
] | null | null | null |
seahub/dingtalk/views.py
|
Jonatino/seahub
|
0bff0c1104dd52bb0d09daa6bc241e63dffe2b23
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import uuid
import json
import time
import hmac
import base64
import urllib.parse
import logging
import requests
from hashlib import sha256
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.utils.translation import ugettext as _
from seahub.api2.utils import get_api_token
from seahub import auth
from seahub.profile.models import Profile
from seahub.utils import render_error, get_site_scheme_and_netloc
from seahub.utils.auth import gen_user_virtual_id
from seahub.base.accounts import User
from seahub.auth.models import SocialAuthUser
from seahub.auth.decorators import login_required
from seahub.dingtalk.settings import ENABLE_DINGTALK, \
DINGTALK_QR_CONNECT_APP_ID, DINGTALK_QR_CONNECT_APP_SECRET, \
DINGTALK_QR_CONNECT_AUTHORIZATION_URL, \
DINGTALK_QR_CONNECT_USER_INFO_URL, DINGTALK_QR_CONNECT_RESPONSE_TYPE, \
DINGTALK_QR_CONNECT_SCOPE, DINGTALK_QR_CONNECT_LOGIN_REMEMBER_ME
logger = logging.getLogger(__name__)
def dingtalk_login(request):
if not ENABLE_DINGTALK:
return render_error(request, _('Error, please contact administrator.'))
state = str(uuid.uuid4())
request.session['dingtalk_login_state'] = state
request.session['dingtalk_login_redirect'] = request.GET.get(auth.REDIRECT_FIELD_NAME, '/')
data = {
'appid': DINGTALK_QR_CONNECT_APP_ID,
'response_type': DINGTALK_QR_CONNECT_RESPONSE_TYPE,
'scope': DINGTALK_QR_CONNECT_SCOPE,
'redirect_uri': get_site_scheme_and_netloc() + reverse('dingtalk_callback'),
'state': state,
}
url = DINGTALK_QR_CONNECT_AUTHORIZATION_URL + '?' + urllib.parse.urlencode(data)
return HttpResponseRedirect(url)
def dingtalk_callback(request):
if not ENABLE_DINGTALK:
return render_error(request, _('Error, please contact administrator.'))
state = request.GET.get('state', '')
if not state or state != request.session.get('dingtalk_login_state', ''):
logger.error('invalid state')
return render_error(request, _('Error, please contact administrator.'))
timestamp = str(int(time.time()*1000)).encode('utf-8')
appsecret = DINGTALK_QR_CONNECT_APP_SECRET.encode('utf-8')
signature = base64.b64encode(hmac.new(appsecret, timestamp, digestmod=sha256).digest())
parameters = {
'accessKey': DINGTALK_QR_CONNECT_APP_ID,
'timestamp': timestamp,
'signature': signature,
}
code = request.GET.get('code')
data = {"tmp_auth_code": code}
full_user_info_url = DINGTALK_QR_CONNECT_USER_INFO_URL + '?' + urllib.parse.urlencode(parameters)
user_info_resp = requests.post(full_user_info_url, data=json.dumps(data))
user_info = user_info_resp.json()['user_info']
# seahub authenticate user
if 'unionid' not in user_info:
logger.error('Required user info not found.')
logger.error(user_info)
return render_error(request, _('Error, please contact administrator.'))
auth_user = SocialAuthUser.objects.get_by_provider_and_uid('dingtalk', user_info['unionid'])
if auth_user:
email = auth_user.username
else:
email = gen_user_virtual_id()
SocialAuthUser.objects.add(email, 'dingtalk', user_info['unionid'])
try:
user = auth.authenticate(remote_user=email)
except User.DoesNotExist:
user = None
except Exception as e:
logger.error(e)
return render_error(request, _('Error, please contact administrator.'))
if not user or not user.is_active:
return render_error(request, _('User %s not found or inactive.') % email)
# User is valid. Set request.user and persist user in the session
# by logging the user in.
request.user = user
request.session['remember_me'] = DINGTALK_QR_CONNECT_LOGIN_REMEMBER_ME
auth.login(request, user)
# update user's profile
name = user_info['nick'] if 'nick' in user_info else ''
if name:
profile = Profile.objects.get_profile_by_user(email)
if not profile:
profile = Profile(user=email)
profile.nickname = name.strip()
profile.save()
# generate auth token for Seafile client
api_token = get_api_token(request)
# redirect user to home page
response = HttpResponseRedirect(request.session['dingtalk_login_redirect'])
response.set_cookie('seahub_auth', email + '@' + api_token.key)
return response
@login_required
def dingtalk_connect(request):
if not ENABLE_DINGTALK:
return render_error(request, _('Error, please contact administrator.'))
state = str(uuid.uuid4())
request.session['dingtalk_connect_state'] = state
request.session['dingtalk_connect_redirect'] = request.GET.get(auth.REDIRECT_FIELD_NAME, '/')
data = {
'appid': DINGTALK_QR_CONNECT_APP_ID,
'response_type': DINGTALK_QR_CONNECT_RESPONSE_TYPE,
'scope': DINGTALK_QR_CONNECT_SCOPE,
'redirect_uri': get_site_scheme_and_netloc() + reverse('dingtalk_connect_callback'),
'state': state,
}
url = DINGTALK_QR_CONNECT_AUTHORIZATION_URL + '?' + urllib.parse.urlencode(data)
return HttpResponseRedirect(url)
@login_required
def dingtalk_connect_callback(request):
if not ENABLE_DINGTALK:
return render_error(request, _('Error, please contact administrator.'))
state = request.GET.get('state', '')
if not state or state != request.session.get('dingtalk_connect_state', ''):
logger.error('invalid state')
return render_error(request, _('Error, please contact administrator.'))
timestamp = str(int(time.time()*1000)).encode('utf-8')
appsecret = DINGTALK_QR_CONNECT_APP_SECRET.encode('utf-8')
signature = base64.b64encode(hmac.new(appsecret, timestamp, digestmod=sha256).digest())
parameters = {
'accessKey': DINGTALK_QR_CONNECT_APP_ID,
'timestamp': timestamp,
'signature': signature,
}
code = request.GET.get('code')
data = {"tmp_auth_code": code}
full_user_info_url = DINGTALK_QR_CONNECT_USER_INFO_URL + '?' + urllib.parse.urlencode(parameters)
user_info_resp = requests.post(full_user_info_url, data=json.dumps(data))
user_info = user_info_resp.json()['user_info']
# seahub authenticate user
if 'unionid' not in user_info:
logger.error('Required user info not found.')
logger.error(user_info)
return render_error(request, _('Error, please contact administrator.'))
username = request.user.username
dingtalk_user_id = user_info['unionid']
auth_user = SocialAuthUser.objects.get_by_provider_and_uid('dingtalk',
dingtalk_user_id)
if auth_user:
logger.error('dingtalk account already exists %s' % dingtalk_user_id)
        return render_error(request, 'Error: this DingTalk account is already bound')
SocialAuthUser.objects.add(username, 'dingtalk', dingtalk_user_id)
# update user's profile
name = user_info['nick'] if 'nick' in user_info else ''
if name:
profile = Profile.objects.get_profile_by_user(username)
if not profile:
profile = Profile(user=username)
profile.nickname = name.strip()
profile.save()
response = HttpResponseRedirect(request.session['dingtalk_connect_redirect'])
return response
@login_required
def dingtalk_disconnect(request):
if not ENABLE_DINGTALK:
return render_error(request, _('Error, please contact administrator.'))
username = request.user.username
SocialAuthUser.objects.delete_by_username_and_provider(username, 'dingtalk')
response = HttpResponseRedirect(request.GET.get(auth.REDIRECT_FIELD_NAME, '/'))
return response
| 35.62037
| 101
| 0.713803
|
af5d0b37c4a9894442cc30435fe3cd880970d2f1
| 1,471
|
py
|
Python
|
.history/run_update_20220325112242.py
|
miguel-fresh/geoip-translation
|
ccf9dbc0330e597704e57d8b2967fc9be16017ed
|
[
"Info-ZIP"
] | null | null | null |
.history/run_update_20220325112242.py
|
miguel-fresh/geoip-translation
|
ccf9dbc0330e597704e57d8b2967fc9be16017ed
|
[
"Info-ZIP"
] | null | null | null |
.history/run_update_20220325112242.py
|
miguel-fresh/geoip-translation
|
ccf9dbc0330e597704e57d8b2967fc9be16017ed
|
[
"Info-ZIP"
] | null | null | null |
import subprocess
from sys import stderr, stdout
from pathlib import Path
from os import rename, getcwd, path
START_DOWNLOAD = False
START_CONVERT = True
CURRENT_DIR = Path(getcwd())
ZIP_NAME = 'GeoLite2-City-CSV.zip'
DAT_NAME = 'GeoLiteCity.dat'
DOWNLOAD_DIRNAME = './data'
OUTPUT_DIRNAME = '../output'
DOWNLOAD_ABSPATH = CURRENT_DIR.joinpath(DOWNLOAD_DIRNAME)
OUTPUT_ABSPATH = CURRENT_DIR.joinpath(OUTPUT_DIRNAME)
ZIP_ABSPATH = DOWNLOAD_ABSPATH.joinpath(ZIP_NAME)
DAT_ABSPATH = OUTPUT_ABSPATH.joinpath(DAT_NAME)
if START_DOWNLOAD:
# Download .zip
download_output = subprocess.run(['composer', 'update', 'tronovav/geoip2-update'],
capture_output=True,
shell=True,
cwd='./geoip2-update')
print(download_output)
# TODO: Rename .zip to GeoLite2-City-CSV.zip
# Convert format
if START_CONVERT:
# python geolite2legacy.py -i GeoLite2-City-CSV.zip -o GeoLiteCity.dat -f geoname2fips.csv
    downloaded_zip_abspath = CURRENT_DIR.joinpath(ZIP_NAME)
    print(downloaded_zip_abspath)
update_output = subprocess.run(['python', 'geolite2legacy.py',
'-i', ZIP_ABSPATH,
'-o', DAT_ABSPATH,
'-f', 'geoname2fips.csv'],
cwd='./geolite2legacy')
print(update_output)
print(DOWNLOAD_ABSPATH)
| 28.288462
| 94
| 0.626105
|
12255ec753630cb2efaffc1b52d2cdd4f4c59fd3
| 5,430
|
py
|
Python
|
pyiron_base/project/store.py
|
pmrv/pyiron_base
|
af1729708a8226575ca2c84f574e7cb046b7f7cd
|
[
"BSD-3-Clause"
] | null | null | null |
pyiron_base/project/store.py
|
pmrv/pyiron_base
|
af1729708a8226575ca2c84f574e7cb046b7f7cd
|
[
"BSD-3-Clause"
] | null | null | null |
pyiron_base/project/store.py
|
pmrv/pyiron_base
|
af1729708a8226575ca2c84f574e7cb046b7f7cd
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import print_function
from datetime import datetime
from pyiron_base.job.generic import GenericJob
"""
Class for storing user aggregated information in an pyiron object
"""
__author__ = "Jan Janssen"
__copyright__ = (
"Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
"Computational Materials Design (CM) Department"
)
__version__ = "1.0"
__maintainer__ = "Jan Janssen"
__email__ = "janssen@mpie.de"
__status__ = "testing"
__date__ = "Sep 1, 2017"
class ProjectStore(GenericJob):
"""
The ProjectStore object, is derived from the GenericJob class and allows the user to store
aggregated information in an HDF5 file associated with the corresponding project. To the user
the ProjectStore object behaves like a dictionary.
Args:
project: Project object (defines path where job will be created and stored)
job_name: name of the job (must be unique within this project path)
Attributes:
.. attribute:: key
keys of the ProjectStore object (like a dictionary)
.. attribute:: items
items of the ProjectStore object (like a dictionary)
.. attribute:: values
values of the ProjectStore object (like a dictionary)
.. attribute:: time_created
            date when the ProjectStore object was created
.. attribute:: time_modified
            date when the ProjectStore object was modified
"""
def __init__(self, project, job_name):
super(ProjectStore, self).__init__(project, job_name)
self.__version__ = "0.1"
self.__name__ = "ProjectStore"
self._lib = {"available": True, "enabled": True}
self._store = {}
@property
def keys(self):
"""
a set-like object providing a view on ProjectStore's dictionary keys
"""
return self._store.keys()
@property
def items(self):
"""
a set-like object providing a view on ProjectStore's dictionary items
"""
return self._store.items()
@property
def values(self):
"""
an object providing a view on ProjectStore's dictionary values
"""
return self._store.values()
@property
def time_created(self):
"""
Return the date when the ProjectStore object was created
Returns:
DateTime: the date when the ProjectStore object was created
"""
if self.job_id:
return self.project.db.get_item_by_id(self._job_id)["timestart"]
return None
@property
def time_modified(self):
"""
Return the date when the ProjectStore object was modified
Returns:
DateTime: the date when the ProjectStore object was modified
"""
if self.job_id:
return self.project.db.get_item_by_id(self._job_id)["timestop"]
return None
def run_if_lib(self):
"""
Internal function to handle jobs with Python based executables
Returns:
int: Database ID of the ProjectStore object
"""
job_id = self.save()
self._write_to_database()
return job_id
def from_hdf(self, hdf=None, group_name=None):
"""
Restore object from hdf5 format
Args:
hdf: Optional hdf5 file, otherwise self is used.
group_name (str): Optional hdf5 group in the hdf5 file.
"""
super(ProjectStore, self).from_hdf(hdf=hdf, group_name=group_name)
for node in self.project_hdf5.list_nodes():
value = self.project_hdf5[node]
self._store[node] = value
self.__setattr__(node, value)
def __setitem__(self, key, value):
"""
Store values in the ProjectStore
Args:
key (str): Key for the dictionary
value: corresponding values
"""
self._hdf5[key] = value
self._store[key] = value
self.__setattr__(key, value)
self.run()
def _run_if_finished(self, delete_existing_job=False, run_again=True):
"""
Internal function overwriting the default behaviour when the job is finished,
to update the database entry when the job was modified
Args:
delete_existing_job (bool): not used for this job type
run_again (bool): not used for this job type
"""
self._write_to_database()
def _write_to_database(self):
"""
If a job_id exists update the timestop entry in the database, to validate when this object was updated.
"""
if self.job_id:
self.project.db.item_update({"timestop": datetime.now()}, self._job_id)
def write_input(self):
"""
Implement required function template - even though it is not required for this job type.
"""
pass
def collect_output(self):
"""
Implement required function template - even though it is not required for this job type.
"""
pass
def collect_logfiles(self):
"""
Implement required function template - even though it is not required for this job type.
"""
pass
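# A minimal usage sketch for ProjectStore, assuming an existing pyiron Project
# instance `pr`; the key and value are illustrative only:
#
#     store = ProjectStore(pr, "my_store")
#     store["total_energy"] = -123.4   # persisted to HDF5 via __setitem__
#     print(list(store.keys))          # dictionary-like view of the stored keys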
| 29.193548
| 111
| 0.628545
|
e0f463df9b204df35b819405cd3a9398947c9a40
| 2,847
|
py
|
Python
|
syn/python/get_kge.py
|
mundaym/ibex
|
71a8763553a25b90394de92f3f97e56b1009030b
|
[
"Apache-2.0"
] | 1,375
|
2019-11-05T15:11:00.000Z
|
2022-03-28T17:50:43.000Z
|
syn/python/get_kge.py
|
mundaym/ibex
|
71a8763553a25b90394de92f3f97e56b1009030b
|
[
"Apache-2.0"
] | 7,045
|
2019-11-05T16:05:45.000Z
|
2022-03-31T23:08:08.000Z
|
syn/python/get_kge.py
|
mundaym/ibex
|
71a8763553a25b90394de92f3f97e56b1009030b
|
[
"Apache-2.0"
] | 428
|
2019-11-05T15:00:20.000Z
|
2022-03-28T15:34:57.000Z
|
#!/usr/bin/python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# Parse a yosys area report and give a kGE equivalent
import argparse
def read_lib(lib_file_path, ref_cell):
with open(lib_file_path, 'r') as f:
lib_file = f.readlines()
cell_dict = {}
weighted_dict = {}
cell_name = None
for line_idx, line in enumerate(lib_file):
if line.startswith(' cell ('):
if cell_name is not None:
raise RuntimeError('{}:{} Found cell while searching for area'
.format(lib_file_path, line_idx + 1))
cell_name = line.split()[1].strip('()')
elif line.startswith('\tarea'):
if cell_name is None:
raise RuntimeError('{}:{} Found area while searching for cell'
.format(lib_file_path, line_idx + 1))
try:
cell_area = line.split()[2].strip(';')
cell_dict[cell_name] = float(cell_area)
cell_name = None
except (IndexError, ValueError):
raise RuntimeError('{}:{} Area declaration misformatted'
.format(lib_file_path, line_idx + 1))
if ref_cell not in cell_dict:
raise RuntimeError('Specified reference cell: {} was not found in '
'library: {}' .format(ref_cell, lib_file_path))
for cell in cell_dict:
weighted_dict[cell] = cell_dict[cell] / cell_dict[ref_cell]
return weighted_dict
def get_kge(report_path, weighted_dict):
with open(report_path, 'r') as f:
report = f.readlines()
ge = 0.0
for line_idx, line in enumerate(report):
data = line.split()
if not data:
continue
weight = weighted_dict.get(data[0])
if weight is not None:
try:
ge += float(data[1]) * weight
except (IndexError, ValueError):
raise RuntimeError('{}:{} Cell {} matched but was misformatted'
.format(report_path, line_idx + 1, data[0]))
print("Area in kGE = ", round(ge/1000, 2))
def main():
arg_parser = argparse.ArgumentParser(
description="""Calculate kGE from a Yosys report and LIB file""")
arg_parser.add_argument('lib_file_path', help='Path to the LIB file')
arg_parser.add_argument('report_path', help='Path to the report')
arg_parser.add_argument('--cell', help='Reference cell (default:NAND2_X1)',
default='NAND2_X1')
args = arg_parser.parse_args()
weighted_dict = read_lib(args.lib_file_path, args.cell)
get_kge(args.report_path, weighted_dict)
if __name__ == "__main__":
main()
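# Example invocation (the file names are illustrative):
#   ./get_kge.py my_stdcells.lib yosys_area_report.txt --cell NAND2_X1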
| 35.148148
| 79
| 0.588339
|
dbdb41fe69ab9694fe573269ef0becb89599cac8
| 447
|
py
|
Python
|
env/lib/python3.8/site-packages/plotly/validators/contour/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 76
|
2020-07-06T14:44:05.000Z
|
2022-02-14T15:30:21.000Z
|
env/lib/python3.8/site-packages/plotly/validators/contour/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-08-09T02:30:14.000Z
|
2022-03-12T00:50:14.000Z
|
env/lib/python3.8/site-packages/plotly/validators/contour/_uirevision.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 11
|
2020-07-12T16:18:07.000Z
|
2022-02-05T16:48:35.000Z
|
import _plotly_utils.basevalidators
class UirevisionValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="uirevision", parent_name="contour", **kwargs):
super(UirevisionValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
| 34.384615
| 82
| 0.657718
|
cf46f93f64f6e2585f2243569bd8db792ef764c9
| 552
|
py
|
Python
|
utils/builders/grpc_builders.py
|
bpuderer/python-test-env
|
ed5d1f1b977560cc9bc52952500b93060fc11e6a
|
[
"MIT"
] | null | null | null |
utils/builders/grpc_builders.py
|
bpuderer/python-test-env
|
ed5d1f1b977560cc9bc52952500b93060fc11e6a
|
[
"MIT"
] | 1
|
2021-03-31T18:50:05.000Z
|
2021-03-31T18:50:05.000Z
|
utils/builders/grpc_builders.py
|
bpuderer/python-test-env
|
ed5d1f1b977560cc9bc52952500b93060fc11e6a
|
[
"MIT"
] | null | null | null |
import json
from google.protobuf import json_format
from services.doubler.doubler_pb2 import Number
def build_request_from_dict(d, request):
json_str = json.dumps(d)
return json_format.Parse(json_str, request)
def build_request_from_file(filename, request):
with open(filename) as f:
json_str = f.read()
return json_format.Parse(json_str, request)
def build_number_from_dict(d):
return build_request_from_dict(d, Number())
def build_number_from_file(filename):
return build_request_from_file(filename, Number())
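# A minimal usage sketch; the field name "value" is only a guess, since the
# actual fields of services.doubler.doubler_pb2.Number are not shown here:
#
#     number = build_number_from_dict({"value": 21})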
| 24
| 54
| 0.768116
|
81c202fb46304ee3e9ced64dbcc189af26ac908f
| 1,354
|
py
|
Python
|
ooobuild/dyn/view/x_control_access.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/view/x_control_access.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
ooobuild/dyn/view/x_control_access.py
|
Amourspirit/ooo_uno_tmpl
|
64e0c86fd68f24794acc22d63d8d32ae05dd12b8
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is an auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.view
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.view import XControlAccess as XControlAccess
setattr(XControlAccess, '__ooo_ns__', 'com.sun.star.view')
setattr(XControlAccess, '__ooo_full_ns__', 'com.sun.star.view.XControlAccess')
setattr(XControlAccess, '__ooo_type_name__', 'interface')
else:
from ...lo.view.x_control_access import XControlAccess as XControlAccess
__all__ = ['XControlAccess']
| 36.594595
| 82
| 0.767356
|
0473dbd355cf30969018b1156a37f9bcc5b61775
| 1,374
|
py
|
Python
|
dapr/actor/runtime/_method_context.py
|
mukundansundararajan/python-sdk
|
2ee94e31292a650135b97bc3c70e3eca885c9b47
|
[
"MIT"
] | null | null | null |
dapr/actor/runtime/_method_context.py
|
mukundansundararajan/python-sdk
|
2ee94e31292a650135b97bc3c70e3eca885c9b47
|
[
"MIT"
] | null | null | null |
dapr/actor/runtime/_method_context.py
|
mukundansundararajan/python-sdk
|
2ee94e31292a650135b97bc3c70e3eca885c9b47
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT License.
"""
from dapr.actor.runtime._call_type import ActorCallType
class ActorMethodContext:
"""A Actor method context that contains method information invoked
by :class:`ActorRuntime`.
"""
def __init__(self, method_name: str, call_type: ActorCallType):
self._method_name = method_name
self._calltype = call_type
@property
def method_name(self) -> str:
"""Gets the method name."""
return self._method_name
@property
def call_type(self) -> ActorCallType:
"""Gets :class:`ActorCallType` for this method."""
return self._calltype
@classmethod
def create_for_actor(cls, method_name: str):
"""Creates :class:`ActorMethodContext` object for actor method."""
return ActorMethodContext(method_name, ActorCallType.actor_interface_method)
@classmethod
def create_for_timer(cls, method_name: str):
"""Creates :class:`ActorMethodContext` object for timer_method."""
return ActorMethodContext(method_name, ActorCallType.timer_method)
@classmethod
def create_for_reminder(cls, method_name: str):
"""Creates :class:`ActorMethodContext` object for reminder_method."""
return ActorMethodContext(method_name, ActorCallType.reminder_method)
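# A minimal usage sketch; the timer callback name below is illustrative only:
if __name__ == '__main__':
    ctx = ActorMethodContext.create_for_timer('my_timer_callback')
    print(ctx.method_name, ctx.call_type)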
| 31.227273
| 84
| 0.700873
|
26b37c181e6812d0b4d916774768c68fe25e8e99
| 11,739
|
py
|
Python
|
src/plugin.py
|
urwrstkn8mare/galaxy-minecraft-integration
|
cc4448019b6a8713ae474b45b554172b17d4776b
|
[
"MIT"
] | 17
|
2019-09-19T15:02:05.000Z
|
2022-01-14T16:33:02.000Z
|
src/plugin.py
|
urwrstkn8mare/galaxy-minecraft-integration
|
cc4448019b6a8713ae474b45b554172b17d4776b
|
[
"MIT"
] | 19
|
2019-09-05T12:27:18.000Z
|
2021-12-05T20:55:45.000Z
|
src/plugin.py
|
urwrstkn8mare/galaxy-minecraft-integration
|
cc4448019b6a8713ae474b45b554172b17d4776b
|
[
"MIT"
] | 8
|
2019-08-30T12:55:08.000Z
|
2021-12-18T10:10:07.000Z
|
import sys, asyncio, logging, urllib, os, json, pickle, webbrowser
from galaxy.api.plugin import (
Plugin,
LocalGame,
Authentication,
Game,
create_and_run_plugin,
GameTime,
)
from galaxy.api.consts import LocalGameState, LicenseType, OSCompatibility, Platform
from galaxy.api.types import LicenseInfo
from galaxy.api.errors import InvalidCredentials
from local import WindowsLocalClient, MacLocalClient
from consts import (
GameID,
MINECRAFT_DUNGEONS_INSTALL_URL,
MINECRAFT_MAC_INSTALL_URL,
MINECRAFT_WIN_INSTALL_URL,
GAME_NAMES,
IS_WINDOWS,
INSTALLED_FOLDER_PATH,
GAMES,
)
from utils import misc, time_tracker
from utils.decorators import double_click_effect
import multimc
from version import __version__
log = logging.getLogger(__name__)
class MinecraftPlugin(Plugin):
def __init__(self, reader, writer, token):
super().__init__(Platform.Minecraft, __version__, reader, writer, token)
self.local_client = WindowsLocalClient() if IS_WINDOWS else MacLocalClient()
self.status = {
GameID.Minecraft: LocalGameState.None_,
GameID.MinecraftDungeons: LocalGameState.None_,
}
self.was_game_launched_task: asyncio.Task = None
self.update_task: asyncio.Task = None
self.check_sizes_task: asyncio.Task = None
self.owned = []
self.multimc: multimc.MultiMCClient = None
def _authenticate(self):
return Authentication("Minecraft_ID", "Minecraft Player")
async def authenticate(self, stored_credentials=None):
log.debug(f"stored_credentials: {stored_credentials}")
if stored_credentials is not None:
if "dummy" in stored_credentials:
raise InvalidCredentials
if misc.IS(["owned", "multimcpath"], IN=stored_credentials):
self.owned = json.loads(stored_credentials["owned"])
if stored_credentials["multimcpath"] != "null":
self.multimc = multimc.MultiMCClient(stored_credentials["multimcpath"])
return self._authenticate()
return misc.get_next_step("Select Owned Games", 715, 725, "page1")
async def pass_login_credentials(self, step, credentials, cookies):
def auth():
self.store_credentials(
{
"owned": json.dumps(self.owned),
"multimcpath": "null" if self.multimc is None else self.multimc.path,
}
)
return self._authenticate()
params = urllib.parse.parse_qs(
urllib.parse.urlsplit(credentials["end_uri"]).query, keep_blank_values=True
)
log.debug(f"Params: {params}")
if "open" in params:
webbrowser.open_new(params["open"][0])
if "install_mc" in params and params["install_mc"][0] == "true":
await self.install_game(GameID.Minecraft)
if "next" in params:
nxt = params["next"][0]
if nxt == "page2":
new_params = ""
if "path" in params:
raw_path = params["path"][0]
new_params = f"?path={urllib.parse.quote(raw_path)}"
for game_id in GAMES:
if game_id in params and params[game_id][0] == "on":
self.owned.append(game_id)
return misc.get_next_step(
"Set your MultiMC path", 505, 505, "page2", params=new_params
)
elif nxt == "page3":
if "path" in params:
raw_path = params["path"][0]
if raw_path != "":
path = os.path.expanduser(os.path.expandvars(os.path.abspath(raw_path)))
try:
self.multimc = multimc.MultiMCClient(path)
return misc.get_next_step(
"Finished", 410, 355, "page3", params="?multimc=true"
)
except multimc.PathNotExectuable:
return misc.get_next_step(
"Set your MultiMC path",
530,
505,
"page2",
params=f"?errored=true&path={urllib.parse.quote(raw_path)}",
)
return misc.get_next_step("Finished", 310, 310, "page3")
elif nxt == "close":
return auth()
log.warning("if you see this, something is wrong")
async def get_owned_games(self):
log.debug(f"self.owned: {self.owned}")
out = []
for game_id in self.owned:
out.append(
Game(game_id, GAME_NAMES[game_id], None, LicenseInfo(LicenseType.SinglePurchase))
)
return out
async def get_local_games(self):
local_games = []
for game_id in self.owned:
local_games.append(LocalGame(game_id, self.status[game_id]))
return local_games
async def get_os_compatibility(self, game_id, context):
# Assuming OS compatible with game and supported by this plugin.
if game_id == GameID.Minecraft:
return OSCompatibility.MacOS | OSCompatibility.Windows
elif game_id == GameID.MinecraftDungeons:
return OSCompatibility.Windows
async def get_local_size(self, game_id: str, context):
size = await misc.get_size_at_path(
self.local_client.find_launcher_path(game_id, folder=True)
)
if game_id == GameID.Minecraft and self._multimc_enabled():
if size is None:
size = 0
size += await misc.get_size_at_path(self.multimc.folder)
return size
async def install_game(self, game_id):
if game_id == GameID.Minecraft:
url = MINECRAFT_WIN_INSTALL_URL if IS_WINDOWS else MINECRAFT_MAC_INSTALL_URL
elif game_id == GameID.MinecraftDungeons:
url = MINECRAFT_DUNGEONS_INSTALL_URL
else:
log.warning(f"Uknown game_id to install: {game_id}")
return
installer_path = await misc.download(url)
log.info(f"Installing {game_id} by launching: {installer_path}")
misc.open_path(installer_path)
def _launch_multimc(self):
self.multimc.launch()
def _multimc_enabled(self):
return self.multimc is not None
@double_click_effect(timeout=0.5, effect="_launch_multimc", if_func="_multimc_enabled")
async def launch_game(self, game_id):
pth = self.local_client.find_launcher_path(game_id)
if game_id == GameID.Minecraft and pth is None and self._multimc_enabled():
log.info("Launching MultiMC")
self.multimc.launch()
else:
self.local_client.launch(game_id)
async def uninstall_game(self, game_id):
log.info(f"Uninstalling {game_id}")
self.local_client.uninstall(game_id)
async def _update(self):
def update(game_id, status: LocalGameState):
if self.status[game_id] != status:
self.status[game_id] = status
self.update_local_game_status(LocalGame(game_id, status))
log.info(f"Updated {game_id} to {status}")
return True
return False
for game_id in self.owned:
is_installed = self.local_client.find_launcher_path(game_id) is not None
if game_id == GameID.Minecraft and self._multimc_enabled() and self.multimc.running():
update(game_id, LocalGameState.Installed | LocalGameState.Running)
elif self.local_client.is_game_still_running(game_id):
if update(game_id, LocalGameState.Installed | LocalGameState.Running):
log.info(f"Starting to track {game_id}")
self.game_time_tracker.start_tracking_game(game_id)
elif game_id == GameID.Minecraft and self._multimc_enabled():
update(game_id, LocalGameState.Installed)
elif is_installed:
if update(game_id, LocalGameState.Installed):
if game_id in self.game_time_tracker.get_tracking_games():
self.game_time_tracker.stop_tracking_game(game_id)
log.debug(f"Stopped tracking time for {game_id}")
else:
update(game_id, LocalGameState.None_)
await asyncio.sleep(0)
def tick(self):
if self.update_task is None or self.update_task.done():
self.update_task = self.create_task(self._update(), "Update Task")
# Time Tracker
async def get_game_time(self, game_id, context):
try:
tracked_time = self.game_time_tracker.get_tracked_time(game_id)
except time_tracker.GameNotTrackedException:
tracked_time = GameTime(game_id, 0, None)
if self._multimc_enabled() and game_id == GameID.Minecraft:
multimc_time = self.multimc.get_time()
else:
multimc_time = GameTime(game_id, 0, None)
time = tracked_time.time_played + multimc_time.time_played
lastPlayed = misc.compare(tracked_time.last_played_time, multimc_time.last_played_time)
log.debug(f"Got game time: {time}")
return GameTime(game_id, time, lastPlayed)
def handshake_complete(self):
self.play_time_cache_path = os.path.join(
INSTALLED_FOLDER_PATH, "minecraft_play_time_cache.txt"
)
log.debug(f"Local Play Time Cache Path: {self.play_time_cache_path}")
if "game_time_cache" in self.persistent_cache:
self.game_time_cache = pickle.loads(
bytes.fromhex(self.persistent_cache["game_time_cache"])
)
else:
try:
with open(self.play_time_cache_path, "r") as file:
for line in file.readlines():
if line[:1] != "#":
self.game_time_cache = pickle.loads(bytes.fromhex(line))
break
except FileNotFoundError:
self.game_time_cache = None
self.game_time_tracker = time_tracker.TimeTracker(game_time_cache=self.game_time_cache)
async def shutdown(self):
for game_id in self.game_time_tracker.get_tracking_games():
self.game_time_tracker.stop_tracking_game(game_id)
if self.game_time_cache is not None:
with open(self.play_time_cache_path, "w+") as file:
file.write("# DO NOT EDIT THIS FILE\n")
file.write(self.game_time_tracker.get_time_cache_hex())
log.info("Wrote to local file cache")
await super().shutdown()
def game_times_import_complete(self):
if len(self.game_time_tracker.get_tracking_games()) > 0:
log.debug("Game time still being tracked. Not setting cache yet.")
else:
self.game_time_cache = self.game_time_tracker.get_time_cache()
log.debug(f"game_time_cache: {self.game_time_cache}")
self.persistent_cache["game_time_cache"] = self.game_time_tracker.get_time_cache_hex()
self.push_cache()
def main():
create_and_run_plugin(MinecraftPlugin, sys.argv)
if __name__ == "__main__":
main()
| 42.532609
| 99
| 0.593066
|
cd3c7a3a77c6712ce77ca59b7d2f340ce9405979
| 8,851
|
py
|
Python
|
tests/contrib/sensors/test_hdfs_sensor.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | 1
|
2021-05-07T17:12:35.000Z
|
2021-05-07T17:12:35.000Z
|
tests/contrib/sensors/test_hdfs_sensor.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | null | null | null |
tests/contrib/sensors/test_hdfs_sensor.py
|
RyanMagnusson/incubator-airflow
|
ad81412fba2e8510442db73d9c905cac5eed8ebd
|
[
"Apache-2.0"
] | 1
|
2021-05-07T17:12:37.000Z
|
2021-05-07T17:12:37.000Z
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
import re
from datetime import timedelta
from airflow.contrib.sensors.hdfs_sensor import HdfsSensorFolder, HdfsSensorRegex
from airflow.exceptions import AirflowSensorTimeout
class HdfsSensorFolderTests(unittest.TestCase):
def setUp(self):
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
def test_should_be_empty_directory(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory',
filepath='/datadirectory/empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_be_empty_directory_fail(self):
"""
test the empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/not_empty_directory',
be_empty=True,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_be_a_non_empty_directory(self):
"""
        test the non-empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_non_empty_directory',
filepath='/datadirectory/not_empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_be_non_empty_directory_fail(self):
"""
        test the non-empty directory behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
task = HdfsSensorFolder(task_id='Should_be_empty_directory_fail',
filepath='/datadirectory/empty_directory',
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
class HdfsSensorRegexTests(unittest.TestCase):
def setUp(self):
from tests.core import FakeHDFSHook
self.hook = FakeHDFSHook
self.log = logging.getLogger()
self.log.setLevel(logging.DEBUG)
def test_should_match_regex(self):
"""
        test the regex matching behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_not_match_regex(self):
"""
        test the regex matching behaviour
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("^IDoNotExist")
task = HdfsSensorRegex(task_id='Should_not_match_the_regex',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_and_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_and_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignore_copying=True,
ignored_ext=['_COPYING_', 'sftp'],
file_size=10,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
task.execute(None)
# Then
# Nothing happens, nothing is raised exec is ok
def test_should_match_regex_but_filesize(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("test[1-2]file")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
def test_should_match_regex_but_copyingext(self):
"""
test the file size behaviour with regex
:return:
"""
# Given
self.log.debug('#' * 10)
self.log.debug('Running %s', self._testMethodName)
self.log.debug('#' * 10)
compiled_regex = re.compile("copying_file_\d+.txt")
task = HdfsSensorRegex(task_id='Should_match_the_regex_but_filesize',
filepath='/datadirectory/regex_dir',
regex=compiled_regex,
ignored_ext=['_COPYING_', 'sftp'],
file_size=20,
timeout=1,
retry_delay=timedelta(seconds=1),
poke_interval=1,
hook=self.hook)
# When
# Then
with self.assertRaises(AirflowSensorTimeout):
task.execute(None)
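# Example invocation from the repository root (module path taken from this
# file's location under tests/contrib/sensors):
#   python -m unittest tests.contrib.sensors.test_hdfs_sensor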
| 35.546185
| 81
| 0.516552
|
c427c62662daa151124b09abf6b4db8ba9afaf3a
| 3,347
|
py
|
Python
|
image_proc/launch/image_proc.launch.py
|
allenh1/image_pipeline
|
ab7f8ec264c5c8e7240d599fbacc44b6a7af0450
|
[
"Apache-2.0"
] | 1
|
2021-03-22T15:35:15.000Z
|
2021-03-22T15:35:15.000Z
|
image_proc/launch/image_proc.launch.py
|
allenh1/image_pipeline
|
ab7f8ec264c5c8e7240d599fbacc44b6a7af0450
|
[
"Apache-2.0"
] | null | null | null |
image_proc/launch/image_proc.launch.py
|
allenh1/image_pipeline
|
ab7f8ec264c5c8e7240d599fbacc44b6a7af0450
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020, Open Source Robotics Foundation, Inc.
# All rights reserved.
#
# Software License Agreement (BSD License 2.0)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from launch import LaunchDescription
from launch_ros import actions
from launch_ros.descriptions import ComposableNode
def generate_launch_description():
ld = LaunchDescription()
# Load composable container
image_processing = actions.ComposableNodeContainer(
node_name='image_proc_container',
package='rclcpp_components',
node_executable='component_container',
composable_node_descriptions=[
ComposableNode(
package='image_proc',
node_plugin='image_proc::DebayerNode',
node_name='debayer_node',
),
# Example of rectifying an image
ComposableNode(
package='image_proc',
node_plugin='image_proc::RectifyNode',
node_name='rectify_mono_node',
# Remap subscribers and publishers
remappings=[
# Subscriber remap
('image', 'image_mono'),
('camera_info', 'camera_info'),
('image_rect', 'image_rect')
],
),
            # Example of rectifying a color image
ComposableNode(
package='image_proc',
node_plugin='image_proc::RectifyNode',
node_name='rectify_color_node',
# Remap subscribers and publishers
remappings=[
# Subscriber remap
('image', 'image_color'),
# Publisher remap
('image_rect', 'image_rect_color')
],
)],
output='screen'
)
ld.add_action(image_processing)
return ld
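# Example invocation, assuming this launch file is installed with the
# image_proc package:
#   ros2 launch image_proc image_proc.launch.py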
| 39.376471
| 70
| 0.653421
|
3242b2d491a50281dba26151f677fc40922722f1
| 12,907
|
py
|
Python
|
oldversion/test/experiment.py
|
ne1199/RobustAutoencoder
|
2d32750fa08a88fe19ecb3ede6d76e50ecceb91d
|
[
"MIT"
] | 3
|
2021-07-08T03:30:33.000Z
|
2022-02-10T14:12:22.000Z
|
oldversion/test/experiment.py
|
yli96/RobustAutoencoder
|
01de32f5d670280ca8f8f9a6f6d704930fc266b4
|
[
"MIT"
] | null | null | null |
oldversion/test/experiment.py
|
yli96/RobustAutoencoder
|
01de32f5d670280ca8f8f9a6f6d704930fc266b4
|
[
"MIT"
] | 1
|
2021-07-09T07:54:03.000Z
|
2021-07-09T07:54:03.000Z
|
import PIL.Image as Image
import ImShow as I
import numpy as np
import tensorflow as tf
import DAE_tensorflow as DAE
import RDAE_tensorflow as RDAE
import l21RobustAutoencoder as l21RDAE
import os
from collections import Counter
def tune_RDAE(x, lam_list = [50], learn_rates = [0.1], inner = 50, outter = 5):
sess = tf.Session()
sparsities = []
error = []
for lam in lam_list:
for rate in learn_rates:
rae = RDAE.RDAE(sess = sess, lambda_ = lam, layers_sizes =[784,400,225,100,50])
L, S = rae.fit(x ,sess = sess, learning_rate = rate, batch_size = 130, inner_iteration = inner, iteration=outter, verbose=True)
recon_rae = rae.getRecon(x, sess = sess)
            error.append([lam, np.sum(x - L - S)])
sparsities.append([lam,Counter(S.reshape(S.size))[0]])
## save the training error
#error_file_name = "lam"+str(lam)+"_rate"+str(rate)+"_inner"+str(inner)+"_outter"+str(outter)+".pkl"
#np.array(rae.errors).dump(error_file_name)
sess.close()
return error,sparsities
def tune_l21RDAE(x, lam_list = [50], learn_rates = [0.1], inner = 150, outter = 10, batch_size=133):
#x = np.load(r"/home/czhou2/Documents/train_x_small.pkl")
with tf.Session() as sess:
rae = l21RDAE.RobustL21Autoencoder(sess = sess, lambda_= lam_list[0], layers_sizes=[784,400,255,100])
L, S = rae.fit(x, sess = sess, inner_iteration = inner, iteration = outter, batch_size = batch_size, verbose = True)
        ae = DAE.Deep_Autoencoder(sess = sess, input_dim_list=[784,400,225,100])
        error = ae.fit(x, sess = sess, learning_rate = learn_rates[0], iteration = inner * outter, batch_size = batch_size, verbose=True)
return rae.errors,error
def compare_RDAE_DAE_l21RDAE(X, layers, lamda, folder, learning_rate = 0.15, inner = 100, outer = 10, batch_size = 133,inputsize = (28,28)):
if not os.path.isdir(folder):
os.makedirs(folder)
os.chdir(folder)
with tf.Graph().as_default():
with tf.Session() as sess:
ae = DAE.Deep_Autoencoder(sess = sess, input_dim_list = layers)
error = ae.fit(X = X ,sess = sess, learning_rate = learning_rate, iteration = inner * outer, batch_size = batch_size, verbose=True)
dR = ae.getRecon(X = X, sess = sess)
dH = ae.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=dR,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"dR.png")
dH.dump("dH.pkl")
np.array(error).dump(r"DAEerror.pkl")
with tf.Graph().as_default():
with tf.Session() as sess:
rael21 = l21RDAE.RobustL21Autoencoder(sess = sess, lambda_= lamda*X.shape[0], layers_sizes=layers)
l21L, l21S = rael21.fit(X = X, sess = sess, inner_iteration = inner, iteration = outer, batch_size = batch_size, learning_rate = learning_rate, verbose = True)
l21R = rael21.getRecon(X = X, sess = sess)
l21H = rael21.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=l21S,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21S.png")
Image.fromarray(I.tile_raster_images(X=l21R,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21R.png")
Image.fromarray(I.tile_raster_images(X=l21L,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21L.png")
l21H.dump(r"l21H.pkl")
np.array(rael21.errors).dump(r"l21error.pkl")
l21S.dump("l21S.pkl")
with tf.Graph().as_default():
with tf.Session() as sess:
rae = RDAE.RDAE(sess = sess, lambda_ = lamda * 10, layers_sizes = layers)
rL, rS = rae.fit(X = X ,sess = sess, learning_rate = learning_rate, batch_size = batch_size, inner_iteration = inner, iteration = outer, verbose=True)
rR = rae.getRecon(X, sess)
rH = rae.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=rR,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rR.png")
Image.fromarray(I.tile_raster_images(X=rS,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rS.png")
Image.fromarray(I.tile_raster_images(X=rL,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rL.png")
rH.dump(r"rH.pkl")
np.array(rae.errors).dump(r"RDAEerror.pkl")
rS.dump("rS.pkl")
os.chdir("../")
def RDAE_DAE_l21RDAE_compressUser(X, layers, lamda, folder, learning_rate = 0.15, inner = 100, outer = 10, batch_size = 133,inputsize = (28,28)):
if not os.path.isdir(folder):
os.makedirs(folder)
os.chdir(folder)
with tf.Graph().as_default():
with tf.Session() as sess:
rael21 = l21RDAE.RobustL21Autoencoder(sess = sess, lambda_= lamda*X.shape[0], layers_sizes=layers)
l21L, l21S = rael21.fit(X = X, sess = sess, inner_iteration = inner, iteration = outer, batch_size = batch_size, learning_rate = learning_rate, verbose = True)
l21R = rael21.getRecon(X = X, sess = sess)
l21H = rael21.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=l21S.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21S.png")
Image.fromarray(I.tile_raster_images(X=l21R.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21R.png")
Image.fromarray(I.tile_raster_images(X=l21L.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21L.png")
l21H.dump(r"l21H.pkl")
np.array(rael21.errors).dump(r"l21error.pkl")
l21S.dump("l21S.pkl")
with tf.Graph().as_default():
with tf.Session() as sess:
rae = RDAE.RDAE(sess = sess, lambda_ = lamda * 10, layers_sizes = layers)
rL, rS = rae.fit(X = X ,sess = sess, learning_rate = learning_rate, batch_size = batch_size, inner_iteration = inner, iteration = outer, verbose=True)
rR = rae.getRecon(X, sess)
rH = rae.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=rR.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rR.png")
Image.fromarray(I.tile_raster_images(X=rS.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rS.png")
Image.fromarray(I.tile_raster_images(X=rL.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"rL.png")
rH.dump(r"rH.pkl")
np.array(rae.errors).dump(r"RDAEerror.pkl")
rS.dump("rS.pkl")
os.chdir("../")
def l21RDAE_compressUser(X, layers, lamda, folder, learning_rate = 0.15, inner = 100, outer = 10, batch_size = 133,inputsize = (28,28)):
if not os.path.isdir(folder):
os.makedirs(folder)
os.chdir(folder)
with tf.Graph().as_default():
with tf.Session() as sess:
rael21 = l21RDAE.RobustL21Autoencoder(sess = sess, lambda_= lamda*X.shape[0], layers_sizes=layers)
l21L, l21S = rael21.fit(X = X, sess = sess, inner_iteration = inner, iteration = outer, batch_size = batch_size, learning_rate = learning_rate, verbose = True)
l21R = rael21.getRecon(X = X, sess = sess)
l21H = rael21.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=l21S.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21S.png")
Image.fromarray(I.tile_raster_images(X=l21R.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21R.png")
Image.fromarray(I.tile_raster_images(X=l21L.T,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21L.png")
l21H.dump(r"l21H.pkl")
np.array(rael21.errors).dump(r"l21error.pkl")
l21S.dump("l21S.pkl")
os.chdir("../")
def l21RDAE_compressFeature(X, layers, lamda, folder, learning_rate = 0.15, inner = 100, outer = 10, batch_size = 133,inputsize = (28,28)):
if not os.path.isdir(folder):
os.makedirs(folder)
os.chdir(folder)
with tf.Graph().as_default():
with tf.Session() as sess:
rael21 = l21RDAE.RobustL21Autoencoder(sess = sess, lambda_= lamda*X.shape[0], layers_sizes=layers)
l21L, l21S = rael21.fit(X = X, sess = sess, inner_iteration = inner, iteration = outer, batch_size = batch_size, learning_rate = learning_rate, verbose = True)
l21R = rael21.getRecon(X = X, sess = sess)
l21H = rael21.transform(X, sess)
Image.fromarray(I.tile_raster_images(X=l21S,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21S.png")
Image.fromarray(I.tile_raster_images(X=l21R,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21R.png")
Image.fromarray(I.tile_raster_images(X=l21L,img_shape=inputsize, tile_shape=(10, 10),tile_spacing=(1, 1))).save(r"l21L.png")
l21H.dump(r"l21H.pkl")
np.array(rael21.errors).dump(r"l21error.pkl")
l21S.dump("l21S.pkl")
os.chdir("../")
def onePixel_uniformNoise(data, corNum=10):
    corruption_index = list(map(int, np.floor(np.random.random_sample(size = corNum)*data.shape[1])))
for i in range(data.shape[0]):
for j in corruption_index:
#corrupted[i,j] = corrupted[i,j] + np.random.normal(loc=0.5, scale=0.25)
data[i,j] = np.random.uniform()
return data
def onePixel_fixedNoise(data, corNum=10):
    corruption_index = list(map(int, np.floor(np.random.random_sample(size = corNum)*data.shape[1])))
for j in corruption_index:
corruption_amplitude = np.random.random()
data[:,j] = corruption_amplitude
return data
def onePixel_GaussianNoise(data, corNum=10):
    corruption_index = list(map(int, np.floor(np.random.random_sample(size = corNum)*data.shape[1])))
for i in range(data.shape[0]):
for j in corruption_index:
#corrupted[i,j] = corrupted[i,j] + np.random.normal(loc=0.5, scale=0.25)
data[i,j] = np.random.normal(loc=0, scale=1)
return data
def compare_frame():
#X = np.load(r"/home/czhou2/Documents/mnist_noise_variations_all_1_data_small.pkl")
#X = np.load(r"/home/czhou2/Documents/train_x_small.pkl")
#X = np.load(r"/home/zc8304/Documents/Two_with7_p5.pkl") ## on jetstream
#X = np.load(r"/home/czhou2/Documents/Two_with7_p5.pkl")
#X = np.load(r"/home/czhou2/Documents/Zero_with1_p5.pkl")
#X = np.load(r"/home/czhou2/Documents/Two_with7_p5.pkl")
#X = np.load(r"/home/zc8304/Documents/ZeroOne_with7_p5.pkl")
X = np.load(r"/home/czhou2/Documents/ZeroOne_with7_p5.pkl")
#X = np.load(r"/home/zc8304/Documents/Two_with0_noise_p10.pkl") ## jetstream
#X = np.load(r"/home/czhou2/Documents/Two_with0_noise_p10.pkl") ## stormbringer
inner = 100
outer = 8
#lamda_list = [0.1,1.,5.,10.,15.,20.,25.,50.,70.,100.]
#layers_list =[[784,625,400,225],[784,400,225],[784,625,225],[784,625,400]]
#lamda_list = np.arange(0.0001,0.0015,0.00005)
#lamda_list = [0.0015,0.003,0.005,0.008,0.01,0.03,0.05,0.08,0.1,0.2,0.3,0.5]
#lamda_list = np.arange(0.03,0.08,0.003) ## detailed search for 2 with 0 noise
lamda_list = np.arange(0.0002,0.0004,0.00003) ## detailed search for 2 with 0 noise
#lamda_list = [0.0001,0.0005,0.001,0.005,0.01,0.05,0.1,0.5,1.,5]
# error,sp = tune_RDAE(x = X, lam_list = lam_list, learn_rates = learn_rates, inner = inner, outter = outter, batch_size )
# np.array(error).dump(r"diff_X_L_S.pkl")
# np.array(sp).dump(r"sparsities")
#error,sp = tune_l21RDAE(x = X, lam_list = lam_list, learn_rates = learn_rates, inner = inner, outter = outter)
layers = [784, 600, 400] ## S trans
#layers = [5215,1000,400] ## for 0 and 1
#layers = [5216,1000,400] ## for 2 and 7
#layers = [10868,4000,2000] ## for 01 and 7
#layers = [5460,2000,1000] ## for 2 with 0 noise
image_X = Image.fromarray(I.tile_raster_images(X = X, img_shape = (28,28), tile_shape=(10, 10),tile_spacing=(1, 1)))
image_X.save(r"X.png")
for lam in lamda_list:
folder = "lam" + str(lam)
#compare_RDAE_DAE_l21RDAE(X = X, layers=layers, lamda = lam, folder = folder, learning_rate = 0.05, inner = inner, outer = outer, batch_size = 133,inputsize = (28,28))
#RDAE_DAE_l21RDAE_compressUser(X = X.T, layers=layers, lamda = lam, folder = folder, learning_rate = 0.05, inner = inner, outer = outer, batch_size = 133,inputsize = (28,28))
#l21RDAE_compressUser(X = X.T, layers=layers, lamda = lam, folder = folder, learning_rate = 0.05, inner = inner, outer = outer, batch_size = 133,inputsize = (28,28))
l21RDAE_compressFeature(X = X, layers=layers, lamda = lam, folder = folder, learning_rate = 0.05, inner = inner, outer = outer, batch_size = 133,inputsize = (28,28))
if __name__ == "__main__":
compare_frame()
| 58.402715
| 182
| 0.64802
|
c5423e2bb2bd5c9fea16e6ed08fe47b20e867597
| 773
|
py
|
Python
|
test/test.py
|
ytyaru/Python.Mastodon.Api.Toot.20210813105938
|
18b46d9143f6d2a934e2fe7cd69b847e62399e67
|
[
"CC0-1.0"
] | null | null | null |
test/test.py
|
ytyaru/Python.Mastodon.Api.Toot.20210813105938
|
18b46d9143f6d2a934e2fe7cd69b847e62399e67
|
[
"CC0-1.0"
] | null | null | null |
test/test.py
|
ytyaru/Python.Mastodon.Api.Toot.20210813105938
|
18b46d9143f6d2a934e2fe7cd69b847e62399e67
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
# coding: utf8
#import requests
#import os, sys, argparse, json, urllib.parse, datetime
#from string import Template
#from abc import ABCMeta, abstractmethod
#import mimetypes
import os, sys
# Add the parent directory to the module search path
#sys.path.append(os.path.join(os.path.dirname(__file__), '../src'))
#from lib import exept_null, Path, FileReader, FileWriter, Authenticator, Api
#import media
#import unittest
from unittest import TestLoader
from unittest import TextTestRunner
#from test.support import captured_stdout
#from unittest.mock import MagicMock, patch, mock_open
#import copy
#from collections import namedtuple
#from string import Template
if __name__ == '__main__':
TextTestRunner().run(TestLoader().discover(os.path.abspath(os.path.dirname(__file__))))
| 30.92
| 91
| 0.78784
|
834b6ded71b3a92fa44e70ad8e85a01e4d353c2d
| 4,590
|
py
|
Python
|
libpermian/webui/server.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | null | null | null |
libpermian/webui/server.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | 9
|
2022-02-07T14:14:10.000Z
|
2022-03-22T09:17:16.000Z
|
libpermian/webui/server.py
|
velezd/permian
|
b52189f44c3112ad933a6b1e303a6b30c272651a
|
[
"MIT"
] | 3
|
2022-01-20T09:17:39.000Z
|
2022-03-08T00:35:58.000Z
|
import uuid
from flask import Flask
import threading
import socket
import logging
from . import hooks
LOGGER = logging.getLogger(__name__)
class WebUI(threading.Thread):
"""
Web UI for the pipeline. This class is designed as container for the flask
app running in thread providing interface for the flask app instance and
the HTTP server used for the interface where one can obtain baseurl of the
server or wait for the server to start.
The class also work as bridge between the flask app and pipeline instance
associated to it using flask config to pass the pipeline instance, see
:py:func:`currentPipeline` and :py:func:`currentWebUI`.
:param pipeline: Pipeline instance for which the web UI should provide interface.
:type pipeline: libpermian.pipeline.Pipeline
"""
blueprints=[]
@classmethod
def registerBlueprint(cls, blueprint, url_prefix=None):
"""
        Extend the webUI by providing a flask Blueprint. The url_prefix can
        either be passed here or set in the Blueprint itself.
The blueprints are registered to the app in the order they are
registered via this method starting with pipeline builtin blueprints
and then the plugin blueprints (in the order of plugins being loaded).
:param blueprint: Flask Blueprint instance extending the webUI flask app
:type blueprint: flask.Blueprint
"""
cls.blueprints.append((blueprint, url_prefix))
return blueprint
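    # Illustrative usage (hypothetical plugin code, not part of this module):
    #
    #     from flask import Blueprint
    #     my_blueprint = Blueprint('my_plugin', __name__)
    #     WebUI.registerBlueprint(my_blueprint, url_prefix='/my-plugin')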
def __init__(self, pipeline):
super().__init__(daemon=True)
self.app = Flask(__name__)
for blueprint, url_prefix in self.blueprints:
self.app.register_blueprint(blueprint, url_prefix=url_prefix)
self.pipeline = pipeline
self.listen_ip = self.config('listen_ip')
self.port = None # delay obtaining the port until the very moment before the flask app is started to limit potential random port collision
self._operationalLock = threading.Lock()
self._operationalLock.acquire() # immediately acquire the lock as the webUI is not running yet
self.uuid = uuid.uuid4().hex
self.app.config.update(
ENV='embedded',
pipeline=self.pipeline,
webUI=self,
)
def run(self):
"""
The function that's executed in the thread. Call the
:py:meth:`~WebUI.start` method instead of this one.
For more info see python :py:class:`threading.Thread`.
"""
self.port = get_port(self.config('listen_port'))
hooks.WebUI_starting(self)
self.app.run(self.listen_ip, self.port)
def config(self, option):
"""
"""
return self.pipeline.settings.get('WebUI', option)
@property
def baseurl(self):
"""
"""
return f'http://{get_ip()}:{self.port}/'
def waitUntilStarted(self):
"""
"""
# just wait for operational lock to be released
with self._operationalLock:
pass
def unlock(self):
"""
"""
LOGGER.debug(f'Unlocking webUI {self}')
hooks.WebUI_started(self)
self._operationalLock.release()
def get_port(port_spec):
"""
Provide port number based on the `port_spec` parameter. If the provided port
is string 'random' then random available port is returned.
In future this function could possibly also accept tuple (or range) as
`port_spec` choosing available port from the provided range.
:param port_spec:
:type port_spec: int or str
:return: port number
:rtype: int
"""
try: # try first to convert the string to number
port_spec = int(port_spec)
except ValueError:
pass
if isinstance(port_spec, int):
return port_spec
if port_spec == 'random':
return get_random_free_port()
raise ValueError(f'Unrecognized port value: {port_spec!r}')
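# Illustrative behaviour (not part of the original module): get_port(8080) and
# get_port('8080') both return 8080, while get_port('random') delegates to
# get_random_free_port() and returns an ephemeral port.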
def get_random_free_port():
"""
    Finds a free port. There is a possibility of a race condition,
since another process may grab that port before it is used by Flask.
:return: Port number of random available (currently unused) port
:rtype: int
"""
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp.bind(('', 0))
port = tcp.getsockname()[1]
tcp.close()
return port
def get_ip():
"""
:return: IP address of the host which should be reachable from outside.
:rtype: str
"""
return socket.gethostbyname(socket.gethostname())
| 32.097902
| 146
| 0.661656
|
89a5a5c29549fabd6cbab7e450b3368c029a29a2
| 5,385
|
py
|
Python
|
sydent/http/servlets/__init__.py
|
eyetime-international-ltd/sydent
|
48501fbe473555163214c37b90b342a706bd53d7
|
[
"Apache-2.0"
] | null | null | null |
sydent/http/servlets/__init__.py
|
eyetime-international-ltd/sydent
|
48501fbe473555163214c37b90b342a706bd53d7
|
[
"Apache-2.0"
] | null | null | null |
sydent/http/servlets/__init__.py
|
eyetime-international-ltd/sydent
|
48501fbe473555163214c37b90b342a706bd53d7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2014 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import copy
import functools
from twisted.internet import defer
from twisted.web import server
logger = logging.getLogger(__name__)
class MatrixRestError(Exception):
"""
Handled by the jsonwrap wrapper. Any servlets that don't use this
wrapper should catch this exception themselves.
"""
def __init__(self, httpStatus, errcode, error):
super(Exception, self).__init__(error)
self.httpStatus = httpStatus
self.errcode = errcode
self.error = error
def get_args(request, required_args):
"""
Helper function to get arguments for an HTTP request.
Currently takes args from the top level keys of a json object or
    www-form-urlencoded for backwards compatibility on v1 endpoints only.
    Raises MatrixRestError if the request is malformed. Otherwise, returns
    a dict containing the parameters passed.
"""
v1_path = request.path.startswith('/_matrix/identity/api/v1')
args = None
# for v1 paths, only look for json args if content type is json
if (
request.method in ('POST', 'PUT') and (
not v1_path or (
request.requestHeaders.hasHeader('Content-Type') and
request.requestHeaders.getRawHeaders('Content-Type')[0].startswith('application/json')
)
)
):
try:
args = json.load(request.content)
except ValueError:
raise MatrixRestError(400, 'M_BAD_JSON', 'Malformed JSON')
# If we didn't get anything from that, and it's a v1 api path, try the request args
# (riot-web's usage of the ed25519 sign servlet currently involves
# sending the params in the query string with a json body of 'null')
if args is None and (v1_path or request.method == 'GET'):
args = copy.copy(request.args)
# Twisted supplies everything as an array because it's valid to
# supply the same params multiple times with www-form-urlencoded
        # params. This makes it incompatible with the json object though,
# so we need to convert one of them. Since this is the
# backwards-compat option, we convert this one.
for k, v in args.items():
if isinstance(v, list) and len(v) == 1:
args[k] = v[0]
elif args is None:
args = {}
missing = []
for a in required_args:
if a not in args:
missing.append(a)
if len(missing) > 0:
request.setResponseCode(400)
msg = "Missing parameters: "+(",".join(missing))
raise MatrixRestError(400, 'M_MISSING_PARAMS', msg)
return args
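# Illustrative usage (hypothetical servlet code, not part of this module):
#
#     args = get_args(request, ('medium', 'address'))
#
# raises MatrixRestError(400, 'M_MISSING_PARAMS', ...) when a key is absent and
# otherwise returns the parsed parameters as a dict.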
def jsonwrap(f):
@functools.wraps(f)
def inner(self, request, *args, **kwargs):
try:
request.setHeader("Content-Type", "application/json")
return json.dumps(f(self, request, *args, **kwargs)).encode("UTF-8")
except MatrixRestError as e:
request.setResponseCode(e.httpStatus)
return json.dumps({
"errcode": e.errcode,
"error": e.error,
})
except Exception:
logger.exception("Exception processing request");
request.setHeader("Content-Type", "application/json")
request.setResponseCode(500)
return json.dumps({
"errcode": "M_UNKNOWN",
"error": "Internal Server Error",
})
return inner
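# Illustrative usage (hypothetical servlet, not part of this module):
#
#     class PingServlet(Resource):
#         @jsonwrap
#         def render_GET(self, request):
#             return {'ok': True}
#
# The wrapper serialises the returned dict to JSON and maps MatrixRestError to
# the matching HTTP status code.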
def deferjsonwrap(f):
def reqDone(resp, request):
request.setHeader("Content-Type", "application/json")
request.setResponseCode(200)
request.write(json.dumps(resp).encode("UTF-8"))
request.finish()
def reqErr(failure, request):
request.setHeader("Content-Type", "application/json")
if failure.check(MatrixRestError) is not None:
request.setResponseCode(failure.value.httpStatus)
request.write(json.dumps({'errcode': failure.value.errcode, 'error': failure.value.error}))
else:
logger.error("Request processing failed: %r, %s", failure, failure.getTraceback())
request.setResponseCode(500)
request.write(json.dumps({'errcode': 'M_UNKNOWN', 'error': 'Internal Server Error'}))
request.finish()
def inner(*args, **kwargs):
request = args[1]
d = defer.maybeDeferred(f, *args, **kwargs)
d.addCallback(reqDone, request)
d.addErrback(reqErr, request)
return server.NOT_DONE_YET
return inner
def send_cors(request):
request.setHeader("Access-Control-Allow-Origin", "*")
request.setHeader("Access-Control-Allow-Methods",
"GET, POST, PUT, DELETE, OPTIONS")
request.setHeader("Access-Control-Allow-Headers", "*")
| 36.14094
| 103
| 0.640483
|
a3be9afaae88f08bc9ca5765b7def6293f31cabe
| 20,394
|
py
|
Python
|
admin/polytope-admin/polytope_admin/api/Config.py
|
ecmwf-projects/polytope-server
|
135017717cf161d68875a55aa364ce311a1d2ea6
|
[
"Apache-2.0"
] | null | null | null |
admin/polytope-admin/polytope_admin/api/Config.py
|
ecmwf-projects/polytope-server
|
135017717cf161d68875a55aa364ce311a1d2ea6
|
[
"Apache-2.0"
] | 1
|
2022-01-28T11:39:25.000Z
|
2022-01-28T11:39:25.000Z
|
admin/polytope-admin/polytope_admin/api/Config.py
|
ecmwf-projects/polytope-server
|
135017717cf161d68875a55aa364ce311a1d2ea6
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2022 European Centre for Medium-Range Weather Forecasts (ECMWF)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
#
import getpass
import logging
import os
import pprint
import urllib.parse
from http.client import HTTPConnection
from pathlib import Path
import yaml
from . import helpers
class Config:
def __init__(
self,
config_path=None,
address=None,
port=None,
username=None,
key_path=None,
quiet=None,
verbose=None,
log_file=None,
log_level=None,
user_key=None,
user_email=None,
password=None,
insecure=None,
skip_tls=None,
logger=None,
cli=False,
):
# hard-coded defaults are not specified in the __init__ header
# so that session configuration specified in the headers is not
# directly merged into the hard-coded configuration before the file
# configuration is applied
# __init__ collects and merges client configuration from five sources,
# in the following order (subsequent layers override configuration
# already present in the previous layers):
# - hard-coded defaults
# - system-wide configuration file
# - configuration file
# - env variables
# - session ad hoc configuration
if logger:
self._logger = logger
else:
self._logger = logging.getLogger(__name__)
self._cli = cli
self.file_config_items = [
"address",
"port",
"username",
"key_path",
"quiet",
"verbose",
"log_file",
"log_level",
"user_key",
"user_email",
"password",
"insecure",
"skip_tls",
]
# Reading session configuration
config = locals()
config.pop("self")
config.pop("config_path")
config.pop("logger")
config.pop("cli")
self.session_config = helpers.process_config(config)
# Default polytope client environment configuration
config = {}
config["address"] = ""
config["port"] = None
config["username"] = getpass.getuser()
config["key_path"] = None # this default may be readjusted
# later according to the config_path
config["quiet"] = False
config["verbose"] = False
config["log_file"] = None
config["log_level"] = "DEBUG"
config["user_key"] = None
config["user_email"] = None
config["password"] = None
config["insecure"] = False
config["skip_tls"] = False
self.default_config = config
# Reading system-wide file configuration
system_config_path = Path("/etc/polytope-adm-client/config.yaml")
self.system_file_config = helpers.read_config(system_config_path)
# Reading file configuration
if not config_path:
config_path = os.environ.get("POLYTOPE_CONFIG_PATH")
if config_path:
config_path = Path(os.path.expanduser(config_path))
self.config_path = config_path
else:
self.config_path = Path.home() / ".polytope-adm-client"
self.default_config["key_path"] = str(self.config_path / "keys")
self.file_config = helpers.read_config(self.config_path)
# Reading env var configuration
def fun(x):
return "POLYTOPE_" + x.upper()
env_var_config = {}
for var in self.file_config_items:
val = os.environ.get(fun(var))
if val:
env_var_config[var] = val
self.env_var_config = env_var_config
self.update_loggers()
message = "Gathered Polytope client configuration:\n"
message += pprint.pformat(self.get())
self._logger.debug(message)
def get_url(self, endpoint, request_id=None, collection_id=None):
# This function generates an HTTP URL pointing to the API exposed by a
# Polytope frontend instance. The IP and port are read from the
        # configuration file (after being set manually by the user, or using the
        # 'polytope config' command). The address must be configured; the port
        # defaults to 80/443 depending on the scheme if not set.
config = self.get()
valid_endpoints = ["api_root", "auth", "users", "requests", "download", "upload", "ping", "collections"]
if endpoint not in valid_endpoints:
raise ValueError("Unrecognized frontend endpoint requested.")
if endpoint == "api_root":
suffix = ""
elif endpoint == "auth":
suffix = "/auth/keys"
elif endpoint == "users":
# the admin client performs all user management operations
            # against /auth/users, whereas the normal client talks to
            # /user
suffix = "/auth/users"
elif endpoint == "requests":
suffix = "/requests"
if request_id:
suffix += "/" + request_id
elif collection_id:
suffix += "/" + collection_id
elif endpoint == "collections":
suffix = "/collections"
elif endpoint == "download":
suffix = "/downloads"
if request_id:
suffix += "/" + request_id
elif endpoint == "upload":
suffix = "/requests"
if collection_id:
suffix += "/" + collection_id
elif request_id:
suffix = "/uploads/" + request_id
elif endpoint == "ping":
suffix = "/test"
else:
raise helpers.BugError
url = config["address"]
# Add default scheme 'https://' if it was not specified
if not url.startswith(("http://", "https://")) and "://" not in url:
url = "https://" + url
# Split URL and validate each component
parsed_url = urllib.parse.urlsplit(url)
scheme = parsed_url.scheme
if scheme not in ("http", "https"):
raise ValueError("Unrecognized URL scheme {}".format(scheme))
if scheme == "https" and config["insecure"]:
scheme = "http"
if scheme == "http" and not config["insecure"]:
raise ValueError("Cannot use insecure http protocol without insecure flag")
if not parsed_url.hostname:
raise ValueError("URL {} could not be parsed".format(url))
# Adopt the port from URL if it exists, else read config
port = parsed_url.port or config["port"]
if port is None:
port = 80 if scheme == "http" else 443
# Create the full path and reconstruct the URL
path = os.path.join(parsed_url.path + "/api/v1" + suffix)
url = urllib.parse.urlunsplit((scheme, parsed_url.hostname + ":" + str(port), path, None, None))
return url
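    # Illustrative example (hypothetical values): with address set to
    # "polytope.example.com", port 32002 and insecure=False,
    # get_url("requests", request_id="abc") returns
    # "https://polytope.example.com:32002/api/v1/requests/abc".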
def get(self):
# gathers, merges and returns all configuration, as detailed in the
# documentation of __init__
config = {}
config.update(self.default_config)
config.update(self.system_file_config)
config.update(self.file_config)
config.update(self.env_var_config)
config.update(self.session_config)
special_file_priority = ["quiet", "verbose", "insecure", "skip_tls"]
for item in special_file_priority:
if item in self.session_config and not self.session_config[item]:
if item in self.system_file_config:
config[item] = self.system_file_config[item]
if item in self.file_config:
config[item] = self.file_config[item]
booleans = ["quiet", "verbose", "insecure", "skip_tls"]
for item in booleans:
if isinstance(config[item], str):
config[item] = config[item].lower() in ["true", "1"]
return config
def update_loggers(self):
config_dict = self.get()
# polytope_client loggers
if config_dict["verbose"]:
stream_log_level = "DEBUG"
else:
stream_log_level = "INFO"
helpers.set_stream_handler(self._logger, config_dict["quiet"], stream_log_level)
helpers.set_file_handler(self._logger, config_dict["log_file"], config_dict["log_level"])
# requests loggers
# actually, the requests module doesn't implement logging loggers
# the urllib3 module does
# and the http.client just prints information out
logger = logging.getLogger("urllib3")
logger.setLevel(logging.DEBUG)
quiet = config_dict["quiet"]
log_file = config_dict["log_file"]
if not config_dict["verbose"]:
quiet = True
HTTPConnection.debuglevel = 0
else:
if not quiet:
HTTPConnection.debuglevel = 1
if config_dict["log_level"] != "DEBUG":
log_file = None
helpers.set_stream_handler(logger, quiet, "DEBUG")
helpers.set_file_handler(logger, log_file, "DEBUG")
def list(self):
"""
Show Polytope client configuration.
Lists the current Polytope client configuration, as stored in the
Polytope client configuration file ($HOME/.polytope-adm-client by
default).
For each configuration item, its name and value are shown. Also,
its origin is shown wrapped in brackets. The possible origins of
a configuration item can be 'default', 'system file', 'file',
'env var' or 'session'. See 'polytope set config' for more
information.
:returns: None
"""
config = {}
origins = {}
for k, v in self.default_config.items():
config[k] = v
origins[k] = "default"
for k, v in self.system_file_config.items():
config[k] = v
origins[k] = "system file"
for k, v in self.file_config.items():
config[k] = v
origins[k] = "file"
for k, v in self.env_var_config.items():
config[k] = v
origins[k] = "env var"
for k, v in self.session_config.items():
config[k] = v
origins[k] = "session"
special_file_priority = ["quiet", "verbose", "insecure", "skip_tls"]
for item in special_file_priority:
if item in self.session_config and not self.session_config[item]:
if item in self.system_file_config:
config[item] = self.system_file_config[item]
origins[item] = "system file"
if item in self.file_config:
config[item] = self.file_config[item]
origins[item] = "file"
message = "Found configuration items:\n"
for k in config:
print_value = config[k]
if k == "password" and config[k]:
print_value = "**hidden**"
message += " - ({}) {}: {}\n".format(origins[k], k, print_value)
if len(self.system_file_config) > 0:
message += "The system-wide configuration file " + "is /etc/polytope-adm-client/config.json\n"
message += "The configuration source directory used in this session " + "is " + str(self.config_path)
self._logger.info(message)
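    # Illustrative output (hypothetical values): list() logs one line per item,
    # e.g. " - (file) address: polytope.example.com".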
def set(self, key, value, persist=False):
"""
Configure the Polytope client.
Sets a Polytope client configuration item, such as the IP address
or port where the Polytope server frontend is exposed, as well as a
user name to be used for the next operations with the Polytope server.
See below for a complete list of configurable items.
(Not relevant to CLI users): The new configuration item is added to
the session configuration (in order for the new value to take effect
over potential previous session configuration values for the same
item) and, if 'persist = True' (always the case when used from the
CLI), it is as well stored in the Polytope client configuration file
        ($HOME/.polytope-adm-client/config.yaml by default).
During execution, whenever the Polytope client does not find a value
in the configuration file for any configuration item, a system-wide
configuration file (/etc/polytope-adm-client/config.yaml) is first
checked to find a value and, if not found, a library-internal fixed
value is used.
Configuration items in the configuration file can be overridden with
environment variables (e.g. POLYTOPE_PORT, POLYTOPE_VERBOSE, ...)
specified before running the API or CLI.
When using the Polytope client API, upon creation of the client
(i.e. when instantiating the Client class), additional session
configuration values can be specified via the creator or via
environment variables. These values will take priority over any
library-defaults, file configurations or environment variables.
The configuration items 'quiet' and 'verbose' are special.
The Polytope CLI specifically sets them to True or False (depending
on whether the CLI user specified the --quiet or --verbose flags) when
instantiating the Client class, and hence these items show up as
session configurations when queried via 'polytope list config'.
However, if these items are set in the configuration file, and the
session --quiet or --verbose flags have not been specified, the file
values will take priority over the session configuration values. In
that case, the origin of the configuration item in 'polytope list
config' will show as '(file)'.
The available configurable items, their library-default values and
their meaning are listed below:
        config_path: $HOME/.polytope-adm-client
Path where the client configuration is stored.
address:
URL or IP address to the Polytope server frontend.
port: 32002
Port used by the Polytope server frontend.
username: host system user name
Name of the Polytope user to use for subsequent client operations.
This username is used to:
- log in the system and name the obtained credential (key)
- easily identify the key to be used in subsequent client operations
This configuration item does not have an effect if a session key has
been specified.
key_path: config_path/keys
Path where the user credentials are stored.
quiet: False
Whether to hide user information messages or not.
verbose: False
Whether to show detailed information of the progress or not.
log_file: None
File where to store the user information messages.
log_level: DEBUG
Level of detail of the log messages stored in the log_file. Accepts
any Python logging level (WARNING, INFO, DEBUG, ...).
user_key: None
Polytope user key
user_email: None
Polytope user email associated to the user_key.
password: None
HTTP Basic authentication password. Strongly recommended to specify this
configuration item only via the environment variable POLYTOPE_PASSWORD
or manually in the Polytope configuration file.
:param key: Name of the configuration item to set.
:type key: str
:param value: Value for the configuration item.
:type value: str
:param persist: Whether to apply the change in the Polytope client
configuration or not (default).
:type persist: bool
:returns: None
"""
if key not in self.file_config_items:
raise ValueError("Invalid configuration key provided (" + key + ")")
print_value = value
if key == "password" and value:
print_value = "**hidden**"
self.session_config[key] = value
self.update_loggers()
if not persist:
if not self._cli:
message = (
"Successfully updated the following configuration "
+ "item for the current Polytope client session:\n"
)
message += " - {}: {}\n".format(key, print_value)
self._logger.info(message)
return
config = self.file_config
config[key] = value
helpers.convert_back(config)
helpers.validate_config(config)
self.file_config = config
os.makedirs(str(self.config_path), exist_ok=True)
with open(str(self.config_path / "config.yaml"), "w", encoding="utf8") as outfile:
yaml.dump(config, outfile, default_flow_style=False, allow_unicode=True)
message = (
"Successfully updated the following configuration " + "item in the Polytope client configuration file:\n"
)
message += " - {}: {}\n".format(key, print_value)
self._logger.info(message)
if key == "user_key":
message = (
"A session key has been specified. The "
+ "specified 'username' ("
+ self.get()["username"]
+ ") will be ignored."
)
self._logger.info(message)
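    # Illustrative example (hypothetical value): set('address',
    # 'polytope.example.com', persist=True) writes the value to
    # <config_path>/config.yaml; an environment variable such as
    # POLYTOPE_ADDRESS would still override it in later sessions.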
def unset(self, key, persist=False):
"""
Remove Polytope client configurations.
Removes a Polytope client configuration item.
(Not relevant to CLI users): The item is removed from the current
session configuration if present and, if 'persist = True' (always
the case when used from the CLI), it is removed as well from the
Polytope client configuration file.
See 'polytope set config' for more information.
:param key: Name of the configuration item to be removed. Can take
the value 'all' to remove all file configuration items.
:type key: str
:param persist: Whether to apply the change in the Polytope client
configuration or not (default).
:type persist: bool
:returns: None
"""
if key == "all":
self.session_config = {}
self._logger.info("Configuration wiped.")
if not persist:
return
config_dir = self.config_path
try:
os.remove(str(config_dir / "config.yaml"))
self._logger.debug("Deleted " + str(config_dir / "config.yaml"))
except OSError:
self._logger.debug("Configuration file not found.")
self.file_config = {}
return
if key not in self.file_config_items:
raise ValueError("Invalid configuration key provided (" + key + ")")
if key in self.session_config:
del self.session_config[key]
message = (
"Successfully removed the following configuration " + "item for the Polytope client:\n - " + key + "\n"
)
self._logger.info(message)
self.update_loggers()
if not persist:
return
if key not in self.file_config:
self._logger.info("Configuration item '" + key + "' not currently present in the configuration file.")
return
del self.file_config[key]
os.makedirs(str(self.config_path), exist_ok=True)
with open(str(self.config_path / "config.yaml"), "w", encoding="utf8") as outfile:
yaml.dump(self.file_config, outfile, default_flow_style=False, allow_unicode=True)
| 38.119626
| 117
| 0.608365
|
9a2207d48f38f0bf97739ba91fe67dc626cf58cf
| 1,589
|
py
|
Python
|
BlinkSupport/KNNModel.py
|
isibord/EyeBlinkModels
|
d80659189234a66c10bec501f9e25062f243d2b3
|
[
"MIT"
] | null | null | null |
BlinkSupport/KNNModel.py
|
isibord/EyeBlinkModels
|
d80659189234a66c10bec501f9e25062f243d2b3
|
[
"MIT"
] | null | null | null |
BlinkSupport/KNNModel.py
|
isibord/EyeBlinkModels
|
d80659189234a66c10bec501f9e25062f243d2b3
|
[
"MIT"
] | null | null | null |
import numpy as np
class KNNModel(object):
"""A KNN Model"""
def __init__(self):
pass
def fit(self, x, y, k):
self.trainx = x
self.trainy = y
self.kval = k
def predictThres(self, x, batchThres=True, threshold=0.5):
        # For each test sample, compute the L2 distance to every training
        # sample, take the k nearest neighbours and estimate P(y=1) as the
        # fraction of those neighbours labelled 1.
probEst = []
for eachx in x:
distDict = {}
eachx = np.asarray(eachx)
for i in range(len(self.trainx)):
trainxval = np.asarray(self.trainx[i])
normval = np.linalg.norm(trainxval-eachx)
distDict[i] = normval
sorted_x = sorted(distDict.items(), key=lambda kv: kv[1])
count1s = 0
for j in range(self.kval):
(idx, norm) = sorted_x[j]
yVal = self.trainy[idx]
if yVal == 1:
count1s +=1
probEst.append(count1s/self.kval)
predictionList = []
        if batchThres:
            # Sweep decision thresholds from 0.00 to 1.00 in steps of 0.01 and
            # build one prediction list per threshold value.
            thresholdval = -0.01
            for j in range(101):
predictions = []
thresholdval += 0.01
for probVal in probEst:
predictions.append(1 if probVal > thresholdval else 0)
predictionList.append(predictions)
else:
predictions = []
for probVal in probEst:
predictions.append(1 if probVal > threshold else 0)
predictionList.append(predictions)
return predictionList
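# Illustrative usage (assumed data: lists/arrays of feature vectors and 0/1
# labels, not part of the original module):
#
#     model = KNNModel()
#     model.fit(train_x, train_y, k=5)
#     preds = model.predictThres(test_x, batchThres=False, threshold=0.5)
#     # preds[0] is the list of 0/1 predictions at the given threshold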
| 27.877193
| 74
| 0.507237
|
38028d5069637cb55233b6c7410c866cfe122132
| 1,611
|
py
|
Python
|
Loan-Approval-Analysis/code.py
|
Muskan-Agrawal07/greyatom-python-for-data-science
|
761afdeb4886a7db9fa74ca46df45ab1b0c5d630
|
[
"MIT"
] | null | null | null |
Loan-Approval-Analysis/code.py
|
Muskan-Agrawal07/greyatom-python-for-data-science
|
761afdeb4886a7db9fa74ca46df45ab1b0c5d630
|
[
"MIT"
] | null | null | null |
Loan-Approval-Analysis/code.py
|
Muskan-Agrawal07/greyatom-python-for-data-science
|
761afdeb4886a7db9fa74ca46df45ab1b0c5d630
|
[
"MIT"
] | null | null | null |
# --------------
# Importing header files
import numpy as np
import pandas as pd
from scipy.stats import mode
import warnings
warnings.filterwarnings('ignore')
#Reading file
bank_data = pd.read_csv(path)
#Checking all categorical values
categorical_var = bank_data.select_dtypes(include = 'object')
print(categorical_var)
#Checking all numerical values
numerical_var = bank_data.select_dtypes(include = 'number')
print(numerical_var)
#drop the column Loan_ID to create a new dataframe banks
banks = bank_data.drop('Loan_ID',axis=1)
#see the null values
print(banks.isnull().sum())
bank_mode = banks.mode().iloc[0]
banks.fillna(bank_mode, inplace = True)
banks.isnull()
print(banks.isnull().sum())
#Now let's check the loan amount of an average person based on 'Gender', 'Married', 'Self_Employed'.
avg_loan_amount = banks.pivot_table(index=['Gender', 'Married', 'Self_Employed'],values='LoanAmount',aggfunc='mean')
loan_approved_se = banks[(banks['Self_Employed'] == 'Yes') & (banks['Loan_Status'] == 'Y')]
loan_approved_nse = banks[(banks['Self_Employed'] == 'No') & (banks['Loan_Status'] == 'Y')]
# 614 is the total number of loan applications in the dataset
percentage_se = round((len(loan_approved_se)/614)*100,2)
print(percentage_se)
percentage_nse = round((len(loan_approved_nse)/614)*100,2)
print(percentage_nse)
loan_term = banks['Loan_Amount_Term'].apply(lambda x:x/12)
big_loan_term = 0
for i in loan_term:
if i>=25:
big_loan_term+=1
print(big_loan_term)
loan_groupby = banks.groupby('Loan_Status')[['ApplicantIncome', 'Credit_History']]
mean_values = loan_groupby.mean()
print(mean_values)
| 30.980769
| 117
| 0.721912
|
5f6dde442962e2fdce021352fe0cd483e0c1179a
| 3,493
|
py
|
Python
|
src/trellotask/settings.py
|
true7/oleg_diatlenko_test_pj
|
4249c75010f92e94f3a6dd0b2b672fc14061c9e5
|
[
"MIT"
] | null | null | null |
src/trellotask/settings.py
|
true7/oleg_diatlenko_test_pj
|
4249c75010f92e94f3a6dd0b2b672fc14061c9e5
|
[
"MIT"
] | null | null | null |
src/trellotask/settings.py
|
true7/oleg_diatlenko_test_pj
|
4249c75010f92e94f3a6dd0b2b672fc14061c9e5
|
[
"MIT"
] | null | null | null |
"""
Django settings for trellotask project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0@i$5-aauf)th!i(t&z04oi-8$d4_ndywke0=mwr*f!%x&c-ho'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'notes',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'trellotask.urls'
FIXTURE_DIRS = (
os.path.join(BASE_DIR, 'initial_data.json'),
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'notes.context_processors.total_count',
],
},
},
]
WSGI_APPLICATION = 'trellotask.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
# '/var/www/static/',
]
STATIC_ROOT = os.path.join(BASE_DIR, "static_cdn")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "media_cdn")
| 25.49635
| 91
| 0.692814
|
4c33bd0bc0d77116e61e99049716af678fb44591
| 105,674
|
py
|
Python
|
reviewboard/diffviewer/tests/test_diffx_parser.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
reviewboard/diffviewer/tests/test_diffx_parser.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
reviewboard/diffviewer/tests/test_diffx_parser.py
|
pombredanne/reviewboard
|
15f1d7236ec7a5cb4778ebfeb8b45d13a46ac71d
|
[
"MIT"
] | null | null | null |
"""Unit tests for reviewboard.diffviewer.parser.DiffXParser."""
from djblets.testing.decorators import add_fixtures
from reviewboard.diffviewer.errors import DiffParserError
from reviewboard.diffviewer.parser import DiffXParser
from reviewboard.scmtools.core import HEAD, PRE_CREATION, UNKNOWN
from reviewboard.testing import TestCase
class DiffXParserTests(TestCase):
"""Unit tests for reviewboard.diffviewer.parser.DiffXParser."""
def test_parse_diff_with_basic_diff(self):
"""Testing DiffXParser.parse_diff with a basic DiffX file"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=156\n'
b'{\n'
b' "path": {\n'
b' "new": "message2.py",\n'
b' "old": "message.py"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=693, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'message.py',
'new': 'message2.py',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message2.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_complex_diff(self):
"""Testing DiffXParser.parse_diff with a complex DiffX file"""
parser = DiffXParser(
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 2)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
# Inspect change #1.
parsed_change = parsed_diff.changes[0]
self.assertEqual(parsed_change.commit_id,
b'a25e7b28af5e3184946068f432122c68c1a30b23')
self.assertIsNone(parsed_change.parent_commit_id,
b'b892d5f833474c59d7851ff46a4b0bd919017e97')
self.assertEqual(parsed_change.extra_data, {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
})
self.assertEqual(len(parsed_change.files), 1)
# Inspect change #1, file #1
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n')
self.assertEqual(parsed_file.orig_filename, b'file1')
self.assertEqual(parsed_file.orig_file_details,
b'c8839177d1a5605aa60abe69db95c84183f0eebe')
self.assertEqual(parsed_file.modified_filename, b'file1')
self.assertEqual(parsed_file.modified_file_details,
b'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
# Inspect change #2.
parsed_change = parsed_diff.changes[1]
self.assertEqual(parsed_change.commit_id,
b'91127b687f583184144161f432222748c1a30b23')
        self.assertEqual(parsed_change.parent_commit_id,
                         b'a25e7b28af5e3184946068f432122c68c1a30b23')
self.assertEqual(parsed_change.extra_data, {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
})
self.assertEqual(len(parsed_change.files), 2)
# Inspect change #2, file #1
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
})
self.assertEqual(
parsed_file.data,
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00')
self.assertEqual(parsed_file.orig_filename, b'file2')
self.assertEqual(parsed_file.orig_file_details,
b'281bac2b704617e807850e07e54bae3469f6a2e7')
self.assertEqual(parsed_file.modified_filename, b'file2')
self.assertEqual(parsed_file.modified_file_details,
b'389cc6b7ae5a659383eab5dfc253764eccf84732')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
# Inspect change #2, file #2
parsed_file = parsed_change.files[1]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')
self.assertEqual(parsed_file.orig_filename, b'file3')
self.assertEqual(parsed_file.orig_file_details,
b'be089b7197974703c83682088a068bef3422c6c2')
self.assertEqual(parsed_file.modified_filename, b'file3')
self.assertEqual(parsed_file.modified_file_details,
b'0d4a0fb8d62b762a26e13591d06d93d79d61102f')
self.assertEqual(parsed_file.insert_count, 2)
self.assertEqual(parsed_file.delete_count, 1)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_path_string(self):
"""Testing DiffXParser.parse_diff with file's meta.path as single
string
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=103\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_old_only(self):
"""Testing DiffXParser.parse_diff with file's revision.old and no
revision.new
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=78\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'old': 'abc123',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
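        # With no revision.new in the metadata, the modified revision is
        # expected to fall back to HEAD.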
self.assertEqual(parsed_file.modified_file_details, HEAD)
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_new_only(self):
"""Testing DiffXParser.parse_diff with file's revision.new and no
revision.old
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=78\n'
b'{\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'message.py',
'revision': {
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
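        # With no revision.old in the metadata, the original revision is
        # expected to be reported as UNKNOWN.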
self.assertEqual(parsed_file.orig_file_details, UNKNOWN)
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_revision_new_only_op_create(self):
"""Testing DiffXParser.parse_diff with file's revision.new and no
revision.old and op=create
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=98\n'
b'{\n'
b' "op": "create",\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=692, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'create',
'path': 'message.py',
'revision': {
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
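        # op=create with no revision.old is expected to map the original
        # revision to PRE_CREATION.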
self.assertEqual(parsed_file.orig_file_details, PRE_CREATION)
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 4)
self.assertEqual(parsed_file.delete_count, 4)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_binary_file(self):
"""Testing DiffXParser.parse_diff with binary file"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=104\n'
b'{\n'
b' "path": "message.bin",\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=23, type=binary, line_endings=unix\n'
b'This is a binary file.\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
'type': 'binary',
},
'metadata': {
'path': 'message.bin',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'This is a binary file.\n')
self.assertEqual(parsed_file.orig_filename, b'message.bin')
self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.bin')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
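        # type=binary in the diff options should flag the file as binary.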
self.assertTrue(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_file_op_delete(self):
"""Testing DiffXParser.parse_diff with file op=delete"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=123\n'
b'{\n'
b' "op": "delete",\n'
b' "path": "message.py",\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=29, line_endings=unix\n'
b'@@ -1 +0,0 @@\n'
b'-Goodbye, file\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'delete',
'path': 'message.py',
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'@@ -1 +0,0 @@\n'
b'-Goodbye, file\n')
self.assertEqual(parsed_file.orig_filename, b'message.py')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'message.py')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 1)
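        # op=delete should flag the file as deleted.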
self.assertTrue(parsed_file.deleted)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.moved)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_op_move(self):
"""Testing DiffXParser.parse_diff with file op=move"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=169\n'
b'{\n'
b' "op": "move",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'metadata': {
'op': 'move',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(parsed_file.data, b'')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
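        # op=move should flag the file as moved, with no diff content.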
self.assertTrue(parsed_file.moved)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_op_move_modify(self):
"""Testing DiffXParser.parse_diff with file op=move-modify"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=176\n'
b'{\n'
b' "op": "move-modify",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'move-modify',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
self.assertTrue(parsed_file.moved)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
def test_parse_diff_with_op_copy(self):
"""Testing DiffXParser.parse_diff with file op=copy"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=169\n'
b'{\n'
b' "op": "copy",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'metadata': {
'op': 'copy',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(parsed_file.data, b'')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 0)
self.assertEqual(parsed_file.delete_count, 0)
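        # op=copy should flag the file as copied, with no diff content.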
self.assertTrue(parsed_file.copied)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)
def test_parse_diff_with_op_copy_modify(self):
"""Testing DiffXParser.parse_diff with file op=copy-modify"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=176\n'
b'{\n'
b' "op": "copy-modify",\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'op': 'copy-modify',
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
self.assertTrue(parsed_file.copied)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)
def test_parse_diff_with_existing_stats(self):
"""Testing DiffXParser.parse_diff with existing file stats"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=225\n'
b'{\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' },\n'
b' "stats": {\n'
b' "deletions": 100,\n'
b' "insertions": 200\n'
b' }\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
'stats': {
'deletions': 100,
'insertions': 200,
},
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
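        # The stats recorded in the metadata should take precedence over
        # counts derived from the diff hunks.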
self.assertEqual(parsed_file.insert_count, 200)
self.assertEqual(parsed_file.delete_count, 100)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.is_symlink)
self.assertFalse(parsed_file.moved)
def test_parse_diff_with_type_symlink(self):
"""Testing DiffXParser.parse_diff with file type=symlink"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=212\n'
b'{\n'
b' "path": {\n'
b' "old": "old-name",\n'
b' "new": "new-name"\n'
b' },\n'
b' "revision": {\n'
b' "old": "abc123",\n'
b' "new": "def456"\n'
b' },\n'
b' "type": "symlink",\n'
b' "symlink target": "target/path/"\n'
b'}\n'
b'#...diff: length=58, line_endings=unix\n'
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n'
)
parsed_diff = parser.parse_diff()
self.assertEqual(len(parsed_diff.changes), 1)
self.assertEqual(parsed_diff.extra_data, {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
})
self.assertIs(parsed_diff.parser, parser)
self.assertFalse(parsed_diff.uses_commit_ids_as_revisions)
parsed_change = parsed_diff.changes[0]
self.assertIsNone(parsed_change.commit_id)
self.assertIsNone(parsed_change.parent_commit_id)
self.assertEqual(parsed_change.extra_data, {})
self.assertEqual(len(parsed_change.files), 1)
parsed_file = parsed_change.files[0]
self.assertEqual(parsed_file.extra_data, {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'old-name',
'new': 'new-name',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
'type': 'symlink',
'symlink target': 'target/path/',
},
'metadata_options': {
'format': 'json',
},
},
})
self.assertEqual(
parsed_file.data,
b'--- old-name\n'
b'+++ new-name\n'
b'@@ -1 +1 @@\n'
b'-old line\n'
b'+new line\n')
self.assertEqual(parsed_file.orig_filename, b'old-name')
        self.assertEqual(parsed_file.orig_file_details, b'abc123')
self.assertEqual(parsed_file.modified_filename, b'new-name')
self.assertEqual(parsed_file.modified_file_details, b'def456')
self.assertEqual(parsed_file.insert_count, 1)
self.assertEqual(parsed_file.delete_count, 1)
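        # type=symlink in the metadata should flag the file as a symlink.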
self.assertTrue(parsed_file.is_symlink)
self.assertFalse(parsed_file.binary)
self.assertFalse(parsed_file.copied)
self.assertFalse(parsed_file.deleted)
self.assertFalse(parsed_file.moved)
def test_parse_diff_with_invalid_diffx(self):
"""Testing DiffXParser.parse_diff with invalid DiffX file contents"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'BLARGH\n'
)
message = (
"Error on line 2: Unexpected or improperly formatted header: %r"
% b'BLARGH'
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()
def test_parse_diff_with_path_invalid_type(self):
"""Testing DiffXParser.parse_diff with invalid file path type"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=94\n'
b'{\n'
b' "path": 123,\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = (
'Unexpected type %s for "path" key in change 1, file 1'
% int
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()
def test_parse_diff_with_path_dict_missing_old(self):
"""Testing DiffXParser.parse_diff with file path as dictionary with
missing "old" key
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=120\n'
b'{\n'
b' "path": {\n'
b' "new": "file"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = 'Missing the "path.old" key in change 1, file 1'
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()
def test_parse_diff_with_path_dict_missing_new(self):
"""Testing DiffXParser.parse_diff with file path as dictionary with
missing "new" key
"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=120\n'
b'{\n'
b' "path": {\n'
b' "old": "file"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
)
message = 'Missing the "path.new" key in change 1, file 1'
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()
def test_parse_diff_with_revision_invalid_type(self):
"""Testing DiffXParser.parse_diff with invalid file revision type"""
parser = DiffXParser(
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=44\n'
b'{\n'
b' "path": "file",\n'
b' "revision": 123\n'
b'}\n'
)
message = (
'Unexpected type %s for "revision" key in change 1, file 1'
% int
)
with self.assertRaisesMessage(DiffParserError, message):
parser.parse_diff()
@add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_simple(self):
"""Testing DiffXParser.raw_diff with DiffSet and simple diff"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'options': {
'encoding': 'utf-8',
'version': '1.0',
},
},
}
diffset.save(update_fields=('extra_data',))
diffcommit = self.create_diffcommit(diffset=diffset,
with_diff=False)
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
save=False,
diff=(
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict '
b'does not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': {
'old': 'message.py',
'new': 'message2.py',
},
'revision': {
'old': 'abc123',
'new': 'def456',
},
},
'metadata_options': {
'format': 'json',
},
},
}
filediff.save()
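        # raw_diff() rebuilds the DiffX payload from the stored extra_data
        # and per-file diffs, so the parser itself needs no input.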
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-8, version=1.0\n'
b'#.change:\n'
b'#..file:\n'
b'#...meta: format=json, length=156\n'
b'{\n'
b' "path": {\n'
b' "new": "message2.py",\n'
b' "old": "message.py"\n'
b' },\n'
b' "revision": {\n'
b' "new": "def456",\n'
b' "old": "abc123"\n'
b' }\n'
b'}\n'
b'#...diff: length=693, line_endings=unix\n'
b'--- message.py\t2021-07-02 13:20:12.285875444 -0700\n'
b'+++ message2.py\t2021-07-02 13:21:31.428383873 -0700\n'
b'@@ -164,10 +164,10 @@\n'
b' not isinstance(headers, MultiValueDict)):\n'
b' # Instantiating a MultiValueDict from a dict does '
b'not ensure that\n'
b' # values are lists, so we have to ensure that '
b'ourselves.\n'
b'- headers = MultiValueDict(dict(\n'
b'- (key, [value])\n'
b'- for key, value in six.iteritems(headers)\n'
b'- ))\n'
b'+ headers = MultiValueDict({\n'
b'+ key: [value]\n'
b'+ for key, value in headers.items()\n'
b'+ })\n'
b' \n'
b' if in_reply_to:\n'
b' headers["In-Reply-To"] = in_reply_to\n')
@add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_complex(self):
"""Testing DiffXParser.raw_diff with DiffSet and complex diff"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
}
diffset.save(update_fields=('extra_data',))
# Create DiffCommit #1.
diffcommit = self.create_diffcommit(
diffset=diffset,
commit_id='a25e7b28af5e3184946068f432122c68c1a30b23',
with_diff=False)
diffcommit.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
}
diffcommit.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
# Create DiffCommit #2.
diffcommit = self.create_diffcommit(
diffset=diffset,
commit_id='91127b687f583184144161f432222748c1a30b23',
with_diff=False)
diffcommit.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
}
diffcommit.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit,
source_file='file3',
source_revision='be089b7197974703c83682088a068bef3422c6c2',
dest_file='file3',
dest_detail='0d4a0fb8d62b762a26e13591d06d93d79d61102f',
save=False,
diff=(
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
}
filediff.save()
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')
@add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffset_no_diffcommits(self):
"""Testing DiffXParser.raw_diff with DiffSet and no DiffCommits"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
'change_extra_data': {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id':
'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
},
}
diffset.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
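        # With no DiffCommits, the change-level data comes from the
        # DiffSet's change_extra_data.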
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffset),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.preamble: encoding=ascii, indent=2, length=36,'
b' line_endings=dos, mimetype=text/plain\n'
b' This is the file-level preamble.\r\n'
b'#.meta: encoding=utf-32, format=json, length=96\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"'
b'\x00\x00\x00k\x00\x00\x00e\x00\x00\x00y\x00\x00\x00"'
b'\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00v'
b'\x00\x00\x00a\x00\x00\x00l\x00\x00\x00u\x00\x00\x00e'
b'\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00')
@add_fixtures(['test_scmtools'])
def test_raw_diff_with_diffcommit(self):
"""Testing DiffXParser.raw_diff with DiffCommit"""
repository = self.create_repository(tool_name='Test')
diffset = self.create_diffset(repository=repository)
diffset.extra_data = {
'diffx': {
'metadata': {
'key': 'value',
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
'options': {
'encoding': 'utf-16',
'version': '1.0',
},
'preamble': 'This is the file-level preamble.\r\n',
'preamble_options': {
'encoding': 'ascii',
'indent': 2,
'line_endings': 'dos',
'mimetype': 'text/plain',
},
},
}
diffset.save(update_fields=('extra_data',))
# Create DiffCommit #1.
diffcommit1 = self.create_diffcommit(
diffset=diffset,
commit_id='a25e7b28af5e3184946068f432122c68c1a30b23',
with_diff=False)
diffcommit1.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T13:12:06-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:26:31-07:00',
'id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
'parent id': 'b892d5f833474c59d7851ff46a4b0bd919017e97',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': 'test\n',
'preamble_options': {
'indent': 2,
'line_endings': 'unix',
'mimetype': 'text/markdown',
},
},
}
diffcommit1.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit1,
source_file='file1',
source_revision='c8839177d1a5605aa60abe69db95c84183f0eebe',
dest_file='file1',
dest_detail='eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
save=False,
diff=(
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'unix',
},
'metadata': {
'path': 'file1',
'revision': {
'old': 'c8839177d1a5605aa60abe69db95c84183f0eebe',
'new': 'eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef',
},
},
'metadata_options': {
'encoding': 'latin1',
'format': 'json',
},
},
}
filediff.save()
# Create DiffCommit #2. This one won't be used.
diffcommit2 = self.create_diffcommit(
diffset=diffset,
commit_id='91127b687f583184144161f432222748c1a30b23',
with_diff=False)
diffcommit2.extra_data = {
'diffx': {
'metadata': {
'author': 'Test User <test@example.com>',
'author date': '2021-06-01T19:46:22-07:00',
'committer': 'Test User <test@example.com>',
'date': '2021-06-02T19:46:25-07:00',
'id': '91127b687f583184144161f432222748c1a30b23',
'parent id': 'a25e7b28af5e3184946068f432122c68c1a30b23',
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
'preamble': (
"Summary of commit #2\n"
"\n"
"Here's a description.\n"
),
'preamble_options': {
'encoding': 'utf-8',
'indent': 4,
'line_endings': 'unix',
},
},
}
diffcommit2.save(update_fields=('extra_data',))
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit2,
source_file='file2',
source_revision='281bac2b704617e807850e07e54bae3469f6a2e7',
dest_file='file2',
dest_detail='389cc6b7ae5a659383eab5dfc253764eccf84732',
save=False,
diff=(
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'encoding': 'utf-16',
'line_endings': 'unix',
},
'metadata': {
'path': 'file2',
'revision': {
'old': '281bac2b704617e807850e07e54bae3469f6a2e7',
'new': '389cc6b7ae5a659383eab5dfc253764eccf84732',
},
},
'metadata_options': {
'encoding': 'utf-32',
'format': 'json',
},
},
'encoding': 'utf-16',
}
filediff.save()
filediff = self.create_filediff(
diffset=diffset,
commit=diffcommit2,
source_file='file3',
source_revision='be089b7197974703c83682088a068bef3422c6c2',
dest_file='file3',
dest_detail='0d4a0fb8d62b762a26e13591d06d93d79d61102f',
save=False,
diff=(
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n'
))
filediff.extra_data = {
'diffx': {
'diff_options': {
'line_endings': 'dos',
},
'metadata': {
'path': 'file3',
'revision': {
'old': 'be089b7197974703c83682088a068bef3422c6c2',
'new': '0d4a0fb8d62b762a26e13591d06d93d79d61102f',
},
},
'metadata_options': {
'encoding': 'utf-8',
'format': 'json',
},
},
}
filediff.save()
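        # raw_diff() on a DiffCommit should include only that commit's
        # change section.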
parser = DiffXParser(b'')
self.assertEqual(
parser.raw_diff(diffcommit1),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.change:\n'
b'#..preamble: indent=2, length=14, line_endings=unix, '
b'mimetype=text/markdown\n'
b' \xff\xfet\x00e\x00s\x00t\x00\n\x00'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T13:12:06-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:26:31-07:00",\n'
b' "id": "a25e7b28af5e3184946068f432122c68c1a30b23",\n'
b' "parent id": "b892d5f833474c59d7851ff46a4b0bd919017e97"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=latin1, format=json, length=166\n'
b'{\n'
b' "path": "file1",\n'
b' "revision": {\n'
b' "new": "eed8df7f1400a95cdf5a87ddb947e7d9c5a19cef",\n'
b' "old": "c8839177d1a5605aa60abe69db95c84183f0eebe"\n'
b' }\n'
b'}\n'
b'#...diff: length=60, line_endings=unix\n'
b'--- /file1\n'
b'+++ /file1\n'
b'@@ -498,7 +498,7 @@\n'
b' ... diff content\n')
self.assertEqual(
parser.raw_diff(diffcommit2),
b'#diffx: encoding=utf-16, version=1.0\n'
b'#.change:\n'
b'#..preamble: encoding=utf-8, indent=4, length=56, '
b'line_endings=unix\n'
b' Summary of commit #2\n'
b' \n'
b' Here\'s a description.\n'
b'#..meta: encoding=utf-8, format=json, length=302\n'
b'{\n'
b' "author": "Test User <test@example.com>",\n'
b' "author date": "2021-06-01T19:46:22-07:00",\n'
b' "committer": "Test User <test@example.com>",\n'
b' "date": "2021-06-02T19:46:25-07:00",\n'
b' "id": "91127b687f583184144161f432222748c1a30b23",\n'
b' "parent id": "a25e7b28af5e3184946068f432122c68c1a30b23"\n'
b'}\n'
b'#..file:\n'
b'#...meta: encoding=utf-32, format=json, length=668\n'
b'\xff\xfe\x00\x00{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'p\x00\x00\x00a\x00\x00\x00t\x00\x00\x00h\x00\x00\x00'
b'"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00"\x00\x00\x00'
b'f\x00\x00\x00i\x00\x00\x00l\x00\x00\x00e\x00\x00\x00'
b'2\x00\x00\x00"\x00\x00\x00,\x00\x00\x00\n\x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x00r\x00\x00\x00e\x00\x00\x00v\x00\x00\x00'
b'i\x00\x00\x00s\x00\x00\x00i\x00\x00\x00o\x00\x00\x00'
b'n\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'{\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00"\x00\x00\x00n\x00\x00\x00'
b'e\x00\x00\x00w\x00\x00\x00"\x00\x00\x00:\x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x003\x00\x00\x008\x00\x00\x00'
b'9\x00\x00\x00c\x00\x00\x00c\x00\x00\x006\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x00a\x00\x00\x00e\x00\x00\x00'
b'5\x00\x00\x00a\x00\x00\x006\x00\x00\x005\x00\x00\x00'
b'9\x00\x00\x003\x00\x00\x008\x00\x00\x003\x00\x00\x00'
b'e\x00\x00\x00a\x00\x00\x00b\x00\x00\x005\x00\x00\x00'
b'd\x00\x00\x00f\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'5\x00\x00\x003\x00\x00\x007\x00\x00\x006\x00\x00\x00'
b'4\x00\x00\x00e\x00\x00\x00c\x00\x00\x00c\x00\x00\x00'
b'f\x00\x00\x008\x00\x00\x004\x00\x00\x007\x00\x00\x00'
b'3\x00\x00\x002\x00\x00\x00"\x00\x00\x00,\x00\x00\x00'
b'\n\x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00"\x00\x00\x00o\x00\x00\x00l\x00\x00\x00'
b'd\x00\x00\x00"\x00\x00\x00:\x00\x00\x00 \x00\x00\x00'
b'"\x00\x00\x002\x00\x00\x008\x00\x00\x001\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00c\x00\x00\x002\x00\x00\x00'
b'b\x00\x00\x007\x00\x00\x000\x00\x00\x004\x00\x00\x00'
b'6\x00\x00\x001\x00\x00\x007\x00\x00\x00e\x00\x00\x00'
b'8\x00\x00\x000\x00\x00\x007\x00\x00\x008\x00\x00\x00'
b'5\x00\x00\x000\x00\x00\x00e\x00\x00\x000\x00\x00\x00'
b'7\x00\x00\x00e\x00\x00\x005\x00\x00\x004\x00\x00\x00'
b'b\x00\x00\x00a\x00\x00\x00e\x00\x00\x003\x00\x00\x00'
b'4\x00\x00\x006\x00\x00\x009\x00\x00\x00f\x00\x00\x00'
b'6\x00\x00\x00a\x00\x00\x002\x00\x00\x00e\x00\x00\x00'
b'7\x00\x00\x00"\x00\x00\x00\n\x00\x00\x00 \x00\x00\x00'
b' \x00\x00\x00 \x00\x00\x00 \x00\x00\x00}\x00\x00\x00'
b'\n\x00\x00\x00}\x00\x00\x00\n\x00\x00\x00'
b'#...diff: encoding=utf-16, length=22, line_endings=unix\n'
b'\xff\xfe \x00.\x00.\x00.\x00 \x00d\x00i\x00f\x00f\x00\n\x00'
b'#..file:\n'
b'#...meta: encoding=utf-8, format=json, length=166\n'
b'{\n'
b' "path": "file3",\n'
b' "revision": {\n'
b' "new": "0d4a0fb8d62b762a26e13591d06d93d79d61102f",\n'
b' "old": "be089b7197974703c83682088a068bef3422c6c2"\n'
b' }\n'
b'}\n'
b'#...diff: length=87, line_endings=dos\n'
b'--- a/file3\r\n'
b'+++ b/file3\r\n'
b'@@ -258,1 +258,2 @@\r\n'
b'- old line\r\n'
b'+ new line 1\r\n'
b'+ new line 2\r\n')
| 40.958915
| 78
| 0.475349
|
fe87bf30f22f34861a589c4cc5a74427af647ce4
| 9,045
|
py
|
Python
|
models/bilinear.py
|
roeiherz/AG2Video
|
a4eb439d7147c91237ddd50ec305add8e1537360
|
[
"MIT"
] | 22
|
2020-07-01T07:11:15.000Z
|
2022-02-17T13:26:16.000Z
|
models/bilinear.py
|
roeiherz/AG2Video
|
a4eb439d7147c91237ddd50ec305add8e1537360
|
[
"MIT"
] | 5
|
2021-06-16T02:35:14.000Z
|
2022-03-12T01:00:27.000Z
|
models/bilinear.py
|
roeiherz/AG2Video
|
a4eb439d7147c91237ddd50ec305add8e1537360
|
[
"MIT"
] | 2
|
2021-08-04T05:22:58.000Z
|
2021-12-11T02:15:57.000Z
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn.functional as F
from models.metrics import xywh_to_points
from models.utils import remove_dummy_objects
"""
Functions for performing differentiable bilinear cropping of images, for use in
the object discriminator
"""
def crop_bbox_batch(imgs, objs, bbox, HH, WW=None, vocab=None, backend='cudnn'):
"""
Inputs:
- imgs: FloatTensor of shape (N, C, H, W)
    - objs: per-image object labels; used together with vocab to drop dummy
      (padding) objects before cropping
    - bbox: FloatTensor of bounding box coordinates, with bbox[i] holding the
      boxes to crop from imgs[i]
    - HH, WW: Size of the output crops
    Returns:
    - crops: FloatTensor of (HH, WW)-sized crops taken from each imgs[i] using
      its non-dummy boxes in bbox[i].
"""
if backend == 'cudnn':
return crop_bbox_batch_cudnn(imgs, objs, bbox, HH, WW, vocab=vocab)
N, C, H, W = imgs.size()
B = bbox.size(0)
if WW is None: WW = HH
dtype, device = imgs.dtype, imgs.device
crops = torch.zeros(B, C, HH, WW, dtype=dtype, device=device)
for i in range(N):
mask = remove_dummy_objects(objs[i], vocab)
cur_bbox = bbox[i][mask]
n = cur_bbox.size(0)
cur_feats = imgs[i].view(1, C, H, W).expand(n, C, H, W).contiguous()
cur_crops = crop_bbox(cur_feats, cur_bbox, HH, WW)
crops[i] = cur_crops
return crops
def _invperm(p):
N = p.size(0)
eye = torch.arange(0, N).type_as(p)
pp = (eye[:, None] == p).nonzero()[:, 1]
return pp
def crop_bbox_batch_cudnn(imgs, objs, bbox, HH, WW=None, vocab=None):
B, N, C, H, W = imgs.size()
if WW is None:
WW = HH
crops_b = []
objs_b = []
for b in range(B):
feats_flat, bbox_flat, obj_flat = [], [], []
mask = remove_dummy_objects(objs[b], vocab)
for i in range(N):
cur_bbox = bbox[b][i][mask]
# Remove illegal boxes [0, 0, 0, 0]
legal_boxes = (cur_bbox != 0).any(dim=-1)
cur_bbox = cur_bbox[legal_boxes]
curr_objs = objs[b][mask][legal_boxes].view(-1)
n = cur_bbox.size(0)
cur_feats = imgs[b][i].view(1, C, H, W).expand(n, C, H, W).contiguous()
feats_flat.append(cur_feats)
bbox_flat.append(cur_bbox)
obj_flat.append(curr_objs)
feats_flat_b = torch.cat(feats_flat, dim=0)
bbox_flat_b = torch.cat(bbox_flat, dim=0)
objs_flat_b = torch.cat(obj_flat, dim=0)
crops = crop_bbox(feats_flat_b, bbox_flat_b, HH, WW, backend='cudnn')
crops_b.append(crops)
objs_b.append(objs_flat_b)
return crops_b, objs_b
def crop_bbox(feats, bbox, HH, WW=None, backend='cudnn'):
"""
Take differentiable crops of feats specified by bbox.
Inputs:
- feats: Tensor of shape (N, C, H, W)
    - bbox: Bounding box coordinates of shape (N, 4) in [x, y, w, h] form in the
      [0, 1] coordinate space; they are converted internally to [x0, y0, x1, y1]
      corner points by xywh_to_points.
- HH, WW: Size of the output crops.
Returns:
- crops: Tensor of shape (N, C, HH, WW) where crops[i] is the portion of
feats[i] specified by bbox[i], reshaped to (HH, WW) using bilinear sampling.
"""
bbox = xywh_to_points(bbox)
N = feats.size(0)
assert bbox.size(0) == N
assert bbox.size(1) == 4
if WW is None: WW = HH
if backend == 'cudnn':
# Change box from [0, 1] to [-1, 1] coordinate system
bbox = 2 * bbox - 1
x0, y0, x1, y1 = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
X = tensor_linspace(x0, x1, steps=WW).view(N, 1, WW).expand(N, HH, WW)
Y = tensor_linspace(y0, y1, steps=HH).view(N, HH, 1).expand(N, HH, WW)
if backend == 'jj':
return bilinear_sample(feats, X, Y)
elif backend == 'cudnn':
grid = torch.stack([X, Y], dim=3)
return F.grid_sample(feats, grid, align_corners=True)
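# Illustration of the grid construction used by the 'cudnn' path above (a sketch,
# independent of xywh_to_points; the box here is assumed to already be in corner
# form [x0, y0, x1, y1] within [0, 1], and tensor_linspace is defined below).
# With a full-image box the grid spans [-1, 1] x [-1, 1], so F.grid_sample simply
# resizes feats to (HH, WW):
#
#     _feats = torch.arange(16.0).view(1, 1, 4, 4)
#     _box = torch.tensor([[0.0, 0.0, 1.0, 1.0]]) * 2 - 1       # to [-1, 1] space
#     _X = tensor_linspace(_box[:, 0], _box[:, 2], steps=2).view(1, 1, 2).expand(1, 2, 2)
#     _Y = tensor_linspace(_box[:, 1], _box[:, 3], steps=2).view(1, 2, 1).expand(1, 2, 2)
#     _grid = torch.stack([_X, _Y], dim=3)
#     F.grid_sample(_feats, _grid, align_corners=True)          # corner pixels 0, 3, 12, 15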
def bilinear_sample(feats, X, Y):
"""
Perform bilinear sampling on the features in feats using the sampling grid
given by X and Y.
Inputs:
- feats: Tensor holding input feature map, of shape (N, C, H, W)
    - X, Y: Tensors holding x and y coordinates of the sampling
      grids; both have shape (N, HH, WW) and have elements in the range [0, 1].
    Returns:
    - out: Tensor of shape (N, C, HH, WW) where out[i] is computed
      by sampling from feats[i] using the sampling grid (X[i], Y[i]).
"""
N, C, H, W = feats.size()
assert X.size() == Y.size()
assert X.size(0) == N
_, HH, WW = X.size()
X = X.mul(W)
Y = Y.mul(H)
# Get the x and y coordinates for the four samples
x0 = X.floor().clamp(min=0, max=W - 1)
x1 = (x0 + 1).clamp(min=0, max=W - 1)
y0 = Y.floor().clamp(min=0, max=H - 1)
y1 = (y0 + 1).clamp(min=0, max=H - 1)
# In numpy we could do something like feats[i, :, y0, x0] to pull out
# the elements of feats at coordinates y0 and x0, but PyTorch doesn't
# yet support this style of indexing. Instead we have to use the gather
# method, which only allows us to index along one dimension at a time;
    # therefore we will collapse the features (N, C, H, W) into (N, C, H * W)
# and index along the last dimension. Below we generate linear indices into
# the collapsed last dimension for each of the four combinations we need.
y0x0_idx = (W * y0 + x0).view(N, 1, HH * WW).expand(N, C, HH * WW)
y1x0_idx = (W * y1 + x0).view(N, 1, HH * WW).expand(N, C, HH * WW)
y0x1_idx = (W * y0 + x1).view(N, 1, HH * WW).expand(N, C, HH * WW)
y1x1_idx = (W * y1 + x1).view(N, 1, HH * WW).expand(N, C, HH * WW)
# Actually use gather to pull out the values from feats corresponding
    # to our four samples, then reshape them to (N, C, HH, WW)
feats_flat = feats.view(N, C, H * W)
v1 = feats_flat.gather(2, y0x0_idx.long()).view(N, C, HH, WW)
v2 = feats_flat.gather(2, y1x0_idx.long()).view(N, C, HH, WW)
v3 = feats_flat.gather(2, y0x1_idx.long()).view(N, C, HH, WW)
v4 = feats_flat.gather(2, y1x1_idx.long()).view(N, C, HH, WW)
# Compute the weights for the four samples
w1 = ((x1 - X) * (y1 - Y)).view(N, 1, HH, WW).expand(N, C, HH, WW)
w2 = ((x1 - X) * (Y - y0)).view(N, 1, HH, WW).expand(N, C, HH, WW)
w3 = ((X - x0) * (y1 - Y)).view(N, 1, HH, WW).expand(N, C, HH, WW)
w4 = ((X - x0) * (Y - y0)).view(N, 1, HH, WW).expand(N, C, HH, WW)
# Multiply the samples by the weights to give our interpolated results.
out = w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4
return out
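# Minimal sanity-check sketch for bilinear_sample (assumes only torch; the tensor
# values are made up). Sampling a constant feature map at interior grid points
# returns the constant, because the four bilinear weights sum to one away from
# the image borders:
#
#     _feats = torch.full((1, 1, 4, 4), 7.0)
#     _X = torch.full((1, 2, 2), 0.5)
#     _Y = torch.full((1, 2, 2), 0.5)
#     assert torch.allclose(bilinear_sample(_feats, _X, _Y),
#                           torch.full((1, 1, 2, 2), 7.0))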
def tensor_linspace(start, end, steps=10):
"""
Vectorized version of torch.linspace.
Inputs:
- start: Tensor of any shape
- end: Tensor of the same shape as start
- steps: Integer
Returns:
- out: Tensor of shape start.size() + (steps,), such that
out.select(-1, 0) == start, out.select(-1, -1) == end,
and the other elements of out linearly interpolate between
start and end.
"""
assert start.size() == end.size()
view_size = start.size() + (1,)
w_size = (1,) * start.dim() + (steps,)
out_size = start.size() + (steps,)
start_w = torch.linspace(1, 0, steps=steps).to(start)
start_w = start_w.view(w_size).expand(out_size)
end_w = torch.linspace(0, 1, steps=steps).to(start)
end_w = end_w.view(w_size).expand(out_size)
start = start.contiguous().view(view_size).expand(out_size)
end = end.contiguous().view(view_size).expand(out_size)
out = start_w * start + end_w * end
return out
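# Usage sketch for tensor_linspace (assumes only torch):
#
#     _out = tensor_linspace(torch.zeros(3), torch.ones(3), steps=5)
#     # _out has shape (3, 5); every row equals torch.linspace(0, 1, steps=5),
#     # i.e. [0.00, 0.25, 0.50, 0.75, 1.00].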
if __name__ == '__main__':
    import numpy as np
    from imageio import imread, imsave
    from skimage.transform import resize
    cat = resize(imread('cat.jpg'), (256, 256), anti_aliasing=True,
                 preserve_range=True)
    dog = resize(imread('dog.jpg'), (256, 256), anti_aliasing=True,
                 preserve_range=True)
feats = torch.stack([
torch.from_numpy(cat.transpose(2, 0, 1).astype(np.float32)),
torch.from_numpy(dog.transpose(2, 0, 1).astype(np.float32))],
dim=0)
boxes = torch.FloatTensor([
[0, 0, 1, 1],
[0.25, 0.25, 0.75, 0.75],
[0, 0, 0.5, 0.5],
])
box_to_feats = torch.LongTensor([1, 0, 1]).cuda()
feats, boxes = feats.cuda(), boxes.cuda()
crops = crop_bbox_batch_cudnn(feats, boxes, box_to_feats, 128)
for i in range(crops.size(0)):
crop_np = crops.data[i].cpu().numpy().transpose(1, 2, 0).astype(np.uint8)
imsave('out%d.png' % i, crop_np)
| 36.471774
| 85
| 0.612272
|
3fce59e242e38ad28ee6df35a892f4fd78bf3f49
| 1,392
|
py
|
Python
|
example/blueprints/testdash.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
example/blueprints/testdash.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | 24
|
2020-04-07T17:04:47.000Z
|
2020-05-27T00:51:25.000Z
|
example/blueprints/testdash.py
|
MaayanLab/jupyter-template
|
dd05bfcb95c9eafb1a9df845b5d8fecae1d6b9d5
|
[
"Apache-2.0"
] | null | null | null |
def testdash(app, url_prefix='/testdash', DATA_DIR=''):
import pandas as pd
import dash
from dash.dependencies import Input, Output
from dash import dcc
from dash import html
df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/dash-stock-ticker-demo.csv')
testdash = dash.Dash(
'testdash',
server=app,
routes_pathname_prefix=url_prefix + '/',
)
testdash.layout = html.Div([
html.H1('Stock Tickers'),
dcc.Dropdown(
id='my-dropdown',
options=[
{'label': 'Tesla', 'value': 'TSLA'},
{'label': 'Apple', 'value': 'AAPL'},
{'label': 'Coke', 'value': 'COKE'}
],
value='TSLA'
),
dcc.Graph(id='my-graph')
], className="container")
@testdash.callback(Output('my-graph', 'figure'),
[Input('my-dropdown', 'value')])
def update_graph(selected_dropdown_value):
dff = df[df['Stock'] == selected_dropdown_value]
return {
'data': [{
'x': dff.Date,
'y': dff.Close,
'line': {
'width': 3,
'shape': 'spline'
}
}],
'layout': {
'margin': {
'l': 30,
'r': 20,
'b': 30,
't': 20
}
}
}
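# Usage sketch (assumption: `flask_app` is an existing Flask application and the
# `dash`/`pandas` dependencies imported above are installed):
#
#     from flask import Flask
#     flask_app = Flask(__name__)
#     testdash(flask_app, url_prefix='/testdash')
#     # The stock-ticker dashboard is then served under /testdash/ on flask_app.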
| 26.769231
| 105
| 0.469828
|
09a645c2b36031352bf29c72ee41e0fce0728493
| 2,921
|
py
|
Python
|
migrations/versions/eb6f1d267a24_.py
|
Amin1014/pitch
|
38862a080a1ea183d9820b2499fab98776223799
|
[
"MIT"
] | null | null | null |
migrations/versions/eb6f1d267a24_.py
|
Amin1014/pitch
|
38862a080a1ea183d9820b2499fab98776223799
|
[
"MIT"
] | null | null | null |
migrations/versions/eb6f1d267a24_.py
|
Amin1014/pitch
|
38862a080a1ea183d9820b2499fab98776223799
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: eb6f1d267a24
Revises:
Create Date: 2021-08-18 16:25:41.411119
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'eb6f1d267a24'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('pass_secure', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('pitches',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pitch', sa.String(), nullable=True),
sa.Column('posted_date', sa.DateTime(), nullable=False),
sa.Column('content', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('comments',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(), nullable=False),
sa.Column('posted_date', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('downvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('upvotes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('upvote', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('pitch_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['pitch_id'], ['pitches.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('upvotes')
op.drop_table('downvotes')
op.drop_table('comments')
op.drop_table('pitches')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
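# How this migration is typically applied (standard Alembic CLI, assuming the
# project's alembic.ini / env.py point at the app database; projects using
# Flask-Migrate would run the equivalent `flask db upgrade` / `flask db downgrade`):
#
#     alembic upgrade head      # runs upgrade() above, creating all five tables
#     alembic downgrade -1      # runs downgrade(), dropping them again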
| 36.5125
| 76
| 0.657994
|
ccd54f0d919152280191464aea819835e47ec852
| 15,614
|
py
|
Python
|
legacy/huobi_eth_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 6
|
2019-04-03T22:33:39.000Z
|
2021-05-07T12:00:00.000Z
|
legacy/huobi_eth_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 2
|
2019-04-03T22:57:41.000Z
|
2019-04-26T07:13:12.000Z
|
legacy/huobi_eth_client.py
|
szhu3210/Arbitrage-trader
|
d2af5670f7285084f70e36a48c95e796d1fd2d37
|
[
"Apache-2.0"
] | 1
|
2019-04-28T07:23:32.000Z
|
2019-04-28T07:23:32.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import base64
import hmac
import hashlib
import json
from urllib import parse
from urllib import request
from datetime import datetime
# timeout in 5 seconds:
TIMEOUT = 5
API_HOST = 'be.huobi.com'
SCHEME = 'https'
# language setting: 'zh-CN', 'en':
LANG = 'zh-CN'
DEFAULT_GET_HEADERS = {
'Accept': 'application/json',
'Accept-Language': LANG
}
DEFAULT_POST_HEADERS = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'Accept-Language': LANG
}
class Dict(dict):
def __init__(self, **kw):
super().__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
def _toDict(d):
return Dict(**d)
class ApiError(BaseException):
pass
class ApiNetworkError(BaseException):
pass
class ApiClient(object):
def __init__(self, appKey, appSecret, host=API_HOST):
'''
Init api client object, by passing appKey and appSecret.
'''
self._accessKeyId = appKey
self._accessKeySecret = appSecret.encode('utf-8') # change to bytes
self._host = host
def get(self, path, **params):
'''
Send a http get request and return json object.
'''
qs = self._sign('GET', path, self._utc(), params)
return self._call('GET', '%s?%s' % (path, qs))
def post(self, path, obj=None):
'''
Send a http post request and return json object.
'''
qs = self._sign('POST', path, self._utc())
data = None
if obj is not None:
data = json.dumps(obj).encode('utf-8')
return self._call('POST', '%s?%s' % (path, qs), data)
def _call(self, method, uri, data=None):
url = '%s://%s%s' % (SCHEME, self._host, uri)
# print(method + ' ' + url)
req = request.Request(url, data=data, headers=DEFAULT_GET_HEADERS if method=='GET' else DEFAULT_POST_HEADERS, method=method)
with request.urlopen(req, timeout=TIMEOUT) as resp:
if resp.getcode()!=200:
raise ApiNetworkError('Bad response code: %s %s' % (resp.getcode(), resp.reason))
return self._parse(resp.read())
def _parse(self, text):
# print('Response:\n%s' % text)
result = json.loads(text, object_hook=_toDict)
if result.status=='ok':
return result.data
raise ApiError('%s: %s' % (result['err-code'], result['err-msg']))
def _sign(self, method, path, ts, params=None):
self._method = method
# create signature:
if params is None:
params = {}
params['SignatureMethod'] = 'HmacSHA256'
params['SignatureVersion'] = '2'
params['AccessKeyId'] = self._accessKeyId
params['Timestamp'] = ts
# sort by key:
keys = sorted(params.keys())
# build query string like: a=1&b=%20&c=:
qs = '&'.join(['%s=%s' % (key, self._encode(params[key])) for key in keys])
# build payload:
payload = '%s\n%s\n%s\n%s' % (method, self._host, path, qs)
# print('payload:\n%s' % payload)
dig = hmac.new(self._accessKeySecret, msg=payload.encode('utf-8'), digestmod=hashlib.sha256).digest()
sig = self._encode(base64.b64encode(dig).decode())
# print('sign: ' + sig)
qs = qs + '&Signature=' + sig
return qs
def _utc(self):
return datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
def _encode(self, s):
return parse.quote(s, safe='')
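# Signing sketch (assumption: 'demo-key' / 'demo-secret' are placeholder
# credentials). _sign() builds the signed query string locally, with no network
# call, by HMAC-SHA256-signing "METHOD\nhost\npath\n<sorted query string>":
#
#     _demo = ApiClient('demo-key', 'demo-secret')
#     _qs = _demo._sign('GET', '/v1/common/symbols', _demo._utc())
#     # _qs contains AccessKeyId, SignatureMethod=HmacSHA256, SignatureVersion=2,
#     # Timestamp and a base64-encoded Signature parameter.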
class Huobi_ETH_Client():
def __init__(self):
self.client = ApiClient(API_KEY, API_SECRET)
self.account_id = self._getAccountID()
def getSymbols(self):
logging.info('Getting symbols for client:')
return self.client.get('/v1/common/symbols')
def getUserInfo(self):
logging.info('Getting user info for client:')
return self.client.get('/v1/users/user')
def getAllAccounts(self):
logging.info('Getting accounts for client:')
return self.client.get('/v1/account/accounts')
def getETHBalance(self):
balanceList = self.client.get('/v1/account/accounts/%s/balance' % self.account_id).list
for line in balanceList:
if line.currency=='eth' and line.type=='trade':
return line.balance
raise BaseException('ETH balance not found in account! Check ETH account!')
def getCNYBalance(self):
balanceList = self.client.get('/v1/account/accounts/%s/balance' % self.account_id).list
for line in balanceList:
if line.currency=='cny' and line.type=='trade':
return line.balance
raise BaseException('CNY balance not found in account! Check ETH account!')
def printBalanceRaw(self):
accs = self.getAllAccounts()
logging.info('All Accounts: ')
logging.info(accs)
logging.info('Getting balance for client:')
for acc in accs:
logging.info('Getting sub account: %s' % acc)
subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
print(subaccs)
def getBalance(self):
res = []
accs = self.getAllAccounts()
logging.info('All Accounts: ')
logging.info(accs)
logging.info('Getting balance for client:')
for acc in accs:
logging.info('Getting sub account: %s' % acc)
subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
res.append(subaccs)
return res
def printBalance(self):
accs = self.getAllAccounts()
logging.info('All Accounts: ')
logging.info(accs)
logging.info('Getting balance for client:')
account_id = accs[0].id
for acc in accs:
logging.info('Getting sub account: %s' % acc)
subaccs = self.client.get('/v1/account/accounts/%s/balance' % acc.id)
self._br()
print('Account ID: %s' % account_id)
print('#\tCurrency\tType\t\tBalance')
for i, currency in enumerate(subaccs.list):
print('%d\t%s\t\t%s\t\t%s' % (i+1, currency.currency, currency.type, currency.balance))
self._br()
def _getAccountID(self):
return self.getAllAccounts()[0].id
def _br(self):
print('\n' + '-'*50 + '\n')
def getSubmittedOrders(self):
return self._getOrders('submitted')
def printSubmittedOrders(self):
logging.info('Getting submitted orders:')
order_info = self.getSubmittedOrders()
self._printOrders(order_info, title='ALL SUBMITTED ORDERS')
def getCurrentOrders(self):
return self._getOrders('submitted,partial-filled,partial-canceled')
def printCurrentOrders(self):
logging.info('Getting current orders:')
order_info = self.getCurrentOrders()
self._printOrders(order_info, title='CURRENT ORDERS')
def getAllValidOrders(self):
return self._getOrders('submitted,partial-filled,partial-canceled,filled,canceled')
def printAllValidOrders(self):
logging.info('Getting all valid orders:')
order_info = self.getAllValidOrders()
self._printOrders(order_info, title='ALL VALID ORDERS')
def getFilledOrders(self):
return self._getOrders('filled')
def getAllOrders(self):
return self._getOrders()
def _getOrders(self, types='pre-submitted,submitted,partial-filled,partial-canceled,filled,canceled'):
return self.client.get('/v1/order/orders', symbol='ethcny', states=types)
def printAllOrders(self):
logging.info('Getting all orders:')
order_info = self.getAllOrders()
self._printOrders(order_info, title='ALL ORDERS')
def _printOrders(self, order_info, title=''):
self._br()
print(' ' + '~'*10 + ''.join(title) + '~'*10 +'\n')
print(' # Order\t Amount\t Price\t Create Time Type Field-Amount Field-Cash Field-Fees Finished Time Source State Cancelled at')
for i, line in enumerate(order_info):
# print(line)
print('%3d %d\t%s\t%15s\t %s \t%10s\t%15s\t%15s\t%15s\t %s\t %s \t%s\t%s' % (
i+1,
line.id,
line.amount,
line.price,
datetime.fromtimestamp(line['created-at']/1000).strftime('%Y-%m-%d %H:%M:%S'),
line.type,
line['field-amount'],
line['field-cash-amount'],
line['field-fees'],
datetime.fromtimestamp(line['finished-at']/1000).strftime('%Y-%m-%d %H:%M:%S'),
line.source,
line.state,
'' if 0==line['canceled-at'] else datetime.fromtimestamp(line['canceled-at']/1000).strftime('%Y-%m-%d %H:%M:%S')
))
self._br()
def buy_ETH_limit(self):
pass
def createOrder(self, amount, price, direction):
order_id = self.client.post('/v1/order/orders', {
'account-id': self.account_id,
'amount': amount,
'price': price,
'symbol': 'ethcny',
'type': direction,
'source': 'api'
})
logging.info('Printing order_id:')
logging.info(order_id)
return order_id
def placeOrder(self, order_id):
self.client.post('/v1/order/orders/%s/place' % order_id)
def printOrderDetails(self, order_id):
order_info = self.client.get('/v1/order/orders/%s' % order_id)
self._printOrders([order_info], title='ORDER DETAIL of ORDER # %s' % order_id)
def getOrderStatus(self, order_id):
return self.client.get('/v1/order/orders/%s' % order_id).state
def getOrderDetail(self, order_id):
return self.client.get('/v1/order/orders/%s' % order_id)
def isOrderSuccess(self, order_id):
orderStatus = self.getOrderStatus(order_id)
return orderStatus == 'filled'
def isOrderCancelled(self, order_id):
orderStatus = self.getOrderStatus(order_id)
return orderStatus == 'canceled'
def cancelOrder(self, order_id):
return self.client.post('/v1/order/orders/%s/submitcancel' % order_id)
def cancelAllOrders(self):
        logging.info('Cancelling all current orders:')
self.printCurrentOrders()
orders = self.getCurrentOrders()
for order in orders:
order_id = order.id
logging.info('Cancelling order # %d' % order_id)
self.cancelOrder(order_id)
logging.info('All orders cancelled!')
def getWithdrawAddress(self):
return self.client.get('/v1/dw/withdraw-legal/addresses', currency='cny')
def create_transfer_cny_to_main(self, amount):
withdraw_id = self.client.post('/v1/dw/withdraw-legal/create', {
'account-id': self.account_id,
'amount': amount,
'currency': 'cny',
})
print('Printing CNY_withdraw_id:')
print(withdraw_id)
return withdraw_id
def place_transfer_cny_to_main(self, withdraw_id):
return self.client.post('/v1/dw/withdraw-legal/%s/place' % withdraw_id)
def transfer_cny_to_main(self, amount):
if '.' in amount and len(amount.split('.')[1]) > 2:
raise BaseException('CNY transfer amount: Decimal part should be no more than 2-digits!')
if float(self.getCNYBalance()) < float(amount):
raise BaseException('Not enough CNY balance (in ETH account) to transfer!')
transfer_id = self.create_transfer_cny_to_main(amount)
return self.place_transfer_cny_to_main(transfer_id)
def get_transferable_cny_from_main(self):
return self.client.get('/v1/dw/deposit-legal/balance', currency='cny')
def create_transfer_cny_from_main(self, amount):
withdraw_id = self.client.post('/v1/dw/deposit-legal/create', {
'account-id': self.account_id,
'amount': amount,
'currency': 'cny',
})
print('Printing CNY_deposit_id: %s ' % withdraw_id)
return withdraw_id
def place_transfer_cny_from_main(self, withdraw_id):
return self.client.post('/v1/dw/deposit-legal/%s/place' % withdraw_id)
def cancel_transfer_cny_from_main(self, withdraw_id):
# INVALID
return self.client.post('/v1/dw/deposit-legal/%s/submitcancel' % withdraw_id)
def cancel_transfer_cny_to_main(self, withdraw_id):
# INVALID
return self.client.post('/v1/dw/withdraw-legal/%s/cancel' % withdraw_id)
def get_financial_history(self):
return self.client.get('/v1/query/finances')
def print_financial_history(self):
history = self.get_financial_history()
for transaction in history:
print(transaction)
def transfer_cny_from_main(self, amount):
if float(self.get_transferable_cny_from_main()) < float(amount):
raise BaseException('Not enough CNY balance (in main account) to transfer!')
transfer_id = self.create_transfer_cny_from_main(amount)
return self.place_transfer_cny_from_main(transfer_id)
def get_eth_withdraw_addresses(self):
addresses = self.client.get('/v1/dw/withdraw-virtual/addresses', currency='eth')
logging.info('Printing addresses:')
logging.info(addresses)
return addresses
def withdraw_eth_create(self, address_id='', amount=''):
# INVALID
withdraw_id = self.client.post('/v1/dw/withdraw-virtual/create', {
'address-id': address_id,
'amount': amount,
'trade-password': self.TRADE_PW # needs update here, trade pw is not supported by server and will return error
})
logging.info('Printing withdraw_id:')
logging.info(withdraw_id)
return withdraw_id
def withdraw_eth_place(self, withdraw_id):
status = self.client.post('/v1/dw/withdraw-virtual/%s/place' % withdraw_id)
print('Withdraw ETH order placed.')
logging.info('Printing withdraw status:')
logging.info(status)
return status
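# Order-flow sketch (assumptions: API_KEY / API_SECRET are defined elsewhere
# before Huobi_ETH_Client is instantiated -- they are not present in this file --
# and the amount/price below are placeholders). An order is first created, then
# placed:
#
#     client = Huobi_ETH_Client()
#     order_id = client.createOrder(amount='0.001', price='1600.0',
#                                   direction='buy-limit')
#     client.placeOrder(order_id)
#     client.printOrderDetails(order_id)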
def main():
huobi_eth = Huobi_ETH_Client()
# print(type(huobi_eth.getCNYBalance()))
# print(huobi_eth.cancel_transfer_cny_to_main(withdraw_id='45833'))
# huobi_eth.print_financial_history()
# huobi_eth.transfer_cny_from_main(amount='1.0')
# print(huobi_eth.transfer_cny_to_main(amount='0.02'))
# print(huobi_eth.get_eth_withdraw_addresses())
# print(huobi_eth.get_transferable_cny_from_main())
# print(huobi_eth.transfer_cny_from_main('0.01'))
# print(huobi_eth.getCNYBalance())
# huobi_eth.getETHBalance()
# print(huobi_eth.transfer_cny_to_main('0.03'))
# transfer_id = huobi_eth.create_transfer_cny_to_main('0.02')
# print(huobi_eth.place_transfer_cny_to_main(transfer_id))
# print(huobi_eth.getWithdrawAddress())
# print(huobi_eth.getSymbols())
# print(huobi_eth.getUserInfo())
# print(huobi_eth.getAllAccounts())
# huobi_eth.printBalance()
# huobi_eth.printSubmittedOrders()
# huobi_eth.printAllValidOrders()
# huobi_eth.printAllOrders()
# orderid = huobi_eth.createOrder(huobi_eth.account_id(), '0.001', '1600.0', 'sell-limit')
# huobi_eth.placeOrder(orderid)
# huobi_eth.cancelAllOrders()
if __name__ == '__main__':
main()
| 36.227378
| 204
| 0.618612
|
3b7d4b6c172ee5fe12716066c26e54f0e01a292a
| 298
|
py
|
Python
|
tools/__init__.py
|
NickolausDS/gladier_kanzus
|
79c66eaba88956ab44ff5e6e38ed3b6e6b3e1fdd
|
[
"MIT"
] | null | null | null |
tools/__init__.py
|
NickolausDS/gladier_kanzus
|
79c66eaba88956ab44ff5e6e38ed3b6e6b3e1fdd
|
[
"MIT"
] | null | null | null |
tools/__init__.py
|
NickolausDS/gladier_kanzus
|
79c66eaba88956ab44ff5e6e38ed3b6e6b3e1fdd
|
[
"MIT"
] | null | null | null |
from gladier import GladierBaseTool
#__[all]__ = [CreatePhil,DialsStills,DialsVersion,Pilot,Prime,Primalisys]
CreatePhil = GladierBaseTool()
DialsStills = GladierBaseTool()
DialsVersion = GladierBaseTool()
Pilot = GladierBaseTool()
Prime = GladierBaseTool()
Primalisys = GladierBaseTool()
| 16.555556
| 73
| 0.788591
|
85c630753e80879e4489ceb359e25a3287be3ff0
| 3,958
|
py
|
Python
|
navigation.py
|
Johnkhk/140A-lab-5
|
6ff1a441675b5e0052a4e1ef3ca5206b4fb044bb
|
[
"MIT"
] | null | null | null |
navigation.py
|
Johnkhk/140A-lab-5
|
6ff1a441675b5e0052a4e1ef3ca5206b4fb044bb
|
[
"MIT"
] | null | null | null |
navigation.py
|
Johnkhk/140A-lab-5
|
6ff1a441675b5e0052a4e1ef3ca5206b4fb044bb
|
[
"MIT"
] | null | null | null |
import torchvision
import torch
import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import PIL.Image
import numpy as np
import traitlets
from jetbot import Camera, bgr8_to_jpeg
from jetbot import Robot
import time
import math
# Hint : Look at Road Following and Collision Avoidance Lab
class Navigation:
def __init__(self):
self.camera = Camera.instance(width=224, height=224)
self.robot = Robot()
#Collision Avoidance
self.ca_model = torchvision.models.alexnet(pretrained=False)
self.ca_model.classifier[6] = torch.nn.Linear(self.ca_model.classifier[6].in_features, 2)
self.ca_model.load_state_dict(torch.load('best_model.pth'))
self.device = torch.device('cuda')
self.ca_model = self.ca_model.to(self.device)
self.mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
self.std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()
self.normalize = torchvision.transforms.Normalize(self.mean, self.std)
#Road following support variables
self.angle = 0.0
self.angle_last = 0.0
# Instantiating the road following network.
self.rf_model = torchvision.models.resnet18(pretrained=False)
self.rf_model.fc = torch.nn.Linear(512, 2)
self.rf_model.load_state_dict(torch.load('best_steering_model_xy.pth'))
self.rf_model = self.rf_model.to(self.device)
self.rf_model = self.rf_model.eval().half()
self.speed_gain = 0.12
self.steering_gain = 0
self.steering_dgain = 0.1
self.steering_bias = 0.0
self.t_unit_dist = 0.04
self.starttime = 0
self.cumulative_angle = -1
self.pitstop = []
self.startposition = []
self.pathpoints = [[]]
self.proportionality_const = -1 # TODO : Add the proper value
def collision_avoidance_preprocessing(self, camera_value):
"""Preprocessing for collision avoidance."""
...
def collision_avoidance(self, change):
"""This will determine the next start point which will be
which will be demarcated by the presence of another bot."""
# Collision avoidance has to be trained to detect a bot as
# and obstacle. This will then be called in the road following function.
...
def road_following_preprocessing(self, image):
"Preprocesses the image for road following."
...
def road_following(self, change):
"The main function to navigate in the race."
...
# 1. This will ideally have the road following code
# 2. This method will also call the collision avoidance
# function which will detect the presence of a bot.
# 3. Once the collision is detected it will verify it's position
# is within the range of the next start point
# 4. If it is so, it will call the bot detected function
# which will publish a message on the appropriate topic.
# 5. In addition to that it will also be storing its coordinate location
# 6. The initial start angle (bot's orientation at the startpoint ) will
# be provided to the students in the start-coordinates
def collision_detected(self):
"""This will publish the message on the topic for the
next bot to run."""
...
def move_to_start(self, pitstop, startpoint):
"""Calibrate the bot to reach the start positions."""
# pitstop structure : list : [ x_coordinate, y_coordinate ]
# startpoint structure : list : [ x_coordinate, y_coordinate, start_angle ]
# start_angle : angle it makes with the x axis at the start location
def sprint(self, comm_callback1, comm_callback2):
"""Navigate through the track."""
        self.baton_callback = comm_callback1
        self.path_callback = comm_callback2
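# One possible shape for collision_avoidance_preprocessing, modelled on the
# standard JetBot collision-avoidance notebooks (a sketch under that assumption,
# not the required lab solution; `self` is a Navigation instance and
# `camera_value` is a BGR uint8 frame from self.camera):
#
#     x = cv2.cvtColor(camera_value, cv2.COLOR_BGR2RGB).transpose((2, 0, 1))
#     x = torch.from_numpy(np.ascontiguousarray(x)).float().div(255.0)
#     x = self.normalize(x.to(self.device))
#     return x[None, ...]                    # (1, 3, 224, 224) batch for ca_model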
| 40.804124
| 97
| 0.656645
|
3d364a7c8805e503632e7a208047a6419fb0ce53
| 40
|
py
|
Python
|
core/database/SqlException.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
core/database/SqlException.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
core/database/SqlException.py
|
Korbier/PyWsServer
|
95510a935c3019ead04d2de71e4344d94b3ac560
|
[
"MIT"
] | null | null | null |
class SqlException(Exception):
pass
| 13.333333
| 30
| 0.75
|
baefc05a4393db9f3ebbf03b3ffb07012f642692
| 400
|
py
|
Python
|
multinet/api/views/__init__.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | null | null | null |
multinet/api/views/__init__.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 91
|
2021-03-15T19:00:15.000Z
|
2022-03-11T00:04:05.000Z
|
multinet/api/views/__init__.py
|
multinet-app/multinet-api
|
a658d787f0fb9ba415ed85a1e37c29953486287f
|
[
"Apache-2.0"
] | 1
|
2022-02-05T15:53:04.000Z
|
2022-02-05T15:53:04.000Z
|
from .network import NetworkViewSet
from .query import AqlQueryViewSet
from .table import TableViewSet
from .upload import UploadViewSet
from .users import users_me_view, users_search_view
from .workspace import WorkspaceViewSet
__all__ = [
'users_me_view',
'users_search_view',
'NetworkViewSet',
'TableViewSet',
'UploadViewSet',
'WorkspaceViewSet',
'AqlQueryViewSet',
]
| 23.529412
| 51
| 0.76
|
27cc7a8cb424d68ce06905d92c8df4faf2590958
| 1,846
|
py
|
Python
|
main.py
|
mtrazzi/baba-is-gym
|
5bbc1a9f7eda86073fa399378cb6b39c0ac16db0
|
[
"BSD-3-Clause"
] | 5
|
2019-04-14T22:12:26.000Z
|
2021-12-28T09:27:26.000Z
|
main.py
|
mtrazzi/baba-is-gym
|
5bbc1a9f7eda86073fa399378cb6b39c0ac16db0
|
[
"BSD-3-Clause"
] | 1
|
2019-04-14T21:34:58.000Z
|
2019-04-14T21:34:58.000Z
|
main.py
|
mtrazzi/baba-is-gym
|
5bbc1a9f7eda86073fa399378cb6b39c0ac16db0
|
[
"BSD-3-Clause"
] | null | null | null |
# adapted from: https://medium.com/emergent-future/simple-reinforcement-learning-with-tensorflow-part-0-q-learning-with-tables-and-neural-networks-d195264329d0
import numpy as np
import os
import random
import sys
def gridworld():
import gym
import baba_is_gym
return gym.make('BabaIsGymEnv-v0')
def main():
render_mode = 'terminal'
if (len(sys.argv) > 1):
render_mode = sys.argv[1]
env = gridworld()
total_reward = 0
# Initialize table with all zeros
Q = np.zeros([env.observation_space.n,env.action_space.n])
# Set learning parameters
lr = .8
y = .95
epsilon = 0.1
num_episodes = 20000
steps_per_episode = 200
for i in range(num_episodes):
# Reset environment and get first new observation
s = env.reset()
rAll = 0
d = False
j = 0
#The Q-Table learning algorithm
while j < steps_per_episode:
if (render_mode == 'terminal' and i > 0 and (i % 100) == 0):
env.render('terminal')
j += 1
# Choose an action by epsilon-greedy (with noise) picking from Q table
if (random.random() < (epsilon / np.log(i+2))):
a = random.randint(0, env.action_space.n - 1)
else:
a = np.argmax(Q[s,:] + np.random.randn(1,env.action_space.n)*(1./(i+1)))
# Get new state and reward from environment
s1,r,d,_ = env.step(a)
# Update Q-Table with new knowledge
s1 = int(s1)
Q[s,a] = Q[s,a] + lr * (r + y * np.max(Q[s1,:]) - Q[s,a])
rAll += r
s = s1
if d == True:
break
os.system('cls' if os.name == 'nt' else 'clear')
print("num_episodes: ", i, "\nreward: ", int(rAll))
if __name__ == "__main__":
main()
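# Reference for the tabular update used in the loop above (standard Q-learning,
# written with this script's names: lr = learning rate, y = discount factor):
#
#     Q[s, a] <- Q[s, a] + lr * (r + y * max_a' Q[s1, a'] - Q[s, a])
#
# With lr = 1 this reduces to the one-step Bellman target r + y * max_a' Q[s1, a'].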
| 32.964286
| 159
| 0.557421
|