Dataset schema (⌀ = column contains null values):

| column | dtype | lengths / values |
|---|---|---|
| hexsha | string | length 40 (fixed) |
| size | int64 | 1 to 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 (fixed) |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 (fixed) |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 (fixed) |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 (fixed) |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 (fixed) |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 (fixed) |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
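For orientation, here is a minimal sketch of how rows with this schema could be streamed and inspected with the Hugging Face `datasets` library. The dataset identifier is a placeholder (this dump does not name the dataset); the field names come from the schema above.

```python
# Hedged sketch: stream a few rows and print selected schema fields.
from itertools import islice

from datasets import load_dataset

# "<dataset-id>" is a placeholder, not taken from this dump.
rows = load_dataset("<dataset-id>", split="train", streaming=True)
for row in islice(rows, 3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"],
          row["size"], round(row["alphanum_fraction"], 3))
```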
Row 1:

| field | value |
|---|---|
| hexsha | 795236d47b3fabc037de675331efa5a541e17a5f |
| size | 9,140 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | xclim/testing/tests/test_cli.py |
| repo_name (stars, issues, forks) | fossabot/xclim |
| repo_head_hexsha (stars, issues, forks) | 31fbdce6545d29e8a762b64b880e04eeb601f9eb |
| repo_licenses (stars, issues, forks) | ["Apache-2.0"] |
| star/issue/fork counts and event datetimes | null |

content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tests for `xclim` package, command line interface
import numpy as np
import pytest
import xarray as xr
from click.testing import CliRunner
import xclim
from xclim.cli import cli
from xclim.testing import open_dataset
try:
from dask.distributed import Client
except ImportError:
Client = None
@pytest.mark.parametrize(
"indicators,indnames",
[
([xclim.atmos.tg_mean], ["tg_mean"]),
(
# Note: This test is dependent on indicator name length and terminal dimensions.
[xclim.atmos.tn_mean, xclim.atmos.ice_days],
["tn_mean", "ice_days"],
),
],
)
def test_info(indicators, indnames):
runner = CliRunner()
results = runner.invoke(cli, ["info"] + indnames)
for ind in indicators:
assert ind.title in results.output
assert ind.identifier in results.output
def test_indices():
runner = CliRunner()
results = runner.invoke(cli, ["indices"])
for name, ind in xclim.core.indicator.registry.items():
assert name.lower() in results.output
@pytest.mark.parametrize(
"indicator,indname",
[
(xclim.atmos.heating_degree_days, "heating_degree_days"),
(xclim.land.base_flow_index, "base_flow_index"),
],
)
def test_indicator_help(indicator, indname):
runner = CliRunner()
results = runner.invoke(cli, [indname, "--help"])
for name in indicator.parameters.keys():
if name not in ["ds", "indexer"]:
assert name in results.output
@pytest.mark.parametrize(
"indicator,expected,varnames",
[
("tg_mean", 272.15, ["tas"]),
("dtrvar", 0.0, ["tasmin", "tasmax"]),
("heating_degree_days", 6588.0, ["tas"]),
("solidprcptot", 31622400.0, ["tas", "pr"]),
],
)
def test_normal_computation(
tasmin_series, tasmax_series, pr_series, tmp_path, indicator, expected, varnames
):
tasmin = tasmin_series(np.ones(366) + 270.15, start="1/1/2000")
tasmax = tasmax_series(np.ones(366) + 272.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
ds = xr.Dataset(
data_vars={
"tasmin": tasmin,
"tasmax": tasmax,
"tas": xclim.atmos.tg(tasmin, tasmax),
"pr": pr,
}
)
input_file = tmp_path / "in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
args = ["-i", str(input_file), "-o", str(output_file), "-v", indicator]
runner = CliRunner()
results = runner.invoke(cli, args)
for varname in varnames:
assert f"Parsed {varname} = {varname}" in results.output
assert "Processing :" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
outvar = list(out.data_vars.values())[0]
np.testing.assert_allclose(outvar[0], expected)
def test_multi_input(tas_series, pr_series, tmp_path):
tas = tas_series(np.ones(366) + 273.15, start="1/1/2000")
pr = pr_series(np.ones(366), start="1/1/2000")
tas_file = tmp_path / "multi_tas_in.nc"
pr_file = tmp_path / "multi_pr_in.nc"
output_file = tmp_path / "out.nc"
tas.to_dataset().to_netcdf(tas_file)
pr.to_dataset().to_netcdf(pr_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(tmp_path / "multi_*_in.nc"),
"-o",
str(output_file),
"-v",
"solidprcptot",
],
)
assert "Processing : solidprcptot" in results.output
out = xr.open_dataset(output_file)
assert out.solidprcptot.sum() == 0
def test_multi_output(tmp_path):
ds = open_dataset("ERA5/daily_surface_cancities_1990-1993.nc")
input_file = tmp_path / "ws_in.nc"
output_file = tmp_path / "out.nc"
ds.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"wind_speed_from_vector",
],
)
assert "Processing : wind_speed_from_vector" in results.output
def test_renaming_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.name = "tas"
tas.to_netcdf(input_file)
with xclim.set_options(cf_compliance="warn"):
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tn_mean",
"--tasmin",
"tas",
],
)
assert "Processing : tn_mean" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tn_mean[0] == 1.0
def test_indicator_chain(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"-v",
"tg_mean",
"growing_degree_days",
],
)
assert "Processing : tg_mean" in results.output
assert "Processing : growing_degree_days" in results.output
assert "100% Completed" in results.output
out = xr.open_dataset(output_file)
assert out.tg_mean[0] == 1.0
assert out.growing_degree_days[0] == 0
def test_missing_variable(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli, ["-i", str(input_file), "-o", str(output_file), "tn_mean"]
)
assert results.exit_code == 2
assert "'tasmin' was not found in the input dataset." in results.output
@pytest.mark.parametrize(
"options,output",
[
(["--dask-nthreads", "2"], "Error: '--dask-maxmem' must be given"),
(["--chunks", "time:90"], "100% Complete"),
(["--chunks", "time:90,lat:5"], "100% Completed"),
(["--version"], xclim.__version__),
],
)
def test_global_options(tas_series, tmp_path, options, output):
if "dask" in options[0]:
pytest.importorskip("dask.distributed")
tas = tas_series(np.ones(366), start="1/1/2000")
tas = xr.concat([tas] * 10, dim="lat")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
results = runner.invoke(
cli,
["-i", str(input_file), "-o", str(output_file)] + options + ["tg_mean"],
)
assert output in results.output
def test_bad_usage(tas_series, tmp_path):
tas = tas_series(np.ones(366), start="1/1/2000")
input_file = tmp_path / "tas.nc"
output_file = tmp_path / "out.nc"
tas.to_netcdf(input_file)
runner = CliRunner()
# No command
results = runner.invoke(cli, ["-i", str(input_file)])
assert "Missing command" in results.output
# Indicator not found:
results = runner.invoke(cli, ["info", "mean_ether_velocity"])
assert "Indicator 'mean_ether_velocity' not found in xclim" in results.output
# No input file given
results = runner.invoke(cli, ["-o", str(output_file), "base_flow_index"])
assert "No input file name given" in results.output
# No output file given
results = runner.invoke(cli, ["-i", str(input_file), "tg_mean"])
assert "No output file name given" in results.output
results = runner.invoke(
cli,
[
"-i",
str(input_file),
"-o",
str(output_file),
"--dask-nthreads",
"2",
"tg_mean",
],
)
if Client is None: # dask.distributed not installed
assert "distributed scheduler is not installed" in results.output
else:
assert "'--dask-maxmem' must be given" in results.output
@pytest.mark.parametrize("method, pattern", [("-r", "`GH/"), ("-m", "[GH/")])
def test_release_notes(method, pattern):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", method],
)
assert ":pull:`" not in results.output
assert ":issue:`" not in results.output
assert ":user:`" not in results.output
assert pattern in results.output
@pytest.mark.parametrize(
"method, error",
[
(
["-m", "-r"],
"Cannot return both Markdown and ReStructuredText in same release_notes call.",
),
(list(), "Must specify Markdown (-m) or ReStructuredText (-r)."),
],
)
def test_release_notes_failure(method, error):
runner = CliRunner()
results = runner.invoke(
cli,
["release_notes", *method],
)
assert error in results.output
avg_line_length: 27.95107 | max_line_length: 92 | alphanum_fraction: 0.595295
Row 2:

| field | value |
|---|---|
| hexsha | 795238cdd140f802daad16798b0f06c4193fee09 |
| size | 1,908 |
| ext / lang | py / Python |
| repo_path (stars, issues, forks) | encoding.py |
| repo_name (stars, issues, forks) | AnuragAnalog/Machine-learning |
| repo_head_hexsha (stars, issues, forks) | 4ec2657524cb54d715b0bddeb7d8ea3b7644a302 |
| repo_licenses (stars, issues, forks) | ["MIT"] |
| max_stars_count | 2 (events 2020-02-23T07:08:08.000Z to 2020-06-15T15:11:36.000Z) |
| max_issues_count | 1 (events 2020-01-08T13:30:33.000Z to 2020-01-08T13:30:33.000Z) |
| max_forks_count and fork event datetimes | null |

content:
#!/usr/bin/python3
import numpy as np
class OneHotEncoder():
def __init__(self):
self.unique = dict()
self.fit_called = False
self.row = 2
self.column = 2
def __str__(self):
if self.fit_called:
return "Encoding is: "+str(self.unique)
else:
return "call the fit method to initialize some parameters"
def __encode(self, index, n):
# Return a one-hot list of length n with a 1 at position `index`.
# (Compare values with ==, not identity, to avoid relying on int interning.)
return [1 if i == index else 0 for i in range(n)]
def fit(self, x):
index = 0
self.fit_called = True
unique_values = set(x)
for value in unique_values:
self.unique[value] = index
index = index + 1
self.row = len(x)
self.column = index
return
def transform(self, x):
encoded = list()
for col in x:
for key in self.unique.keys():
if col == key:
encoded.append(self.__encode(self.unique[key], self.column))
break
return np.array(encoded).reshape(self.row, self.column)
class categorize():
def __init__(self):
self.unique = dict()
self.fit_called = False
self.row = 2
def __str__(self):
if self.fit_called:
return "Encoding is: "+str(self.unique)
else:
return "call the fit method to initialize some parameters"
def fit(self, x):
index = 0
self.fit_called = True
unique_values = set(x)
for value in unique_values:
self.unique[value] = index
index = index + 1
self.row = len(x)
return
def transform(self, x):
encoded = list()
for col in x:
for key in self.unique.keys():
if col == key:
encoded.append(self.unique[key])
break
return np.array(encoded)
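# --- Usage sketch (editor's addition, not part of the original encoding.py) ---
# A minimal, hedged example of how the two classes above could be used.
if __name__ == "__main__":
    colors = ["red", "green", "blue", "green", "red"]

    ohe = OneHotEncoder()
    ohe.fit(colors)                  # learns a value -> column-index mapping
    print(ohe)                       # "Encoding is: {...}"
    print(ohe.transform(colors))     # shape (5, 3): one one-hot row per input value

    cat = categorize()
    cat.fit(colors)                  # learns a value -> integer-code mapping
    print(cat.transform(colors))     # array of 5 integer codes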
avg_line_length: 25.44 | max_line_length: 80 | alphanum_fraction: 0.527778
Row 3:

| field | value |
|---|---|
| hexsha | 795239ab78c6ead04df71b340941a444cfc25d19 |
| size | 61,589 |
| ext / lang | py / Python |
| max_stars_repo_path | pandas/plotting/_core.py |
| max_stars_repo_name | liaoaoyuan97/pandas |
| max_stars_repo_head_hexsha | 6e707f2f68a4ed7fd990438bc489374641374cff |
| max_stars_repo_licenses | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "ECL-2.0", "BSD-3-Clause"] |
| max_stars_count | 1 (events 2021-03-24T21:56:40.000Z to 2021-03-24T21:56:40.000Z) |
| repo_path (issues, forks) | pandas/plotting/_core.py |
| repo_name (issues, forks) | amrezz13/pandas |
| repo_head_hexsha (issues, forks) | 2fad6a38f8ecb1f4f7568ca57cb974f7f058cb9d |
| repo_licenses (issues, forks) | ["PSF-2.0", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "MIT-0", "ECL-2.0", "BSD-3-Clause"] |
| issue/fork counts and event datetimes | null |

content:
import importlib
from typing import TYPE_CHECKING, Optional, Sequence, Tuple, Union
from pandas._config import get_option
from pandas._typing import Label
from pandas.util._decorators import Appender, Substitution
from pandas.core.dtypes.common import is_integer, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.base import PandasObject
if TYPE_CHECKING:
from pandas import DataFrame
def hist_series(
self,
by=None,
ax=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
figsize: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Draw histogram of the input series using matplotlib.
Parameters
----------
by : object, optional
If passed, then used to form histograms for separate groups.
ax : matplotlib axis object
If not passed, uses gca().
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels.
figsize : tuple, default None
Figure size in inches by default.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
To be passed to the actual plotting function.
Returns
-------
matplotlib.AxesSubplot
A histogram plot.
See Also
--------
matplotlib.axes.Axes.hist : Plot a histogram using matplotlib.
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_series(
self,
by=by,
ax=ax,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
figsize=figsize,
bins=bins,
legend=legend,
**kwargs,
)
def hist_frame(
data: "DataFrame",
column: Union[Label, Sequence[Label]] = None,
by=None,
grid: bool = True,
xlabelsize: Optional[int] = None,
xrot: Optional[float] = None,
ylabelsize: Optional[int] = None,
yrot: Optional[float] = None,
ax=None,
sharex: bool = False,
sharey: bool = False,
figsize: Optional[Tuple[int, int]] = None,
layout: Optional[Tuple[int, int]] = None,
bins: Union[int, Sequence[int]] = 10,
backend: Optional[str] = None,
legend: bool = False,
**kwargs,
):
"""
Make a histogram of the DataFrame's columns.
A `histogram`_ is a representation of the distribution of data.
This function calls :meth:`matplotlib.pyplot.hist`, on each series in
the DataFrame, resulting in one histogram per column.
.. _histogram: https://en.wikipedia.org/wiki/Histogram
Parameters
----------
data : DataFrame
The pandas object holding the data.
column : str or sequence
If passed, will be used to limit data to a subset of columns.
by : object, optional
If passed, then used to form histograms for separate groups.
grid : bool, default True
Whether to show axis grid lines.
xlabelsize : int, default None
If specified changes the x-axis label size.
xrot : float, default None
Rotation of x axis labels. For example, a value of 90 displays the
x labels rotated 90 degrees clockwise.
ylabelsize : int, default None
If specified changes the y-axis label size.
yrot : float, default None
Rotation of y axis labels. For example, a value of 90 displays the
y labels rotated 90 degrees clockwise.
ax : Matplotlib axes object, default None
The axes to plot the histogram on.
sharex : bool, default True if ax is None else False
In case subplots=True, share x axis and set some x axis labels to
invisible; defaults to True if ax is None otherwise False if an ax
is passed in.
Note that passing in both an ax and sharex=True will alter all x axis
labels for all subplots in a figure.
sharey : bool, default False
In case subplots=True, share y axis and set some y axis labels to
invisible.
figsize : tuple
The size in inches of the figure to create. Uses the value in
`matplotlib.rcParams` by default.
layout : tuple, optional
Tuple of (rows, columns) for the layout of the histograms.
bins : int or sequence, default 10
Number of histogram bins to be used. If an integer is given, bins + 1
bin edges are calculated and returned. If bins is a sequence, gives
bin edges, including left edge of first bin and right edge of last
bin. In this case, bins is returned unmodified.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
legend : bool, default False
Whether to show the legend.
.. versionadded:: 1.1.0
**kwargs
All other plotting keyword arguments to be passed to
:meth:`matplotlib.pyplot.hist`.
Returns
-------
matplotlib.AxesSubplot or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.hist : Plot a histogram using matplotlib.
Examples
--------
This example draws a histogram based on the length and width of
some animals, displayed in three bins
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'length': [1.5, 0.5, 1.2, 0.9, 3],
... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]
... }, index=['pig', 'rabbit', 'duck', 'chicken', 'horse'])
>>> hist = df.hist(bins=3)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.hist_frame(
data,
column=column,
by=by,
grid=grid,
xlabelsize=xlabelsize,
xrot=xrot,
ylabelsize=ylabelsize,
yrot=yrot,
ax=ax,
sharex=sharex,
sharey=sharey,
figsize=figsize,
layout=layout,
legend=legend,
bins=bins,
**kwargs,
)
_boxplot_doc = """
Make a box plot from DataFrame columns.
Make a box-and-whisker plot from DataFrame columns, optionally grouped
by some other columns. A box plot is a method for graphically depicting
groups of numerical data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. By default, they extend no more than
`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest
data point within that interval. Outliers are plotted as separate dots.
For further details see
Wikipedia's entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`_.
Parameters
----------
column : str or list of str, optional
Column name or list of names, or vector.
Can be any valid input to :meth:`pandas.DataFrame.groupby`.
by : str or array-like, optional
Column in the DataFrame to :meth:`pandas.DataFrame.groupby`.
One box-plot will be done per value of columns in `by`.
ax : object of class matplotlib.axes.Axes, optional
The matplotlib axes to be used by boxplot.
fontsize : float or str
Tick label font size in points or as a string (e.g., `large`).
rot : int or float, default 0
The rotation angle of labels (in degrees)
with respect to the screen coordinate system.
grid : bool, default True
Setting this to True will show the grid.
figsize : A tuple (width, height) in inches
The size of the figure to create in matplotlib.
layout : tuple (rows, columns), optional
For example, (3, 5) will display the subplots
using 3 columns and 5 rows, starting from the top-left.
return_type : {'axes', 'dict', 'both'} or None, default 'axes'
The kind of object to return. The default is ``axes``.
* 'axes' returns the matplotlib axes the boxplot is drawn on.
* 'dict' returns a dictionary whose values are the matplotlib
Lines of the boxplot.
* 'both' returns a namedtuple with the axes and dict.
* when grouping with ``by``, a Series mapping columns to
``return_type`` is returned.
If ``return_type`` is `None`, a NumPy array
of axes with the same shape as ``layout`` is returned.
%(backend)s\
**kwargs
All other plotting keyword arguments to be passed to
:func:`matplotlib.pyplot.boxplot`.
Returns
-------
result
See Notes.
See Also
--------
Series.plot.hist: Make a histogram.
matplotlib.pyplot.boxplot : Matplotlib equivalent plot.
Notes
-----
The return type depends on the `return_type` parameter:
* 'axes' : object of class matplotlib.axes.Axes
* 'dict' : dict of matplotlib.lines.Line2D objects
* 'both' : a namedtuple with structure (ax, lines)
For data grouped with ``by``, return a Series of the above or a numpy
array:
* :class:`~pandas.Series`
* :class:`~numpy.array` (for ``return_type = None``)
Use ``return_type='dict'`` when you want to tweak the appearance
of the lines after plotting. In this case a dict containing the Lines
making up the boxes, caps, fliers, medians, and whiskers is returned.
Examples
--------
Boxplots can be created for every column in the dataframe
by ``df.boxplot()`` or indicating the columns to be used:
.. plot::
:context: close-figs
>>> np.random.seed(1234)
>>> df = pd.DataFrame(np.random.randn(10, 4),
... columns=['Col1', 'Col2', 'Col3', 'Col4'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3'])
Boxplots of variables distributions grouped by the values of a third
variable can be created using the option ``by``. For instance:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 2),
... columns=['Col1', 'Col2'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> boxplot = df.boxplot(by='X')
A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot
in order to group the data by combination of the variables in the x-axis:
.. plot::
:context: close-figs
>>> df = pd.DataFrame(np.random.randn(10, 3),
... columns=['Col1', 'Col2', 'Col3'])
>>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A',
... 'B', 'B', 'B', 'B', 'B'])
>>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A',
... 'B', 'A', 'B', 'A', 'B'])
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
The layout of boxplot can be adjusted giving a tuple to ``layout``:
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... layout=(2, 1))
Additional formatting can be done to the boxplot, like suppressing the grid
(``grid=False``), rotating the labels in the x-axis (i.e. ``rot=45``)
or changing the fontsize (i.e. ``fontsize=15``):
.. plot::
:context: close-figs
>>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15)
The parameter ``return_type`` can be used to select the type of element
returned by `boxplot`. When ``return_type='axes'`` is selected,
the matplotlib axes on which the boxplot is drawn are returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes')
>>> type(boxplot)
<class 'matplotlib.axes._subplots.AxesSubplot'>
When grouping with ``by``, a Series mapping columns to ``return_type``
is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type='axes')
>>> type(boxplot)
<class 'pandas.core.series.Series'>
If ``return_type`` is `None`, a NumPy array of axes with the same shape
as ``layout`` is returned:
>>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X',
... return_type=None)
>>> type(boxplot)
<class 'numpy.ndarray'>
"""
_backend_doc = """\
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
"""
_bar_or_line_doc = """
Parameters
----------
x : label or position, optional
Allows plotting of one column versus another. If not specified,
the index of the DataFrame is used.
y : label or position, optional
Allows plotting of one column versus another. If not specified,
all numerical columns are used.
color : str, array_like, or dict, optional
The color for each of the DataFrame's columns. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be applied to the columns in turn. For
instance, with ['green', 'yellow'], each column's %(kind)s will be filled in
green or yellow alternately. If there is only a single column to
be plotted, then only the first color from the color list will be
used.
- A dict of the form {column name : color}, so that each column will be
colored accordingly. For example, if your columns are called `a` and
`b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for
column `a` in green and %(kind)ss for column `b` in red.
.. versionadded:: 1.1.0
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
An ndarray is returned with one :class:`matplotlib.axes.Axes`
per column when ``subplots=True``.
"""
@Substitution(backend="")
@Appender(_boxplot_doc)
def boxplot(
data,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
**kwargs,
):
plot_backend = _get_plot_backend("matplotlib")
return plot_backend.boxplot(
data,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
@Substitution(backend=_backend_doc)
@Appender(_boxplot_doc)
def boxplot_frame(
self,
column=None,
by=None,
ax=None,
fontsize=None,
rot=0,
grid=True,
figsize=None,
layout=None,
return_type=None,
backend=None,
**kwargs,
):
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame(
self,
column=column,
by=by,
ax=ax,
fontsize=fontsize,
rot=rot,
grid=grid,
figsize=figsize,
layout=layout,
return_type=return_type,
**kwargs,
)
def boxplot_frame_groupby(
grouped,
subplots=True,
column=None,
fontsize=None,
rot=0,
grid=True,
ax=None,
figsize=None,
layout=None,
sharex=False,
sharey=True,
backend=None,
**kwargs,
):
"""
Make box plots from DataFrameGroupBy data.
Parameters
----------
grouped : Grouped DataFrame
subplots : bool
* ``False`` - no subplots will be used
* ``True`` - create a subplot for each group.
column : column name or list of names, or vector
Can be any valid input to groupby.
fontsize : int or str
rot : label rotation angle
grid : Setting this to True will show the grid
ax : Matplotlib axis object, default None
figsize : A tuple (width, height) in inches
layout : tuple (optional)
The layout of the plot: (rows, columns).
sharex : bool, default False
Whether x-axes will be shared among subplots.
sharey : bool, default True
Whether y-axes will be shared among subplots.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
All other plotting keyword arguments to be passed to
matplotlib's boxplot function.
Returns
-------
dict of key/value = group key/DataFrame.boxplot return value,
or DataFrame.boxplot return value in case ``subplots=False``.
Examples
--------
You can create boxplots for grouped data and show them as separate subplots:
.. plot::
:context: close-figs
>>> import itertools
>>> tuples = [t for t in itertools.product(range(1000), range(4))]
>>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1'])
>>> data = np.random.randn(len(index),4)
>>> df = pd.DataFrame(data, columns=list('ABCD'), index=index)
>>> grouped = df.groupby(level='lvl1')
>>> grouped.boxplot(rot=45, fontsize=12, figsize=(8,10))
The ``subplots=False`` option shows the boxplots in a single figure.
.. plot::
:context: close-figs
>>> grouped.boxplot(subplots=False, rot=45, fontsize=12)
"""
plot_backend = _get_plot_backend(backend)
return plot_backend.boxplot_frame_groupby(
grouped,
subplots=subplots,
column=column,
fontsize=fontsize,
rot=rot,
grid=grid,
ax=ax,
figsize=figsize,
layout=layout,
sharex=sharex,
sharey=sharey,
**kwargs,
)
class PlotAccessor(PandasObject):
"""
Make plots of Series or DataFrame.
Uses the backend specified by the
option ``plotting.backend``. By default, matplotlib is used.
Parameters
----------
data : Series or DataFrame
The object for which the method is called.
x : label or position, default None
Only used if data is a DataFrame.
y : label, position or list of label, positions, default None
Allows plotting of one column versus another. Only used if data is a
DataFrame.
kind : str
The kind of plot to produce:
- 'line' : line plot (default)
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : boxplot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot (DataFrame only)
- 'hexbin' : hexbin plot (DataFrame only)
ax : matplotlib axes object, default None
An axes of the current figure.
subplots : bool, default False
Make separate subplots for each column.
sharex : bool, default True if ax is None else False
In case ``subplots=True``, share x axis and set some x axis labels
to invisible; defaults to True if ax is None otherwise False if
an ax is passed in; Be aware, that passing in both an ax and
``sharex=True`` will alter all x axis labels for all axis in a figure.
sharey : bool, default False
In case ``subplots=True``, share y axis and set some y axis labels to invisible.
layout : tuple, optional
(rows, columns) for the layout of subplots.
figsize : a tuple (width, height) in inches
Size of a figure object.
use_index : bool, default True
Use index as ticks for x axis.
title : str or list
Title to use for the plot. If a string is passed, print the string
at the top of the figure. If a list is passed and `subplots` is
True, print each item in the list above the corresponding subplot.
grid : bool, default None (matlab style default)
Axis grid lines.
legend : bool or {'reverse'}
Place legend on axis subplots.
style : list or dict
The matplotlib line style per column.
logx : bool or 'sym', default False
Use log scaling or symlog scaling on x axis.
.. versionchanged:: 0.25.0
logy : bool or 'sym' default False
Use log scaling or symlog scaling on y axis.
.. versionchanged:: 0.25.0
loglog : bool or 'sym', default False
Use log scaling or symlog scaling on both x and y axes.
.. versionchanged:: 0.25.0
xticks : sequence
Values to use for the xticks.
yticks : sequence
Values to use for the yticks.
xlim : 2-tuple/list
Set the x limits of the current axes.
ylim : 2-tuple/list
Set the y limits of the current axes.
xlabel : label, optional
Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the
x-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
ylabel : label, optional
Name to use for the ylabel on y-axis. Default will show no ylabel, or the
y-column name for planar plots.
.. versionadded:: 1.1.0
.. versionchanged:: 1.2.0
Now applicable to planar plots (`scatter`, `hexbin`).
rot : int, default None
Rotation for ticks (xticks for vertical, yticks for horizontal
plots).
fontsize : int, default None
Font size for xticks and yticks.
colormap : str or matplotlib colormap object, default None
Colormap to select colors from. If string, load colormap with that
name from matplotlib.
colorbar : bool, optional
If True, plot colorbar (only relevant for 'scatter' and 'hexbin'
plots).
position : float
Specify relative alignments for bar plot layout.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center).
table : bool, Series or DataFrame, default False
If True, draw a table using the data in the DataFrame and the data
will be transposed to meet matplotlib's default layout.
If a Series or DataFrame is passed, use passed data to draw a
table.
yerr : DataFrame, Series, array-like, dict and str
See :ref:`Plotting with Error Bars <visualization.errorbars>` for
detail.
xerr : DataFrame, Series, array-like, dict and str
Equivalent to yerr.
stacked : bool, default False in line and bar plots, and True in area plot
If True, create stacked plot.
sort_columns : bool, default False
Sort column names to determine plot ordering.
secondary_y : bool or sequence, default False
Whether to plot on the secondary y-axis if a list/tuple, which
columns to plot on secondary y-axis.
mark_right : bool, default True
When using a secondary_y axis, automatically mark the column
labels with "(right)" in the legend.
include_bool : bool, default is False
If True, boolean values can be plotted.
backend : str, default None
Backend to use instead of the backend specified in the option
``plotting.backend``. For instance, 'matplotlib'. Alternatively, to
specify the ``plotting.backend`` for the whole session, set
``pd.options.plotting.backend``.
.. versionadded:: 1.0.0
**kwargs
Options to pass to matplotlib plotting method.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
If the backend is not the default matplotlib one, the return value
will be the object returned by the backend.
Notes
-----
- See matplotlib documentation online for more on this subject
- If `kind` = 'bar' or 'barh', you can specify relative alignments
for bar plot layout by `position` keyword.
From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5
(center)
"""
_common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box")
_series_kinds = ("pie",)
_dataframe_kinds = ("scatter", "hexbin")
_kind_aliases = {"density": "kde"}
_all_kinds = _common_kinds + _series_kinds + _dataframe_kinds
def __init__(self, data):
self._parent = data
@staticmethod
def _get_call_args(backend_name, data, args, kwargs):
"""
This function makes calls to this accessor `__call__` method compatible
with the previous `SeriesPlotMethods.__call__` and
`DataFramePlotMethods.__call__`. Those had slightly different
signatures, since `DataFramePlotMethods` accepted `x` and `y`
parameters.
"""
if isinstance(data, ABCSeries):
arg_def = [
("kind", "line"),
("ax", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", False),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("label", None),
("secondary_y", False),
("xlabel", None),
("ylabel", None),
]
elif isinstance(data, ABCDataFrame):
arg_def = [
("x", None),
("y", None),
("kind", "line"),
("ax", None),
("subplots", False),
("sharex", None),
("sharey", False),
("layout", None),
("figsize", None),
("use_index", True),
("title", None),
("grid", None),
("legend", True),
("style", None),
("logx", False),
("logy", False),
("loglog", False),
("xticks", None),
("yticks", None),
("xlim", None),
("ylim", None),
("rot", None),
("fontsize", None),
("colormap", None),
("table", False),
("yerr", None),
("xerr", None),
("secondary_y", False),
("sort_columns", False),
("xlabel", None),
("ylabel", None),
]
else:
raise TypeError(
f"Called plot accessor for type {type(data).__name__}, "
"expected Series or DataFrame"
)
if args and isinstance(data, ABCSeries):
positional_args = str(args)[1:-1]
keyword_args = ", ".join(
f"{name}={repr(value)}" for (name, default), value in zip(arg_def, args)
)
msg = (
"`Series.plot()` should not be called with positional "
"arguments, only keyword arguments. The order of "
"positional arguments will change in the future. "
f"Use `Series.plot({keyword_args})` instead of "
f"`Series.plot({positional_args})`."
)
raise TypeError(msg)
pos_args = {name: value for value, (name, _) in zip(args, arg_def)}
if backend_name == "pandas.plotting._matplotlib":
kwargs = dict(arg_def, **pos_args, **kwargs)
else:
kwargs = dict(pos_args, **kwargs)
x = kwargs.pop("x", None)
y = kwargs.pop("y", None)
kind = kwargs.pop("kind", "line")
return x, y, kind, kwargs
def __call__(self, *args, **kwargs):
plot_backend = _get_plot_backend(kwargs.pop("backend", None))
x, y, kind, kwargs = self._get_call_args(
plot_backend.__name__, self._parent, args, kwargs
)
kind = self._kind_aliases.get(kind, kind)
# when using another backend, get out of the way
if plot_backend.__name__ != "pandas.plotting._matplotlib":
return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs)
if kind not in self._all_kinds:
raise ValueError(f"{kind} is not a valid plot kind")
# The original data structured can be transformed before passed to the
# backend. For example, for DataFrame is common to set the index as the
# `x` parameter, and return a Series with the parameter `y` as values.
data = self._parent.copy()
if isinstance(data, ABCSeries):
kwargs["reuse_plot"] = True
if kind in self._dataframe_kinds:
if isinstance(data, ABCDataFrame):
return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs)
else:
raise ValueError(f"plot kind {kind} can only be used for data frames")
elif kind in self._series_kinds:
if isinstance(data, ABCDataFrame):
if y is None and kwargs.get("subplots") is False:
raise ValueError(
f"{kind} requires either y column or 'subplots=True'"
)
elif y is not None:
if is_integer(y) and not data.columns.holds_integer():
y = data.columns[y]
# converted to series actually. copy to not modify
data = data[y].copy()
data.index.name = y
elif isinstance(data, ABCDataFrame):
data_cols = data.columns
if x is not None:
if is_integer(x) and not data.columns.holds_integer():
x = data_cols[x]
elif not isinstance(data[x], ABCSeries):
raise ValueError("x must be a label or position")
data = data.set_index(x)
if y is not None:
# check if we have y as int or list of ints
int_ylist = is_list_like(y) and all(is_integer(c) for c in y)
int_y_arg = is_integer(y) or int_ylist
if int_y_arg and not data.columns.holds_integer():
y = data_cols[y]
label_kw = kwargs["label"] if "label" in kwargs else False
for kw in ["xerr", "yerr"]:
if kw in kwargs and (
isinstance(kwargs[kw], str) or is_integer(kwargs[kw])
):
try:
kwargs[kw] = data[kwargs[kw]]
except (IndexError, KeyError, TypeError):
pass
# don't overwrite
data = data[y].copy()
if isinstance(data, ABCSeries):
label_name = label_kw or y
data.name = label_name
else:
match = is_list_like(label_kw) and len(label_kw) == len(y)
if label_kw and not match:
raise ValueError(
"label should be list-like and same length as y"
)
label_name = label_kw or data.columns
data.columns = label_name
return plot_backend.plot(data, kind=kind, **kwargs)
__call__.__doc__ = __doc__
@Appender(
"""
See Also
--------
matplotlib.pyplot.plot : Plot y versus x as lines and/or markers.
Examples
--------
.. plot::
:context: close-figs
>>> s = pd.Series([1, 3, 2])
>>> s.plot.line()
.. plot::
:context: close-figs
The following example shows the populations for some animals
over the years.
>>> df = pd.DataFrame({
... 'pig': [20, 18, 489, 675, 1776],
... 'horse': [4, 25, 281, 600, 1900]
... }, index=[1990, 1997, 2003, 2009, 2014])
>>> lines = df.plot.line()
.. plot::
:context: close-figs
An example with subplots, so an array of axes is returned.
>>> axes = df.plot.line(subplots=True)
>>> type(axes)
<class 'numpy.ndarray'>
.. plot::
:context: close-figs
Let's repeat the same example, but specifying colors for
each column (in this case, for each animal).
>>> axes = df.plot.line(
... subplots=True, color={"pig": "pink", "horse": "#742802"}
... )
.. plot::
:context: close-figs
The following example shows the relationship between both
populations.
>>> lines = df.plot.line(x='pig', y='horse')
"""
)
@Substitution(kind="line")
@Appender(_bar_or_line_doc)
def line(self, x=None, y=None, **kwargs):
"""
Plot Series or DataFrame as lines.
This function is useful to plot lines using DataFrame's values
as coordinates.
"""
return self(kind="line", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.barh : Horizontal bar plot.
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.bar : Make a bar plot with matplotlib.
Examples
--------
Basic plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]})
>>> ax = df.plot.bar(x='lab', y='val', rot=0)
Plot a whole dataframe to a bar plot. Each column is assigned a
distinct color, and each row is nested in a group along the
horizontal axis.
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.bar(rot=0)
Plot stacked bar charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.bar(stacked=True)
Instead of nesting, the figure can be split by column with
``subplots=True``. In this case, a :class:`numpy.ndarray` of
:class:`matplotlib.axes.Axes` are returned.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(rot=0, subplots=True)
>>> axes[1].legend(loc=2) # doctest: +SKIP
If you don't like the default colours, you can specify how you'd
like each column to be colored.
.. plot::
:context: close-figs
>>> axes = df.plot.bar(
... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"}
... )
>>> axes[1].legend(loc=2) # doctest: +SKIP
Plot a single column.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(y='speed', rot=0)
Plot only selected categories for the DataFrame.
.. plot::
:context: close-figs
>>> ax = df.plot.bar(x='lifespan', rot=0)
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def bar(self, x=None, y=None, **kwargs):
"""
Vertical bar plot.
A bar plot is a plot that presents categorical data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="bar", x=x, y=y, **kwargs)
@Appender(
"""
See Also
--------
DataFrame.plot.bar: Vertical bar plot.
DataFrame.plot : Make plots of DataFrame using matplotlib.
matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib.
Examples
--------
Basic example
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]})
>>> ax = df.plot.barh(x='lab', y='val')
Plot a whole DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh()
Plot stacked barh charts for the DataFrame
.. plot::
:context: close-figs
>>> ax = df.plot.barh(stacked=True)
We can specify colors for each column
.. plot::
:context: close-figs
>>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"})
Plot a column of the DataFrame to a horizontal bar plot
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(y='speed')
Plot DataFrame versus the desired column
.. plot::
:context: close-figs
>>> speed = [0.1, 17.5, 40, 48, 52, 69, 88]
>>> lifespan = [2, 8, 70, 1.5, 25, 12, 28]
>>> index = ['snail', 'pig', 'elephant',
... 'rabbit', 'giraffe', 'coyote', 'horse']
>>> df = pd.DataFrame({'speed': speed,
... 'lifespan': lifespan}, index=index)
>>> ax = df.plot.barh(x='lifespan')
"""
)
@Substitution(kind="bar")
@Appender(_bar_or_line_doc)
def barh(self, x=None, y=None, **kwargs):
"""
Make a horizontal bar plot.
A horizontal bar plot is a plot that presents quantitative data with
rectangular bars with lengths proportional to the values that they
represent. A bar plot shows comparisons among discrete categories. One
axis of the plot shows the specific categories being compared, and the
other axis represents a measured value.
"""
return self(kind="barh", x=x, y=y, **kwargs)
def box(self, by=None, **kwargs):
r"""
Make a box plot of the DataFrame columns.
A box plot is a method for graphically depicting groups of numerical
data through their quartiles.
The box extends from the Q1 to Q3 quartile values of the data,
with a line at the median (Q2). The whiskers extend from the edges
of box to show the range of the data. The position of the whiskers
is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the
box. Outlier points are those past the end of the whiskers.
For further details see Wikipedia's
entry for `boxplot <https://en.wikipedia.org/wiki/Box_plot>`__.
A consideration when using this chart is that the box and the whiskers
can overlap, which is very common when plotting small sets of data.
Parameters
----------
by : str or sequence
Column in the DataFrame to group by.
**kwargs
Additional keywords are documented in
:meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
DataFrame.boxplot: Another method to draw a box plot.
Series.plot.box: Draw a box plot from a Series object.
matplotlib.pyplot.boxplot: Draw a box plot in matplotlib.
Examples
--------
Draw a box plot from a DataFrame with four columns of randomly
generated data.
.. plot::
:context: close-figs
>>> data = np.random.randn(25, 4)
>>> df = pd.DataFrame(data, columns=list('ABCD'))
>>> ax = df.plot.box()
"""
return self(kind="box", by=by, **kwargs)
def hist(self, by=None, bins=10, **kwargs):
"""
Draw one histogram of the DataFrame's columns.
A histogram is a representation of the distribution of data.
This function groups the values of all given Series in the DataFrame
into bins and draws all bins in one :class:`matplotlib.axes.Axes`.
This is useful when the DataFrame's Series are in a similar scale.
Parameters
----------
by : str or sequence, optional
Column in the DataFrame to group by.
bins : int, default 10
Number of histogram bins to be used.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
class:`matplotlib.AxesSubplot`
Return a histogram plot.
See Also
--------
DataFrame.hist : Draw histograms per DataFrame's Series.
Series.hist : Draw a histogram with Series' data.
Examples
--------
When we roll a die 6000 times, we expect each value to come up around 1000
times. But when we roll two dice and sum the results, the distribution
is going to be quite different. A histogram illustrates those
distributions.
.. plot::
:context: close-figs
>>> df = pd.DataFrame(
... np.random.randint(1, 7, 6000),
... columns = ['one'])
>>> df['two'] = df['one'] + np.random.randint(1, 7, 6000)
>>> ax = df.plot.hist(bins=12, alpha=0.5)
"""
return self(kind="hist", by=by, bins=bins, **kwargs)
def kde(self, bw_method=None, ind=None, **kwargs):
"""
Generate Kernel Density Estimate plot using Gaussian kernels.
In statistics, `kernel density estimation`_ (KDE) is a non-parametric
way to estimate the probability density function (PDF) of a random
variable. This function uses Gaussian kernels and includes automatic
bandwidth determination.
.. _kernel density estimation:
https://en.wikipedia.org/wiki/Kernel_density_estimation
Parameters
----------
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable.
If None (default), 'scott' is used.
See :class:`scipy.stats.gaussian_kde` for more information.
ind : NumPy array or int, optional
Evaluation points for the estimated PDF. If None (default),
1000 equally spaced points are used. If `ind` is a NumPy array, the
KDE is evaluated at the points passed. If `ind` is an integer,
`ind` number of equally spaced points are used.
**kwargs
Additional keyword arguments are documented in
:meth:`pandas.%(this-datatype)s.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray of them
See Also
--------
scipy.stats.gaussian_kde : Representation of a kernel-density
estimate using Gaussian kernels. This is the function used
internally to estimate the PDF.
Examples
--------
Given a Series of points randomly sampled from an unknown
distribution, estimate its PDF using KDE with automatic
bandwidth determination and plot the results, evaluating them at
1000 equally spaced points (default):
.. plot::
:context: close-figs
>>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5])
>>> ax = s.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = s.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5])
For DataFrame, it works in the same way:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'x': [1, 2, 2.5, 3, 3.5, 4, 5],
... 'y': [4, 4, 4.5, 5, 5.5, 6, 6],
... })
>>> ax = df.plot.kde()
A scalar bandwidth can be specified. Using a small bandwidth value can
lead to over-fitting, while using a large bandwidth value may result
in under-fitting:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=0.3)
.. plot::
:context: close-figs
>>> ax = df.plot.kde(bw_method=3)
Finally, the `ind` parameter determines the evaluation points for the
plot of the estimated PDF:
.. plot::
:context: close-figs
>>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6])
"""
return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs)
density = kde
def area(self, x=None, y=None, **kwargs):
"""
Draw a stacked area plot.
An area plot displays quantitative data visually.
This function wraps the matplotlib area function.
Parameters
----------
x : label or position, optional
Coordinates for the X axis. By default uses the index.
y : label or position, optional
Column to plot. By default uses all columns.
stacked : bool, default True
Area plots are stacked by default. Set to False to create an
unstacked plot.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or numpy.ndarray
Area plot, or array of area plots if subplots is True.
See Also
--------
DataFrame.plot : Make plots of DataFrame using matplotlib / pylab.
Examples
--------
Draw an area plot based on basic business metrics:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3, 9, 10, 6],
... 'signups': [5, 5, 6, 12, 14, 13],
... 'visits': [20, 42, 28, 62, 81, 50],
... }, index=pd.date_range(start='2018/01/01', end='2018/07/01',
... freq='M'))
>>> ax = df.plot.area()
Area plots are stacked by default. To produce an unstacked plot,
pass ``stacked=False``:
.. plot::
:context: close-figs
>>> ax = df.plot.area(stacked=False)
Draw an area plot for a single column:
.. plot::
:context: close-figs
>>> ax = df.plot.area(y='sales')
Draw with a different `x`:
.. plot::
:context: close-figs
>>> df = pd.DataFrame({
... 'sales': [3, 2, 3],
... 'visits': [20, 42, 28],
... 'day': [1, 2, 3],
... })
>>> ax = df.plot.area(x='day')
"""
return self(kind="area", x=x, y=y, **kwargs)
def pie(self, **kwargs):
"""
Generate a pie plot.
A pie plot is a proportional representation of the numerical data in a
column. This function wraps :meth:`matplotlib.pyplot.pie` for the
specified column. If no column reference is passed and
``subplots=True`` a pie plot is drawn for each numerical column
independently.
Parameters
----------
y : int or label, optional
Label or position of the column to plot.
If not provided, ``subplots=True`` argument must be passed.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
matplotlib.axes.Axes or np.ndarray of them
A NumPy array is returned when `subplots` is True.
See Also
--------
Series.plot.pie : Generate a pie plot for a Series.
DataFrame.plot : Make plots of a DataFrame.
Examples
--------
In the example below we have a DataFrame with the information about
planet's mass and radius. We pass the 'mass' column to the
pie function to get a pie plot.
.. plot::
:context: close-figs
>>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97],
... 'radius': [2439.7, 6051.8, 6378.1]},
... index=['Mercury', 'Venus', 'Earth'])
>>> plot = df.plot.pie(y='mass', figsize=(5, 5))
.. plot::
:context: close-figs
>>> plot = df.plot.pie(subplots=True, figsize=(11, 6))
"""
if (
isinstance(self._parent, ABCDataFrame)
and kwargs.get("y", None) is None
and not kwargs.get("subplots", False)
):
raise ValueError("pie requires either y column or 'subplots=True'")
return self(kind="pie", **kwargs)
def scatter(self, x, y, s=None, c=None, **kwargs):
"""
Create a scatter plot with varying marker point size and color.
The coordinates of each point are defined by two dataframe columns and
filled circles are used to represent each point. This kind of plot is
useful to see complex correlations between two variables. Points could
be for instance natural 2D coordinates like longitude and latitude in
a map or, in general, any pair of metrics that can be plotted against
each other.
Parameters
----------
x : int or str
The column name or column position to be used as horizontal
coordinates for each point.
y : int or str
The column name or column position to be used as vertical
coordinates for each point.
s : str, scalar or array_like, optional
The size of each point. Possible values are:
- A string with the name of the column to be used for marker's size.
- A single scalar so all points have the same size.
- A sequence of scalars, which will be applied to the points in turn.
For instance, when passing [2, 14], the point sizes will alternate
between 2 and 14.
.. versionchanged:: 1.1.0
c : str, int or array_like, optional
The color of each point. Possible values are:
- A single color string referred to by name, RGB or RGBA code,
for instance 'red' or '#a98d19'.
- A sequence of color strings referred to by name, RGB or RGBA
code, which will be applied to the points in turn. For
instance, with ['green', 'yellow'], the points will be filled in green and
yellow alternately.
- A column name or position whose values will be used to color the
marker points according to a colormap.
**kwargs
Keyword arguments to pass on to :meth:`DataFrame.plot`.
Returns
-------
:class:`matplotlib.axes.Axes` or numpy.ndarray of them
See Also
--------
matplotlib.pyplot.scatter : Scatter plot using multiple input data
formats.
Examples
--------
Let's see how to draw a scatter plot using coordinates from the values
in a DataFrame's columns.
.. plot::
:context: close-figs
>>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1],
... [6.4, 3.2, 1], [5.9, 3.0, 2]],
... columns=['length', 'width', 'species'])
>>> ax1 = df.plot.scatter(x='length',
... y='width',
... c='DarkBlue')
And now with the color determined by a column as well.
.. plot::
:context: close-figs
>>> ax2 = df.plot.scatter(x='length',
... y='width',
... c='species',
... colormap='viridis')
"""
return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs)
def hexbin(self, x, y, C=None, reduce_C_function=None, gridsize=None, **kwargs):
"""
Generate a hexagonal binning plot.
Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None`
(the default), this is a histogram of the number of occurrences
of the observations at ``(x[i], y[i])``.
If `C` is specified, specifies values at given coordinates
``(x[i], y[i])``. These values are accumulated for each hexagonal
bin and then reduced according to `reduce_C_function`, which
defaults to NumPy's mean function (:meth:`numpy.mean`).
(If `C` is specified, it must also be a 1-D sequence
of the same length as `x` and `y`, or a column label.)
Parameters
----------
x : int or str
The column label or position for x points.
y : int or str
The column label or position for y points.
C : int or str, optional
The column label or position for the value of `(x, y)` point.
reduce_C_function : callable, default `np.mean`
Function of one argument that reduces all the values in a bin to
a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`).
gridsize : int or tuple of (int, int), default 100
The number of hexagons in the x-direction.
The corresponding number of hexagons in the y-direction is
chosen in a way that the hexagons are approximately regular.
Alternatively, gridsize can be a tuple with two elements
specifying the number of hexagons in the x-direction and the
y-direction.
**kwargs
Additional keyword arguments are documented in
:meth:`DataFrame.plot`.
Returns
-------
matplotlib.AxesSubplot
The matplotlib ``Axes`` on which the hexbin is plotted.
See Also
--------
DataFrame.plot : Make plots of a DataFrame.
matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib,
the matplotlib function that is used under the hood.
Examples
--------
The following examples are generated with random data from
a normal distribution.
.. plot::
:context: close-figs
>>> n = 10000
>>> df = pd.DataFrame({'x': np.random.randn(n),
... 'y': np.random.randn(n)})
>>> ax = df.plot.hexbin(x='x', y='y', gridsize=20)
The next example uses `C` and `np.sum` as `reduce_C_function`.
        Note that the `'observations'` values range from 1 to 4, yet the
        resulting plot shows values above 25. This is the effect of
        `reduce_C_function` (here `np.sum`) aggregating all the values that
        fall into the same bin.
.. plot::
:context: close-figs
>>> n = 500
>>> df = pd.DataFrame({
... 'coord_x': np.random.uniform(-3, 3, size=n),
... 'coord_y': np.random.uniform(30, 50, size=n),
... 'observations': np.random.randint(1,5, size=n)
... })
>>> ax = df.plot.hexbin(x='coord_x',
... y='coord_y',
... C='observations',
... reduce_C_function=np.sum,
... gridsize=10,
... cmap="viridis")
"""
if reduce_C_function is not None:
kwargs["reduce_C_function"] = reduce_C_function
if gridsize is not None:
kwargs["gridsize"] = gridsize
return self(kind="hexbin", x=x, y=y, C=C, **kwargs)
_backends = {}
def _find_backend(backend: str):
"""
    Find a pandas plotting backend.
Parameters
----------
backend : str
The identifier for the backend. Either an entrypoint item registered
with pkg_resources, or a module name.
    Returns
    -------
    types.ModuleType
        The imported backend.
    Notes
    -----
    Modifies _backends with imported backends as a side effect.
"""
import pkg_resources # Delay import for performance.
for entry_point in pkg_resources.iter_entry_points("pandas_plotting_backends"):
if entry_point.name == "matplotlib":
# matplotlib is an optional dependency. When
# missing, this would raise.
continue
_backends[entry_point.name] = entry_point.load()
try:
return _backends[backend]
except KeyError:
# Fall back to unregistered, module name approach.
try:
module = importlib.import_module(backend)
except ImportError:
# We re-raise later on.
pass
else:
if hasattr(module, "plot"):
# Validate that the interface is implemented when the option
# is set, rather than at plot time.
_backends[backend] = module
return module
raise ValueError(
f"Could not find plotting backend '{backend}'. Ensure that you've installed "
f"the package providing the '{backend}' entrypoint, or that the package has a "
"top-level `.plot` method."
)
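# Rough sketch of what this lookup expects from a third-party backend (the
# module name below is illustrative, not a real package): either an entry
# point registered under "pandas_plotting_backends", or any importable module
# that exposes a top-level ``plot`` callable.
#
#     # my_backend/__init__.py
#     def plot(data, kind, **kwargs):
#         ...  # render `data` with whatever engine this backend wraps
#
# Users would then opt in with pd.set_option("plotting.backend", "my_backend").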
def _get_plot_backend(backend=None):
"""
Return the plotting backend to use (e.g. `pandas.plotting._matplotlib`).
The plotting system of pandas has been using matplotlib, but the idea here
is that it can also work with other third-party backends. In the future,
this function will return the backend from a pandas option, and all the
rest of the code in this file will use the backend specified there for the
plotting.
The backend is imported lazily, as matplotlib is a soft dependency, and
pandas can be used without it being installed.
"""
backend = backend or get_option("plotting.backend")
if backend == "matplotlib":
# Because matplotlib is an optional dependency and first-party backend,
# we need to attempt an import here to raise an ImportError if needed.
try:
import pandas.plotting._matplotlib as module
except ImportError:
raise ImportError(
"matplotlib is required for plotting when the "
'default backend "matplotlib" is selected.'
) from None
_backends["matplotlib"] = module
if backend in _backends:
return _backends[backend]
module = _find_backend(backend)
_backends[backend] = module
return module
| 34.197113
| 88
| 0.567634
|
79523a4c312c1f4481d0cf4659af74932bbf980b
| 1,438
|
py
|
Python
|
conans/test/functional/settings_constraint_test.py
|
wahlm/conan
|
1afadb5cca9e1c688c7b37c69a0ff3c6a6dbe257
|
[
"MIT"
] | null | null | null |
conans/test/functional/settings_constraint_test.py
|
wahlm/conan
|
1afadb5cca9e1c688c7b37c69a0ff3c6a6dbe257
|
[
"MIT"
] | null | null | null |
conans/test/functional/settings_constraint_test.py
|
wahlm/conan
|
1afadb5cca9e1c688c7b37c69a0ff3c6a6dbe257
|
[
"MIT"
] | null | null | null |
import unittest
from conans.test.utils.tools import TestClient
from conans.util.files import save
import os
class SettingConstraintTest(unittest.TestCase):
def settings_constraint_test(self):
conanfile = """from conans import ConanFile
class Test(ConanFile):
name = "Hello"
version = "0.1"
settings = {"compiler": {"gcc": {"version": ["7.1"]}}}
def build(self):
self.output.info("Compiler version!: %s" % self.settings.compiler.version)
"""
test = """from conans import ConanFile
class Test(ConanFile):
requires = "Hello/0.1@user/channel"
def test(self):
pass
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"test_package/conanfile.py": test})
default_profile = os.path.join(client.base_folder, ".conan/profiles/default")
save(default_profile, "[settings]\ncompiler=gcc\ncompiler.version=6.3")
error = client.run("create . user/channel", ignore_error=True)
self.assertTrue(error)
self.assertIn("Invalid setting '6.3' is not a valid 'settings.compiler.version'",
client.user_io.out)
client.run("create . user/channel -s compiler=gcc -s compiler.version=7.1")
self.assertIn("Hello/0.1@user/channel: Compiler version!: 7.1", client.user_io.out)
self.assertIn("Hello/0.1@user/channel: Generating the package", client.user_io.out)
| 39.944444
| 91
| 0.653686
|
79523acce2a54299a63e6840c2e0f7c465ab818f
| 1,939
|
py
|
Python
|
contest/abc156/D.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
contest/abc156/D.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
contest/abc156/D.py
|
mola1129/atcoder
|
1d3b18cb92d0ba18c41172f49bfcd0dd8d29f9db
|
[
"MIT"
] | null | null | null |
MOD = 10 ** 9 + 7
class ModInt:
def __init__(self, x):
self.x = x % MOD
def __str__(self):
return str(self.x)
__repr__ = __str__
def __add__(self, other):
return (
ModInt(self.x + other.x) if isinstance(other, ModInt) else
ModInt(self.x + other)
)
def __sub__(self, other):
return (
ModInt(self.x - other.x) if isinstance(other, ModInt) else
ModInt(self.x - other)
)
def __mul__(self, other):
return (
ModInt(self.x * other.x) if isinstance(other, ModInt) else
ModInt(self.x * other)
)
def __truediv__(self, other):
return (
ModInt(
self.x * pow(other.x, MOD - 2, MOD)
) if isinstance(other, ModInt) else
ModInt(self.x * pow(other, MOD - 2, MOD))
)
def __pow__(self, other):
return (
ModInt(pow(self.x, other.x, MOD)) if isinstance(other, ModInt) else
ModInt(pow(self.x, other, MOD))
)
__radd__ = __add__
def __rsub__(self, other):
return (
ModInt(other.x - self.x) if isinstance(other, ModInt) else
ModInt(other - self.x)
)
__rmul__ = __mul__
def __rtruediv__(self, other):
return (
ModInt(
other.x * pow(self.x, MOD - 2, MOD)
) if isinstance(other, ModInt) else
ModInt(other * pow(self.x, MOD - 2, MOD))
)
def __rpow__(self, other):
return (
ModInt(pow(other.x, self.x, MOD)) if isinstance(other, ModInt) else
ModInt(pow(other, self.x, MOD))
)
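# Small sanity-check sketch (not part of the original submission): ModInt
# division relies on Fermat's little theorem, a / b == a * b**(MOD - 2) (mod MOD),
# which holds because MOD = 10**9 + 7 is prime.
def _modint_division_check():
    a, b = ModInt(10), ModInt(4)
    # dividing and multiplying back must be lossless under the modulus
    assert ((a / b) * b).x == a.x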
n, a, b = map(int, input().split())
ans = pow(2, n, MOD) - 1
product = ModInt(1)
div = ModInt(1)
for i in range(b + 1):
product *= (n - i)
div *= i + 1
if i == a - 1 or i == b - 1:
ans -= product / div
print(ans)
| 24.2375
| 79
| 0.507478
|
79523ad7c4bc264cf85af4a15171c2d3980018f5
| 4,070
|
py
|
Python
|
examples/scara/animation.py
|
yinanl/rocs
|
bf2483903e39f4c0ea254a9ef56720a1259955ad
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scara/animation.py
|
yinanl/rocs
|
bf2483903e39f4c0ea254a9ef56720a1259955ad
|
[
"BSD-3-Clause"
] | null | null | null |
examples/scara/animation.py
|
yinanl/rocs
|
bf2483903e39f4c0ea254a9ef56720a1259955ad
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import re
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as lines
import matplotlib.animation as animation
import sys
from os.path import dirname, realpath
pypath = dirname(dirname(dirname(realpath(__file__)))) + '/python/'
sys.path.insert(1, pypath)
from odes import scara
# # Load simulated trajectories
dirpath = dirname(realpath(__file__))
statefile = "traj_gb2"
torqfile = "torq_gb2"
sfile = dirpath + '/' + statefile + ".npy"
tfile = dirpath + '/' + torqfile + ".npy"
thetas = np.load(sfile)
torqs = np.load(tfile)
# # Joint space to operation space
model = scara(0.1)
xy2 = model.theta2xy(thetas[:, 0], thetas[:, 1])
x1 = model.l1 * np.cos(thetas[:, 0])
y1 = model.l1 * np.sin(thetas[:, 0])  # joint-1 position uses link length l1
x2 = xy2[0, :]
y2 = xy2[1, :]
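# For reference, theta2xy presumably implements the standard two-link planar
# (SCARA) forward kinematics:
#   x2 = l1*cos(theta1) + l2*cos(theta1 + theta2)
#   y2 = l1*sin(theta1) + l2*sin(theta1 + theta2)
# so (x1, y1) is the elbow joint and (x2, y2) the end-effector position.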
# # Setup workspace
FS = 12
fig, ax = plt.subplots()
ax.set_xlim(0, 1.2*(model.l1+model.l2))
ax.set_xlabel(r'$x$')
ax.set_ylim(0, model.l1+model.l2)
ax.set_ylabel(r'$y$')
# The bar obstacle
H = 0.8*model.l1
r = 0.5*model.l1
# bar = plt.plot([0, r], [H, H], linewidth=10, color='k')
bar = patches.Rectangle((0, H), r, 0.01, facecolor='tab:gray',
hatch='/', zorder=0)
ax.add_patch(bar)
# Goals
nG = 2
G = np.zeros(shape=(2, 2, nG))
G[:, :, 0] = np.array([[0.0277, 0.0597], [0.1852, 0.2134]])
G[:, :, 1] = np.array([[0.2585, 0.2784], [0.0059, 0.0514]])
for i in range(nG):
ax.add_patch(
patches.Rectangle((G[0, 0, i], G[1, 0, i]),
G[0, 1, i]-G[0, 0, i], G[1, 1, i]-G[1, 0, i],
linewidth=1.5, facecolor='yellowgreen', fill=True,
zorder=0)
)
plt.text((G[0, 0, 0]+G[0, 1, 0])*0.4, (G[1, 1, 0]+G[1, 0, 0])/2,
r'$g_1$', fontsize=FS)
plt.text((G[0, 0, 1]+G[0, 1, 1])*0.49, (G[1, 1, 1]+G[1, 0, 1])/2,
r'$g_2$', fontsize=FS)
# arm1 = lines.Line2D([0, x1[0]], [0, y1[0]],
# linewidth=3, color='k', alpha=0, zorder=1)
# arm2 = lines.Line2D([x1[0], x2[0]], [y1[0], y2[0]],
# linewidth=3, color='k', alpha=0, zorder=1)
# joint1 = patches.Circle((0, 0), radius=0.005, color='k',
# fill=True, alpha=1, zorder=2)
# joint2 = patches.Circle((x1[0], y1[0]), radius=0.005, color='k',
# fill=True, alpha=0, zorder=2)
# end = patches.Circle((x2[0], y2[0]), radius=0.005, color='tab:orange',
# fill=True, alpha=0, zorder=2)
i = 93
arm1 = lines.Line2D([0, x1[i]], [0, y1[i]],
linewidth=3, color='k', alpha=1, zorder=1)
arm2 = lines.Line2D([x1[i], x2[i]], [y1[i], y2[i]],
linewidth=3, color='k', alpha=1, zorder=1)
joint1 = patches.Circle((0, 0), radius=0.005, color='k',
fill=True, alpha=1, zorder=2)
joint2 = patches.Circle((x1[i], y1[i]), radius=0.005, color='k',
fill=True, alpha=1, zorder=2)
end = patches.Circle((x2[i], y2[i]), radius=0.005, color='tab:orange',
fill=True, alpha=1, zorder=2)
ax.add_patch(joint1)
ax.add_patch(joint2)
ax.add_patch(end)
ax.add_artist(arm1)
ax.add_artist(arm2)
# # Animation
torque_text = ax.text(0.05, 0.95, '', transform=ax.transAxes)
torque_template = 'torques=%.3f,%.3f'
def animate(i):
arm1.set_data([0, x1[i]], [0, y1[i]])
arm2.set_data([x1[i], x2[i]], [y1[i], y2[i]])
joint2.center = (x1[i], y1[i])
end.center = (x2[i], y2[i])
arm1.set_alpha(1)
arm2.set_alpha(1)
joint2.set_alpha(1)
end.set_alpha(1)
joint2.set_zorder(10)
end.set_zorder(10)
torque_text.set_text(torque_template % (torqs[i, 0], torqs[i, 1]))
return joint2, end, torque_text, arm1, arm2
ani = animation.FuncAnimation(fig, animate, x1.size,
interval=0.1*500, blit=True)
Writer = animation.writers['ffmpeg']
writer = Writer(fps=15, metadata=dict(artist='Me'), bitrate=1800)
ani.save(dirpath+'/replay'+'.mp4', writer=writer)
# # End-effector trajectory
ax.plot(x2, y2, color='peru')
plt.savefig(dirpath+'/fig_traj-gb2-os.png')
plt.show()
| 31.796875
| 76
| 0.578624
|
79523afeaa88924ca2693659a2e3adab3587009d
| 39
|
py
|
Python
|
examples/celery/client.py
|
mastak/pybrake
|
32968efbebea07580680f16228be818726c13933
|
[
"MIT"
] | 36
|
2018-04-24T09:26:12.000Z
|
2022-02-17T13:44:41.000Z
|
examples/celery/client.py
|
mastak/pybrake
|
32968efbebea07580680f16228be818726c13933
|
[
"MIT"
] | 90
|
2018-03-06T15:13:53.000Z
|
2022-03-25T11:32:25.000Z
|
examples/celery/client.py
|
mastak/pybrake
|
32968efbebea07580680f16228be818726c13933
|
[
"MIT"
] | 21
|
2018-04-04T14:50:12.000Z
|
2022-03-23T05:48:51.000Z
|
from tasks import add
add.delay(4, 4)
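# For this call to do anything, a worker must be consuming the queue, e.g.
# (assuming the task is defined in tasks.py next to this script):
#
#     celery -A tasks worker --loglevel=info
#
# .delay(4, 4) is shorthand for .apply_async(args=(4, 4)) and returns an
# AsyncResult immediately rather than the computed sum.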
| 9.75
| 21
| 0.717949
|
79523b8d4d3293a476946b65f6dbae10ebd073c7
| 4,052
|
py
|
Python
|
gae/layers.py
|
floregol/gae
|
d5db3f32a8d26001a9b44f7a863a75a61807461f
|
[
"MIT"
] | null | null | null |
gae/layers.py
|
floregol/gae
|
d5db3f32a8d26001a9b44f7a863a75a61807461f
|
[
"MIT"
] | null | null | null |
gae/layers.py
|
floregol/gae
|
d5db3f32a8d26001a9b44f7a863a75a61807461f
|
[
"MIT"
] | null | null | null |
from gae.initializations import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs
"""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def dropout_sparse(x, keep_prob, num_nonzero_elems):
"""Dropout for sparse tensors. Currently fails for very large sparse tensors (>1M elements)
"""
noise_shape = [num_nonzero_elems]
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
# Properties
name: String, defines the variable scope of the layer.
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in list(kwargs.keys()):
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.issparse = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
outputs = self._call(inputs)
return outputs
class GraphConvolution(Layer):
"""Basic graph convolution layer for undirected graph without edge labels."""
def __init__(self, input_dim, output_dim, adj, dropout=0., act=tf.nn.relu, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
self.dropout = dropout
self.adj = adj
self.act = act
def _call(self, inputs):
x = inputs
x = tf.nn.dropout(x, 1-self.dropout)
x = tf.matmul(x, self.vars['weights'])
x = tf.sparse_tensor_dense_matmul(self.adj, x)
outputs = self.act(x)
return outputs
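# In other words, a GraphConvolution call computes act(adj @ (dropout(x) @ W)),
# the usual GCN propagation rule, where `adj` is expected to be a sparse
# (typically symmetrically normalized) adjacency matrix. Minimal usage sketch
# (TF1-style placeholders; the feature dimension 1433 is illustrative):
#
#     x = tf.placeholder(tf.float32, shape=(None, 1433))
#     adj = tf.sparse_placeholder(tf.float32)
#     hidden = GraphConvolution(input_dim=1433, output_dim=16,
#                               adj=adj, dropout=0.1)(x)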
class GraphConvolutionSparse(Layer):
"""Graph convolution layer for sparse inputs."""
def __init__(self, input_dim, output_dim, adj, features_nonzero, dropout=0., act=tf.nn.relu, **kwargs):
super(GraphConvolutionSparse, self).__init__(**kwargs)
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = weight_variable_glorot(input_dim, output_dim, name="weights")
self.dropout = dropout
self.adj = adj
self.act = act
self.issparse = True
self.features_nonzero = features_nonzero
def _call(self, inputs):
x = inputs
x = dropout_sparse(x, 1-self.dropout, self.features_nonzero)
x = tf.sparse_tensor_dense_matmul(x, self.vars['weights'])
x = tf.sparse_tensor_dense_matmul(self.adj, x)
outputs = self.act(x)
return outputs
class InnerProductDecoder(Layer):
"""Decoder model layer for link prediction."""
def __init__(self, input_dim, dropout=0., act=tf.nn.sigmoid, **kwargs):
super(InnerProductDecoder, self).__init__(**kwargs)
self.dropout = dropout
self.act = act
def _call(self, inputs):
inputs = tf.nn.dropout(inputs, 1-self.dropout)
x = tf.transpose(inputs)
x = tf.matmul(inputs, x)
x = tf.reshape(x, [-1])
outputs = self.act(x)
return outputs
| 33.487603
| 107
| 0.638944
|
79523ba966f2526d316bb824bb1a129f8f2f69ee
| 2,895
|
py
|
Python
|
djstripe/contrib/rest_framework/views.py
|
Korben11/dj-stripe
|
06661c174f0cda52b33be9a421f8b80438d71ab0
|
[
"MIT"
] | null | null | null |
djstripe/contrib/rest_framework/views.py
|
Korben11/dj-stripe
|
06661c174f0cda52b33be9a421f8b80438d71ab0
|
[
"MIT"
] | null | null | null |
djstripe/contrib/rest_framework/views.py
|
Korben11/dj-stripe
|
06661c174f0cda52b33be9a421f8b80438d71ab0
|
[
"MIT"
] | null | null | null |
"""
.. module:: dj-stripe.contrib.rest_framework.views.
:synopsis: Views for the dj-stripe REST API.
.. moduleauthor:: Philippe Luickx (@philippeluickx)
"""
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ...models import Customer
from ...settings import CANCELLATION_AT_PERIOD_END, subscriber_request_callback
from .serializers import CreateSubscriptionSerializer, SubscriptionSerializer
class SubscriptionRestView(APIView):
"""API Endpoints for the Subscription object."""
permission_classes = (IsAuthenticated,)
def get(self, request, **kwargs):
"""
Return the customer's valid subscriptions.
Returns with status code 200.
"""
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
serializer = SubscriptionSerializer(customer.subscriptions.all(), many=True)
return Response(serializer.data)
def post(self, request, **kwargs):
"""
Create a new current subscription for the user.
Returns with status code 201.
"""
serializer = CreateSubscriptionSerializer(data=request.data)
if serializer.is_valid():
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.add_card(serializer.data["stripe_token"])
charge_immediately = serializer.data.get("charge_immediately")
if charge_immediately is None:
charge_immediately = True
customer.subscribe(serializer.data["plan"], charge_immediately)
return Response(serializer.data, status=status.HTTP_201_CREATED)
except Exception:
# TODO: Better error messages
return Response(
"Something went wrong processing the payment.",
status=status.HTTP_400_BAD_REQUEST,
)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, **kwargs):
"""
        Mark the customer's current subscription as canceled.
Returns with status code 204.
"""
try:
customer, _created = Customer.get_or_create(
subscriber=subscriber_request_callback(self.request)
)
customer.subscription.cancel(at_period_end=CANCELLATION_AT_PERIOD_END)
return Response(status=status.HTTP_204_NO_CONTENT)
except Exception:
return Response(
"Something went wrong cancelling the subscription.",
status=status.HTTP_400_BAD_REQUEST,
)
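# Illustrative client-side sketch (the URL is an assumption; it depends on how
# SubscriptionRestView is routed in the project's urlconf). The POST fields
# mirror CreateSubscriptionSerializer as used above:
#
#     from rest_framework.test import APIClient
#
#     client = APIClient()
#     client.force_authenticate(user=some_user)
#     client.post("/api/subscription/", {"stripe_token": "tok_visa",
#                                        "plan": "my-plan",
#                                        "charge_immediately": True})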
| 34.058824
| 84
| 0.648705
|
79523bc1d88d5b25c01fb290e8b37b77fb78ab35
| 2,719
|
py
|
Python
|
ui/widgets/video.py
|
AP-Atul/Torpido
|
a646b4d6de7f2e2c96de4c64ce3113f53e3931c2
|
[
"Unlicense"
] | 21
|
2020-12-23T07:13:10.000Z
|
2022-01-12T10:32:22.000Z
|
ui/widgets/video.py
|
AP-Atul/Torpido
|
a646b4d6de7f2e2c96de4c64ce3113f53e3931c2
|
[
"Unlicense"
] | 2
|
2020-12-30T10:45:42.000Z
|
2021-09-25T09:52:00.000Z
|
ui/widgets/video.py
|
AP-Atul/Torpido
|
a646b4d6de7f2e2c96de4c64ce3113f53e3931c2
|
[
"Unlicense"
] | 1
|
2021-02-06T21:39:41.000Z
|
2021-02-06T21:39:41.000Z
|
""" QWidget for Open cv im show alternative with Qt """
import cv2
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtGui
from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QMainWindow, QLabel
class OpenCVQImage(QtGui.QImage):
""" Conversion of open cv image to QImage """
def __init__(self, image: np.ndarray) -> None:
if len(image.shape) == 3:
height, width, n_channels = image.shape
fmt = QtGui.QImage.Format_BGR30
else:
height, width = image.shape
n_channels = 1
fmt = QtGui.QImage.Format_Grayscale8
super().__init__(
image.tostring(),
width,
height,
n_channels * width,
fmt
)
class QVideoWidget(QtWidgets.QWidget):
""" Widget with paint events and communication slot for frame retrieval and display """
def __init__(self, parent=None) -> None:
super().__init__(parent)
self.image_label = QLabel()
self.image_label.setWindowTitle("Video Output")
self._frame = None
self.__stopped = False
def set_frame(self, frame: np.ndarray):
""" Set the frame to the window """
if not self.__stopped:
self._frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # frame color convert
w, h = frame.shape[1], frame.shape[0]
self.image_label.resize(w, h)
self.image_label.setPixmap(QPixmap.fromImage(
QImage(self._frame, w, h, QImage.Format_RGB888))
)
self.image_label.show() # displays the widget
def end(self):
""" Close all the windows """
self.__stopped = True
self.close()
def changeEvent(self, event: QtCore.QEvent) -> None:
if event.type() == QtCore.QEvent.EnabledChange:
if self.isEnabled():
self._camera_device.new_frame.connect(self._on_new_frame)
else:
self._camera_device.new_frame.disconnect(self._on_new_frame)
super().changeEvent(event)
def paintEvent(self, event: QtGui.QPaintEvent) -> None:
if self._frame is None:
return
painter = QtGui.QPainter(self)
painter.drawImage(QtCore.QPoint(0, 0), OpenCVQImage(self._frame))
super().paintEvent(event)
class QVideoWindow(QMainWindow):
""" Separate window for video display """
def __init__(self, window):
super().__init__()
self.window = window
self.video = QVideoWidget(self)
self.setCentralWidget(self.video)
self.window.videoFrame.connect(self.video.set_frame)
self.window.videoClose.connect(self.video.end)
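# Minimal wiring sketch (assumes `window` exposes `videoFrame` and `videoClose`
# pyqtSignals, as connected above, and that a Qt event loop is running):
#
#     app = QtWidgets.QApplication([])
#     video_window = QVideoWindow(window)
#     window.videoFrame.emit(cv2.imread("sample.png"))  # any BGR ndarray works
#     app.exec_()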
| 31.252874
| 91
| 0.614564
|
79523bee7dba5a4e57064a2c4fa6da646614ceac
| 3,626
|
py
|
Python
|
cfgov/v1/management/commands/update_data_snapshot_values.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-11-26T20:18:22.000Z
|
2019-11-26T20:18:22.000Z
|
cfgov/v1/management/commands/update_data_snapshot_values.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 8
|
2021-03-11T00:55:51.000Z
|
2022-02-13T21:10:14.000Z
|
cfgov/v1/management/commands/update_data_snapshot_values.py
|
higs4281/cfgov-refresh
|
a02b193fb2373d443265c21845adf8a196e05675
|
[
"CC0-1.0"
] | 1
|
2019-12-28T14:04:07.000Z
|
2019-12-28T14:04:07.000Z
|
import json
import logging
import os
from django.core.management.base import BaseCommand
from v1.models.browse_page import BrowsePage
from v1.tests.wagtail_pages.helpers import publish_changes
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Monthly updates to data snapshot values'
def expand_path(self, path):
"""Expands a relative path into an absolute path"""
rootpath = os.path.abspath(os.path.expanduser(path))
return rootpath
def add_arguments(self, parser):
"""Adds all arguments to be processed."""
parser.add_argument(
'--snapshot_file',
required=True,
help='JSON file containing all markets\' data snapshot values'
)
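    # For reference, handle() below expects the snapshot file to look roughly
    # like this (all values are illustrative; the inquiry_*/tightness_* keys
    # are optional per market):
    #
    #     {
    #       "markets": [
    #         {
    #           "market_key": "AUT",
    #           "data_month": "March 2019",
    #           "num_originations": "2.1 million",
    #           "value_originations": "$50.9 billion",
    #           "year_over_year_change": "5.5% increase",
    #           "inquiry_month": "2019-03-01",
    #           "inquiry_yoy_change": "3.2% decrease",
    #           "tightness_month": "2019-03-01",
    #           "tightness_yoy_change": "1.1% increase"
    #         }
    #       ]
    #     }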
def get_data_snapshots(self):
""" Gets all data snapshots from browse pages
Assumes there is a maximum of one data snapshot per page
"""
snapshots = []
for page in BrowsePage.objects.all():
stream_data = page.specific.content.stream_data
snapshot = list(filter(
lambda item: item['type'] == 'data_snapshot',
stream_data
))
if snapshot:
snapshot[0]['value']['page'] = page
snapshots.append(snapshot[0]['value'])
return snapshots
def find_data_snapshot(self, market_key, snapshots):
""" Look up data snapshot by the provided market key
Assumes there is one data snapshot per key
"""
for snapshot in snapshots:
if snapshot['market_key'] == market_key:
return snapshot
def handle(self, *args, **options):
# Read markets from file into update dicts
with open(self.expand_path(options['snapshot_file'])) as json_data:
data = json.load(json_data)
markets = data['markets']
snapshots = self.get_data_snapshots()
for market in markets:
key = market['market_key']
snapshot = self.find_data_snapshot(key, snapshots)
if not snapshot: # Market may not have been added to Wagtail yet
logger.warn('Market key {} not found'.format(key))
continue
# Update snapshot fields with the provided values
snapshot['last_updated_projected_data'] = market['data_month']
snapshot['num_originations'] = market['num_originations']
snapshot['value_originations'] = market['value_originations']
snapshot['year_over_year_change'] = market['year_over_year_change']
# Update inquiry index info if it exists for this market
if "inquiry_yoy_change" in market:
snapshot['inquiry_month'] = market['inquiry_month']
snapshot['inquiry_year_over_year_change'] = \
market['inquiry_yoy_change']
else:
snapshot['inquiry_month'] = ""
snapshot['inquiry_year_over_year_change'] = ""
# Update tightness index info if it exists for this market
if "tightness_yoy_change" in market:
snapshot['tightness_month'] = market['tightness_month']
snapshot['tightness_year_over_year_change'] = \
market['tightness_yoy_change']
else:
snapshot['tightness_month'] = ""
snapshot['tightness_year_over_year_change'] = ""
# Publish changes to the browse page the data snapshot lives on
page = snapshot['page']
publish_changes(page.specific)
| 37.770833
| 79
| 0.608935
|
79523d1fbc085ed5592b2e401854fe1a8138a05b
| 519
|
py
|
Python
|
questions.py
|
guptaaastha/quiz
|
db0dbb005e91b7e418f97e8cdf1329da6ac088ff
|
[
"MIT"
] | null | null | null |
questions.py
|
guptaaastha/quiz
|
db0dbb005e91b7e418f97e8cdf1329da6ac088ff
|
[
"MIT"
] | null | null | null |
questions.py
|
guptaaastha/quiz
|
db0dbb005e91b7e418f97e8cdf1329da6ac088ff
|
[
"MIT"
] | null | null | null |
question_prompts = [
"In olden days people used the bark of the ________ tree to make a medicine for Malaria.\n (a)Cinchona\n (b)Banyan\n (c)Peepal\n\n",
"On which day the world water day is celebrated?\n (a)23rd March\n (b)22nd March\n (c)21st March\n\n",
"Which is a method to prevent rust?\n (a)Crystallization\n (b)Sedimentation\n (c)Galvanisation\n\n",
"What will happen if carbon dioxide gas is passed through lime water ?\n (a)Calcium carbonate is formed\n (b)The lime water turns milky\n (c)Both of these\n\n"
]
| 86.5
| 159
| 0.739884
|
79523d2b3a60a9b1bf3f0b7d6b706b9174736b66
| 12,502
|
py
|
Python
|
detectron/utils/boxes.py
|
singhnarotam1997/Detectron
|
ecc6b25fc8869486126f1384b4e6e042a718bd5b
|
[
"Apache-2.0"
] | 60
|
2021-08-07T09:16:52.000Z
|
2022-03-14T09:09:00.000Z
|
detectron/utils/boxes.py
|
singhnarotam1997/Detectron
|
ecc6b25fc8869486126f1384b4e6e042a718bd5b
|
[
"Apache-2.0"
] | 4
|
2021-10-14T02:44:49.000Z
|
2022-03-14T08:18:20.000Z
|
detectron/utils/boxes.py
|
singhnarotam1997/Detectron
|
ecc6b25fc8869486126f1384b4e6e042a718bd5b
|
[
"Apache-2.0"
] | 11
|
2021-11-01T00:30:37.000Z
|
2021-12-08T10:01:52.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
#
# Based on:
# --------------------------------------------------------
# Fast/er R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Box manipulation functions. The internal Detectron box format is
[x1, y1, x2, y2] where (x1, y1) specify the top-left box corner and (x2, y2)
specify the bottom-right box corner. Boxes from external sources, e.g.,
datasets, may be in other formats (such as [x, y, w, h]) and require conversion.
This module uses a convention that may seem strange at first: the width of a box
is computed as x2 - x1 + 1 (likewise for height). The "+ 1" dates back to old
object detection days when the coordinates were integer pixel indices, rather
than floating point coordinates in a subpixel coordinate frame. A box with x2 =
x1 and y2 = y1 was taken to include a single pixel, having a width of 1, and
hence requiring the "+ 1". Now, most datasets will likely provide boxes with
floating point coordinates and the width should be more reasonably computed as
x2 - x1.
In practice, as long as a model is trained and tested with a consistent
convention either decision seems to be ok (at least in our experience on COCO).
Since we have a long history of training models with the "+ 1" convention, we
are reluctant to change it even if our modern tastes prefer not to use it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from detectron.core.config import cfg
import detectron.utils.cython_bbox as cython_bbox
import detectron.utils.cython_nms as cython_nms
bbox_overlaps = cython_bbox.bbox_overlaps
def boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
    assert np.all(areas >= 0), 'Negative areas found'
return areas
def unique_boxes(boxes, scale=1.0):
"""Return indices of unique boxes."""
v = np.array([1, 1e3, 1e6, 1e9])
hashes = np.round(boxes * scale).dot(v)
_, index = np.unique(hashes, return_index=True)
return np.sort(index)
def xywh_to_xyxy(xywh):
"""Convert [x1 y1 w h] box format to [x1 y1 x2 y2] format."""
if isinstance(xywh, (list, tuple)):
# Single box given as a list of coordinates
assert len(xywh) == 4
x1, y1 = xywh[0], xywh[1]
x2 = x1 + np.maximum(0., xywh[2] - 1.)
y2 = y1 + np.maximum(0., xywh[3] - 1.)
return (x1, y1, x2, y2)
elif isinstance(xywh, np.ndarray):
# Multiple boxes given as a 2D ndarray
return np.hstack(
(xywh[:, 0:2], xywh[:, 0:2] + np.maximum(0, xywh[:, 2:4] - 1))
)
else:
raise TypeError('Argument xywh must be a list, tuple, or numpy array.')
def xyxy_to_xywh(xyxy):
"""Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
if isinstance(xyxy, (list, tuple)):
# Single box given as a list of coordinates
assert len(xyxy) == 4
x1, y1 = xyxy[0], xyxy[1]
w = xyxy[2] - x1 + 1
h = xyxy[3] - y1 + 1
return (x1, y1, w, h)
elif isinstance(xyxy, np.ndarray):
# Multiple boxes given as a 2D ndarray
return np.hstack((xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1))
else:
raise TypeError('Argument xyxy must be a list, tuple, or numpy array.')
def filter_small_boxes(boxes, min_size):
"""Keep boxes with width and height both greater than min_size."""
w = boxes[:, 2] - boxes[:, 0] + 1
h = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((w > min_size) & (h > min_size))[0]
return keep
def clip_boxes_to_image(boxes, height, width):
"""Clip an array of boxes to an image with the given height and width."""
boxes[:, [0, 2]] = np.minimum(width - 1., np.maximum(0., boxes[:, [0, 2]]))
boxes[:, [1, 3]] = np.minimum(height - 1., np.maximum(0., boxes[:, [1, 3]]))
return boxes
def clip_xyxy_to_image(x1, y1, x2, y2, height, width):
"""Clip coordinates to an image with the given height and width."""
x1 = np.minimum(width - 1., np.maximum(0., x1))
y1 = np.minimum(height - 1., np.maximum(0., y1))
x2 = np.minimum(width - 1., np.maximum(0., x2))
y2 = np.minimum(height - 1., np.maximum(0., y2))
return x1, y1, x2, y2
def clip_tiled_boxes(boxes, im_shape):
"""Clip boxes to image boundaries. im_shape is [height, width] and boxes
has shape (N, 4 * num_tiled_boxes)."""
assert boxes.shape[1] % 4 == 0, \
'boxes.shape[1] is {:d}, but must be divisible by 4.'.format(
boxes.shape[1]
)
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = np.minimum(dw, cfg.BBOX_XFORM_CLIP)
dh = np.minimum(dh, cfg.BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
def bbox_transform_inv(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
In older versions of this code (and in py-faster-rcnn), the weights were set
such that the regression deltas would have unit standard deviation on the
training dataset. Presently, rather than computing these statistics exactly,
we use a fixed set of weights (10., 10., 5., 5.) by default. These are
approximately the weights one would get from COCO using the previous unit
stdev heuristic.
"""
ex_widths = boxes[:, 2] - boxes[:, 0] + 1.0
ex_heights = boxes[:, 3] - boxes[:, 1] + 1.0
ex_ctr_x = boxes[:, 0] + 0.5 * ex_widths
ex_ctr_y = boxes[:, 1] + 0.5 * ex_heights
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0
gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * np.log(gt_widths / ex_widths)
targets_dh = wh * np.log(gt_heights / ex_heights)
targets = np.vstack((targets_dx, targets_dy, targets_dw,
targets_dh)).transpose()
return targets
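def _bbox_transform_roundtrip_check():
    """Sanity sketch (not part of the original module): encoding boxes against
    themselves yields zero deltas, and decoding those deltas recovers the boxes,
    i.e. bbox_transform inverts bbox_transform_inv under the default weights."""
    boxes = np.array([[0.0, 0.0, 9.0, 19.0], [5.0, 5.0, 24.0, 14.0]])
    deltas = bbox_transform_inv(boxes, boxes)
    assert np.allclose(deltas, 0.0)
    assert np.allclose(bbox_transform(boxes, deltas), boxes)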
def expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
def flip_boxes(boxes, im_width):
"""Flip boxes horizontally."""
boxes_flipped = boxes.copy()
boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
return boxes_flipped
def aspect_ratio(boxes, aspect_ratio):
"""Perform width-relative aspect ratio transformation."""
boxes_ar = boxes.copy()
boxes_ar[:, 0::4] = aspect_ratio * boxes[:, 0::4]
boxes_ar[:, 2::4] = aspect_ratio * boxes[:, 2::4]
return boxes_ar
def box_voting(top_dets, all_dets, thresh, scoring_method='ID', beta=1.0):
"""Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
referenced paper) can be applied by setting `scoring_method` appropriately.
"""
    # top_dets is [N, 5], each row is [x1 y1 x2 y2 score]
    # all_dets is [N, 5], each row is [x1 y1 x2 y2 score]
top_dets_out = top_dets.copy()
top_boxes = top_dets[:, :4]
all_boxes = all_dets[:, :4]
all_scores = all_dets[:, 4]
top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
for k in range(top_dets_out.shape[0]):
inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
boxes_to_vote = all_boxes[inds_to_vote, :]
ws = all_scores[inds_to_vote]
top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
if scoring_method == 'ID':
# Identity, nothing to do
pass
elif scoring_method == 'TEMP_AVG':
# Average probabilities (considered as P(detected class) vs.
# P(not the detected class)) after smoothing with a temperature
# hyperparameter.
P = np.vstack((ws, 1.0 - ws))
P_max = np.max(P, axis=0)
X = np.log(P / P_max)
X_exp = np.exp(X / beta)
P_temp = X_exp / np.sum(X_exp, axis=0)
P_avg = P_temp[0].mean()
top_dets_out[k, 4] = P_avg
elif scoring_method == 'AVG':
# Combine new probs from overlapping boxes
top_dets_out[k, 4] = ws.mean()
elif scoring_method == 'IOU_AVG':
P = ws
ws = top_to_all_overlaps[k, inds_to_vote]
P_avg = np.average(P, weights=ws)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'GENERALIZED_AVG':
P_avg = np.mean(ws**beta)**(1.0 / beta)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'QUASI_SUM':
top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
else:
raise NotImplementedError(
'Unknown scoring method {}'.format(scoring_method)
)
return top_dets_out
def nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return []
return cython_nms.nms(dets, thresh)
def soft_nms(
dets, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear'
):
"""Apply the soft NMS algorithm from https://arxiv.org/abs/1704.04503."""
if dets.shape[0] == 0:
return dets, []
methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
assert method in methods, 'Unknown soft_nms method: {}'.format(method)
dets, keep = cython_nms.soft_nms(
np.ascontiguousarray(dets, dtype=np.float32),
np.float32(sigma),
np.float32(overlap_thresh),
np.float32(score_thresh),
np.uint8(methods[method])
)
return dets, keep
| 37.770393
| 80
| 0.610942
|
79523d32928d5b66646be72dc85788219dbf989e
| 3,032
|
py
|
Python
|
nova/db/api/migrations/env.py
|
MagnusR/nova
|
798b8d2d825fcf259279ba4370e734242598f76f
|
[
"Apache-2.0"
] | null | null | null |
nova/db/api/migrations/env.py
|
MagnusR/nova
|
798b8d2d825fcf259279ba4370e734242598f76f
|
[
"Apache-2.0"
] | 3
|
2019-05-17T15:49:12.000Z
|
2019-11-21T10:49:54.000Z
|
nova/db/api/migrations/env.py
|
MagnusR/nova
|
798b8d2d825fcf259279ba4370e734242598f76f
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from logging.config import fileConfig
from sqlalchemy import engine_from_config
from sqlalchemy import pool
from alembic import context
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging unless we're told not to.
# This line sets up loggers basically.
if config.attributes.get('configure_logger', True):
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL and not an Engine, though an
Engine is acceptable here as well. By skipping the Engine creation we
don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
)
with context.begin_transaction():
context.run_migrations()
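# For reference, offline mode is what runs when Alembic is invoked with the
# --sql flag (for example ``alembic upgrade head --sql``): instead of touching
# a database, the migration SQL is emitted as script output.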
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine and associate a connection
with the context.
This is modified from the default based on the below, since we want to
share an engine when unit testing so in-memory database testing actually
works.
https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing
"""
connectable = config.attributes.get('connection', None)
if connectable is None:
# only create Engine if we don't have a Connection from the outside
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
# when connectable is already a Connection object, calling connect() gives
# us a *branched connection*.
with connectable.connect() as connection:
context.configure(
connection=connection, target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| 31.915789
| 79
| 0.722296
|
79523e7b163963d02daf1131c462ffb25713ab85
| 12,850
|
py
|
Python
|
asreview/review/factory.py
|
DominiqueMaciejewski/asreview
|
eb1066074613a5f6f930ff610ff92184e9244f4f
|
[
"Apache-2.0"
] | 280
|
2020-02-06T12:40:16.000Z
|
2022-03-19T21:13:55.000Z
|
asreview/review/factory.py
|
DominiqueMaciejewski/asreview
|
eb1066074613a5f6f930ff610ff92184e9244f4f
|
[
"Apache-2.0"
] | 519
|
2020-02-06T12:08:29.000Z
|
2022-03-31T16:02:01.000Z
|
asreview/review/factory.py
|
DominiqueMaciejewski/asreview
|
eb1066074613a5f6f930ff610ff92184e9244f4f
|
[
"Apache-2.0"
] | 77
|
2020-03-06T15:22:08.000Z
|
2022-03-30T11:21:06.000Z
|
# Copyright 2019-2020 The ASReview Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from os.path import splitext
from pathlib import PurePath
import numpy as np
from asreview.models.balance.utils import get_balance_model
from asreview.compat import convert_id_to_idx
from asreview.config import AVAILABLE_CLI_MODI, LABEL_NA
from asreview.config import AVAILABLE_REVIEW_CLASSES
from asreview.config import DEFAULT_BALANCE_STRATEGY
from asreview.config import DEFAULT_FEATURE_EXTRACTION
from asreview.config import DEFAULT_MODEL
from asreview.config import DEFAULT_N_INSTANCES
from asreview.config import DEFAULT_N_PRIOR_EXCLUDED
from asreview.config import DEFAULT_N_PRIOR_INCLUDED
from asreview.config import DEFAULT_QUERY_STRATEGY
from asreview.config import EMAIL_ADDRESS
from asreview.config import GITHUB_PAGE
from asreview.config import KERAS_MODELS
from asreview.data import ASReviewData
from asreview.data import load_data
from asreview.io.paper_record import preview_record
from asreview.models.feature_extraction import get_feature_model
from asreview.models.classifiers import get_classifier
from asreview.models.query import get_query_model
from asreview.review.minimal import MinimalReview
from asreview.review.simulate import ReviewSimulate
from asreview.settings import ASReviewSettings
from asreview.state.utils import open_state
from asreview.utils import get_random_state
ASCII_LOGO = """
_____ _____ _
/\ / ____| __ \ (_)
/ \ | (___ | |__) |_____ ___ _____ __
/ /\ \ \___ \| _ // _ \ \ / / |/ _ \ \ /\ / /
/ ____ \ ____) | | \ \ __/\ V /| | __/\ V V /
/_/ \_\_____/|_| \_\___| \_/ |_|\___| \_/\_/
""" # noqa
ASCII_MSG_SIMULATE = """
---------------------------------------------------------------------------------
| |
| Welcome to the ASReview Automated Systematic Review software. |
| In this mode the computer will simulate how well the ASReview software |
| could have accelerate the systematic review of your dataset. |
| You can sit back and relax while the computer runs this simulation. |
| |
| GitHub page: {0: <58}|
| Questions/remarks: {1: <58}|
| |
---------------------------------------------------------------------------------
""".format(GITHUB_PAGE, EMAIL_ADDRESS) # noqa
def _add_defaults(set_param, default_param):
set_param.update({
key: value
for key, value in default_param.items() if key not in set_param
})
def create_as_data(dataset,
included_dataset=[],
excluded_dataset=[],
prior_dataset=[],
new=False):
"""Create ASReviewData object from multiple datasets."""
if isinstance(dataset, (str, PurePath)):
dataset = [dataset]
if isinstance(included_dataset, (str, PurePath)):
included_dataset = [included_dataset]
if isinstance(excluded_dataset, (str, PurePath)):
excluded_dataset = [excluded_dataset]
if isinstance(prior_dataset, (str, PurePath)):
prior_dataset = [prior_dataset]
as_data = ASReviewData()
# Find the URL of the datasets if the dataset is a benchmark dataset.
for data in dataset:
as_data.append(load_data(data))
if new:
as_data.labels = np.full((len(as_data), ), LABEL_NA, dtype=int)
for data in included_dataset:
as_data.append(load_data(data, data_type="included"))
for data in excluded_dataset:
as_data.append(load_data(data, data_type="excluded"))
for data in prior_dataset:
as_data.append(load_data(data, data_type="prior"))
return as_data
def get_reviewer(dataset,
mode="simulate",
model=DEFAULT_MODEL,
query_strategy=DEFAULT_QUERY_STRATEGY,
balance_strategy=DEFAULT_BALANCE_STRATEGY,
feature_extraction=DEFAULT_FEATURE_EXTRACTION,
n_instances=DEFAULT_N_INSTANCES,
n_papers=None,
n_queries=None,
embedding_fp=None,
verbose=0,
prior_idx=None,
prior_record_id=None,
n_prior_included=DEFAULT_N_PRIOR_INCLUDED,
n_prior_excluded=DEFAULT_N_PRIOR_EXCLUDED,
config_file=None,
state_file=None,
model_param=None,
query_param=None,
balance_param=None,
feature_param=None,
seed=None,
included_dataset=[],
excluded_dataset=[],
prior_dataset=[],
new=False,
**kwargs):
"""Get a review object from arguments.
See __main__.py for a description of the arguments.
"""
as_data = create_as_data(dataset,
included_dataset,
excluded_dataset,
prior_dataset,
new=new)
if len(as_data) == 0:
raise ValueError("Supply at least one dataset"
" with at least one record.")
cli_settings = ASReviewSettings(model=model,
n_instances=n_instances,
n_queries=n_queries,
n_papers=n_papers,
n_prior_included=n_prior_included,
n_prior_excluded=n_prior_excluded,
query_strategy=query_strategy,
balance_strategy=balance_strategy,
feature_extraction=feature_extraction,
mode=mode,
data_fp=None)
cli_settings.from_file(config_file)
if state_file is not None:
with open_state(state_file) as state:
if state.is_empty():
state.settings = cli_settings
settings = state.settings
else:
settings = cli_settings
if n_queries is not None:
settings.n_queries = n_queries
if n_papers is not None:
settings.n_papers = n_papers
if model_param is not None:
settings.model_param = model_param
if query_param is not None:
settings.query_param = query_param
if balance_param is not None:
settings.balance_param = balance_param
if feature_param is not None:
settings.feature_param = feature_param
# Check if mode is valid
if mode in AVAILABLE_REVIEW_CLASSES:
logging.info(f"Start review in '{mode}' mode.")
else:
raise ValueError(f"Unknown mode '{mode}'.")
logging.debug(settings)
# Initialize models.
random_state = get_random_state(seed)
train_model = get_classifier(settings.model,
**settings.model_param,
random_state=random_state)
query_model = get_query_model(settings.query_strategy,
**settings.query_param,
random_state=random_state)
balance_model = get_balance_model(settings.balance_strategy,
**settings.balance_param,
random_state=random_state)
feature_model = get_feature_model(settings.feature_extraction,
**settings.feature_param,
random_state=random_state)
# LSTM models need embedding matrices.
if train_model.name.startswith("lstm-"):
texts = as_data.texts
train_model.embedding_matrix = feature_model.get_embedding_matrix(
texts, embedding_fp)
# prior knowledge
if prior_idx is not None and prior_record_id is not None and \
len(prior_idx) > 0 and len(prior_record_id) > 0:
raise ValueError(
"Not possible to provide both prior_idx and prior_record_id"
)
if prior_record_id is not None and len(prior_record_id) > 0:
prior_idx = convert_id_to_idx(as_data, prior_record_id)
# Initialize the review class.
if mode == "simulate":
reviewer = ReviewSimulate(as_data,
model=train_model,
query_model=query_model,
balance_model=balance_model,
feature_model=feature_model,
n_papers=settings.n_papers,
n_instances=settings.n_instances,
n_queries=settings.n_queries,
prior_idx=prior_idx,
n_prior_included=settings.n_prior_included,
n_prior_excluded=settings.n_prior_excluded,
state_file=state_file,
**kwargs)
elif mode == "minimal":
reviewer = MinimalReview(as_data,
model=train_model,
query_model=query_model,
balance_model=balance_model,
feature_model=feature_model,
n_papers=settings.n_papers,
n_instances=settings.n_instances,
n_queries=settings.n_queries,
state_file=state_file,
**kwargs)
else:
raise ValueError("Error finding mode, should never come here...")
return reviewer
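# Minimal usage sketch (the dataset reference and state file name are
# illustrative; any dataset accepted by load_data works):
#
#     reviewer = get_reviewer("benchmark:van_de_Schoot_2017",
#                             mode="simulate",
#                             state_file="simulation.h5")
#     reviewer.review()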
def review(*args,
mode="simulate",
model=DEFAULT_MODEL,
save_model_fp=None,
**kwargs):
"""Perform a review from arguments. Compatible with the CLI interface"""
if mode not in AVAILABLE_CLI_MODI:
raise ValueError(f"Unknown mode '{mode}'.")
reviewer = get_reviewer(*args, mode=mode, model=model, **kwargs)
# output the prior indices
print("The following records are prior knowledge:\n")
for prior_record_id in reviewer.start_idx:
preview = preview_record(reviewer.as_data.record(prior_record_id))
print(f"{prior_record_id} - {preview}")
# Start the review process.
reviewer.review()
# If we're dealing with a keras model, we can save the last model weights.
if save_model_fp is not None and model in KERAS_MODELS:
save_model_h5_fp = splitext(save_model_fp)[0] + ".h5"
json_model = model.model.to_json()
with open(save_model_fp, "w") as f:
json.dump(json_model, f, indent=2)
model.model.save_weights(save_model_h5_fp, overwrite=True)
def review_simulate(dataset, *args, **kwargs):
"""CLI simulate mode."""
print(ASCII_LOGO + ASCII_MSG_SIMULATE)
# backwards comp
if isinstance(dataset, list) and len(dataset) >= 1 and \
dataset[0] in ["ptsd", "example_ptsd", "schoot"]:
print(f"\n\nWarning '{dataset[0]}' will deprecate in the future,",
"use 'benchmark:van_de_Schoot_2017' instead.\n\n")
dataset = "benchmark:van_de_Schoot_2017"
# backwards comp
if isinstance(dataset, list) and len(dataset) >= 1 and \
dataset[0] in ["ace", "example_cohen", "example_ace"]:
print(f"\n\nWarning '{dataset[0]}' will deprecate in the future,",
"use 'benchmark:Cohen_2006_ACEInhibitors' instead.\n\n")
dataset = "benchmark:Cohen_2006_ACEInhibitors"
# backwards comp
if isinstance(dataset, list) and len(dataset) >= 1 and \
dataset[0] in ["hall", "example_hall", "example_software"]:
print(f"\n\nWarning '{dataset[0]}' will deprecate in the future,",
"use 'benchmark:Hall_2012' instead.\n\n")
dataset = "benchmark:Hall_2012"
review(dataset, *args, mode='simulate', **kwargs)
| 40.793651
| 82
| 0.583502
|
79523e98d86d0844dbe52845ad09d768b13b8c35
| 427
|
py
|
Python
|
accelerator/migrations/0071_partnerteammember_champion_admin.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 6
|
2017-06-14T19:34:01.000Z
|
2020-03-08T07:16:59.000Z
|
accelerator/migrations/0071_partnerteammember_champion_admin.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | 160
|
2017-06-20T17:12:13.000Z
|
2022-03-30T13:53:12.000Z
|
accelerator/migrations/0071_partnerteammember_champion_admin.py
|
masschallenge/django-accelerator
|
8af898b574be3b8335edc8961924d1c6fa8b5fd5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.10 on 2021-08-26 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accelerator', '0070_judginground_champion_partner_label')
]
operations = [
migrations.AddField(
model_name='partnerteammember',
name='champion_admin',
field=models.BooleanField(default=False),
),
]
| 22.473684
| 67
| 0.639344
|
79523eb4acb1729bfa56a8a1f4d17b5c1574642e
| 12,690
|
py
|
Python
|
webapp/python/app.py
|
g4-5u/isucon7-qualify
|
4598bbe1941a5df14e55c7150970a3f16e32bcfc
|
[
"MIT"
] | null | null | null |
webapp/python/app.py
|
g4-5u/isucon7-qualify
|
4598bbe1941a5df14e55c7150970a3f16e32bcfc
|
[
"MIT"
] | null | null | null |
webapp/python/app.py
|
g4-5u/isucon7-qualify
|
4598bbe1941a5df14e55c7150970a3f16e32bcfc
|
[
"MIT"
] | null | null | null |
import MySQLdb.cursors
import flask
import functools
import hashlib
import math
import os
import pathlib
import random
import string
import tempfile
import time
from flask_caching import Cache
static_folder = pathlib.Path(__file__).resolve().parent.parent / 'public'
icons_folder = static_folder / 'icons'
app = flask.Flask(__name__, static_folder=str(static_folder), static_url_path='')
app.secret_key = 'tonymoris'
avatar_max_size = 1 * 1024 * 1024
if not os.path.exists(str(icons_folder)):
os.makedirs(str(icons_folder))
config = {
'db_host': os.environ.get('ISUBATA_DB_HOST', 'localhost'),
'db_port': int(os.environ.get('ISUBATA_DB_PORT', '3306')),
'db_user': os.environ.get('ISUBATA_DB_USER', 'root'),
'db_password': os.environ.get('ISUBATA_DB_PASSWORD', ''),
}
cache = Cache(app, config={
'CACHE_TYPE': 'RedisCache',
'CACHE_DEFAULT_TIMEOUT': 60,
'CACHE_REDIS_HOST': 'redis',
'CACHE_REDIS_PORT': 6379,
'CACHE_REDIS_DB': '0'
})
def dbh():
if hasattr(flask.g, 'db'):
return flask.g.db
flask.g.db = MySQLdb.connect(
host = config['db_host'],
port = config['db_port'],
user = config['db_user'],
passwd = config['db_password'],
db = 'isubata',
charset= 'utf8mb4',
cursorclass= MySQLdb.cursors.DictCursor,
autocommit = True,
)
cur = flask.g.db.cursor()
cur.execute("SET SESSION sql_mode='TRADITIONAL,NO_AUTO_VALUE_ON_ZERO,ONLY_FULL_GROUP_BY'")
return flask.g.db
@app.teardown_appcontext
def teardown(error):
if hasattr(flask.g, "db"):
flask.g.db.close()
@app.route('/initialize')
def get_initialize():
cur = dbh().cursor()
cur.execute("DELETE FROM user WHERE id > 1000")
cur.execute("DELETE FROM image WHERE id > 1001")
cur.execute("DELETE FROM channel WHERE id > 10")
cur.execute("DELETE FROM message WHERE id > 10000")
cur.execute("DELETE FROM haveread")
cur.close()
return ('', 204)
def db_get_user(cur, user_id):
cur.execute("SELECT * FROM user WHERE id = %s", (user_id,))
return cur.fetchone()
def db_add_message(cur, channel_id, user_id, content):
cur.execute("INSERT INTO message (channel_id, user_id, content, created_at) VALUES (%s, %s, %s, NOW())",
(channel_id, user_id, content))
def login_required(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not "user_id" in flask.session:
return flask.redirect('/login', 303)
flask.request.user_id = user_id = flask.session['user_id']
user = db_get_user(dbh().cursor(), user_id)
if not user:
flask.session.pop('user_id', None)
return flask.redirect('/login', 303)
flask.request.user = user
return func(*args, **kwargs)
return wrapper
def random_string(n):
return ''.join([random.choice(string.ascii_letters + string.digits) for i in range(n)])
def register(cur, user, password):
salt = random_string(20)
pass_digest = hashlib.sha1((salt + password).encode('utf-8')).hexdigest()
try:
cur.execute(
"INSERT INTO user (name, salt, password, display_name, avatar_icon, created_at)"
" VALUES (%s, %s, %s, %s, %s, NOW())",
(user, salt, pass_digest, user, "default.png"))
cur.execute("SELECT LAST_INSERT_ID() AS last_insert_id")
return cur.fetchone()['last_insert_id']
except MySQLdb.IntegrityError:
flask.abort(409)
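# Password handling note: register() stores sha1(salt + password) with a fresh
# 20-character random salt per user, and post_login() below recomputes the same
# digest against the stored salt to verify a login attempt.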
@app.route('/')
def get_index():
if "user_id" in flask.session:
return flask.redirect('/channel/1', 303)
return flask.render_template('index.html')
def get_channel_list_info(focus_channel_id=None):
cur = dbh().cursor()
cur.execute("SELECT * FROM channel ORDER BY id")
channels = cur.fetchall()
description = ""
for c in channels:
if c['id'] == focus_channel_id:
description = c['description']
break
return channels, description
@app.route('/channel/<int:channel_id>')
@login_required
def get_channel(channel_id):
channels, description = get_channel_list_info(channel_id)
return flask.render_template('channel.html',
channels=channels, channel_id=channel_id, description=description)
@app.route('/register')
def get_register():
return flask.render_template('register.html')
@app.route('/register', methods=['POST'])
def post_register():
name = flask.request.form['name']
pw = flask.request.form['password']
if not name or not pw:
flask.abort(400)
user_id = register(dbh().cursor(), name, pw)
flask.session['user_id'] = user_id
return flask.redirect('/', 303)
@app.route('/login')
def get_login():
return flask.render_template('login.html')
@app.route('/login', methods=['POST'])
def post_login():
name = flask.request.form['name']
cur = dbh().cursor()
cur.execute("SELECT id, salt, password FROM user WHERE name = %s", (name,))
row = cur.fetchone()
if not row or row['password'] != hashlib.sha1(
(row['salt'] + flask.request.form['password']).encode('utf-8')).hexdigest():
flask.abort(403)
flask.session['user_id'] = row['id']
return flask.redirect('/', 303)
@app.route('/logout')
def get_logout():
flask.session.pop('user_id', None)
return flask.redirect('/', 303)
@app.route('/message', methods=['POST'])
def post_message():
    user_id = flask.session.get('user_id')
# user = db_get_user(dbh().cursor(), user_id)
message = flask.request.form['message']
channel_id = int(flask.request.form['channel_id'])
if not user_id or not message or not channel_id:
flask.abort(403)
db_add_message(dbh().cursor(), channel_id, user_id, message)
return ('', 204)
@app.route('/message')
def get_message():
user_id = flask.session.get('user_id')
if not user_id:
flask.abort(403)
channel_id = int(flask.request.args.get('channel_id'))
last_message_id = int(flask.request.args.get('last_message_id'))
cur = dbh().cursor()
cur.execute("SELECT id, user_id, content, created_at FROM message WHERE id > %s AND channel_id = %s ORDER BY id DESC LIMIT 100",
(last_message_id, channel_id))
rows = cur.fetchall()
response = []
for row in rows:
r = {}
r['id'] = row['id']
cur.execute("SELECT name, display_name, avatar_icon FROM user WHERE id = %s", (row['user_id'],))
r['user'] = cur.fetchone()
r['date'] = row['created_at'].strftime("%Y/%m/%d %H:%M:%S")
r['content'] = row['content']
response.append(r)
response.reverse()
max_message_id = max(r['id'] for r in rows) if rows else 0
cur.execute('INSERT INTO haveread (user_id, channel_id, message_id, updated_at, created_at)'
' VALUES (%s, %s, %s, NOW(), NOW())'
' ON DUPLICATE KEY UPDATE message_id = %s, updated_at = NOW()',
(user_id, channel_id, max_message_id, max_message_id))
return flask.jsonify(response)
@app.route('/fetch')
def fetch_unread():
user_id = flask.session.get('user_id')
if not user_id:
flask.abort(403)
# time.sleep(1.0)
cur = dbh().cursor()
cur.execute('SELECT id FROM channel')
rows = cur.fetchall()
channel_ids = [row['id'] for row in rows]
res = []
for channel_id in channel_ids:
cur.execute('SELECT message_id FROM haveread WHERE user_id = %s AND channel_id = %s',
(user_id, channel_id))
row = cur.fetchone()
if row:
cur.execute('SELECT COUNT(*) as cnt FROM message WHERE channel_id = %s AND %s < id',
(channel_id, row['message_id']))
else:
cur.execute('SELECT COUNT(*) as cnt FROM message WHERE channel_id = %s', (channel_id,))
r = {}
r['channel_id'] = channel_id
r['unread'] = int(cur.fetchone()['cnt'])
res.append(r)
return flask.jsonify(res)
@app.route('/history/<int:channel_id>')
@login_required
def get_history(channel_id):
page = flask.request.args.get('page')
if not page:
page = '1'
if not page.isnumeric():
flask.abort(400)
page = int(page)
N = 20
cur = dbh().cursor()
cur.execute("SELECT COUNT(*) as cnt FROM message WHERE channel_id = %s", (channel_id,))
cnt = int(cur.fetchone()['cnt'])
max_page = math.ceil(cnt / N)
if not max_page:
max_page = 1
if not 1 <= page <= max_page:
flask.abort(400)
cur.execute("SELECT id, user_id, content, created_at FROM message WHERE channel_id = %s ORDER BY id DESC LIMIT %s OFFSET %s",
(channel_id, N, (page - 1) * N))
rows = cur.fetchall()
messages = []
for row in rows:
r = {}
r['id'] = row['id']
cur.execute("SELECT name, display_name, avatar_icon FROM user WHERE id = %s", (row['user_id'],))
r['user'] = cur.fetchone()
r['date'] = row['created_at'].strftime("%Y/%m/%d %H:%M:%S")
r['content'] = row['content']
messages.append(r)
messages.reverse()
channels, description = get_channel_list_info(channel_id)
return flask.render_template('history.html',
channels=channels, channel_id=channel_id,
messages=messages, max_page=max_page, page=page)
@app.route('/profile/<user_name>')
@login_required
def get_profile(user_name):
channels, _ = get_channel_list_info()
cur = dbh().cursor()
cur.execute("SELECT * FROM user WHERE name = %s", (user_name,))
user = cur.fetchone()
if not user:
flask.abort(404)
self_profile = flask.request.user['id'] == user['id']
return flask.render_template('profile.html', channels=channels, user=user, self_profile=self_profile)
@app.route('/add_channel')
@login_required
def get_add_channel():
channels, _ = get_channel_list_info()
return flask.render_template('add_channel.html', channels=channels)
@app.route('/add_channel', methods=['POST'])
@login_required
def post_add_channel():
name = flask.request.form['name']
description = flask.request.form['description']
if not name or not description:
flask.abort(400)
cur = dbh().cursor()
cur.execute("INSERT INTO channel (name, description, updated_at, created_at) VALUES (%s, %s, NOW(), NOW())",
(name, description))
channel_id = cur.lastrowid
return flask.redirect('/channel/' + str(channel_id), 303)
@app.route('/profile', methods=['POST'])
@login_required
def post_profile():
user_id = flask.session.get('user_id')
if not user_id:
flask.abort(403)
cur = dbh().cursor()
# user = db_get_user(cur, user_id)
# if not user:
# flask.abort(403)
display_name = flask.request.form.get('display_name')
avatar_name = None
avatar_data = None
if 'avatar_icon' in flask.request.files:
file = flask.request.files['avatar_icon']
if file.filename:
ext = os.path.splitext(file.filename)[1] if '.' in file.filename else ''
if ext not in ('.jpg', '.jpeg', '.png', '.gif'):
flask.abort(400)
with tempfile.TemporaryFile() as f:
file.save(f)
f.flush()
if avatar_max_size < f.tell():
flask.abort(400)
f.seek(0)
data = f.read()
digest = hashlib.sha1(data).hexdigest()
avatar_name = digest + ext
avatar_data = data
if avatar_name and avatar_data:
cur.execute("INSERT INTO image (name, data) VALUES (%s, _binary %s)", (avatar_name, avatar_data))
cur.execute("UPDATE user SET avatar_icon = %s WHERE id = %s", (avatar_name, user_id))
if display_name:
cur.execute("UPDATE user SET display_name = %s WHERE id = %s", (display_name, user_id))
return flask.redirect('/', 303)
def ext2mime(ext):
if ext in ('.jpg', '.jpeg'):
return 'image/jpeg'
if ext == '.png':
return 'image/png'
if ext == '.gif':
return 'image/gif'
return ''
@app.route('/icons/<file_name>')
@cache.cached(timeout=60)
def get_icon(file_name):
cur = dbh().cursor()
cur.execute("SELECT data FROM image WHERE name = %s", (file_name,))
row = cur.fetchone()
ext = os.path.splitext(file_name)[1] if '.' in file_name else ''
mime = ext2mime(ext)
if row and mime:
return flask.Response(row['data'], mimetype=mime)
flask.abort(404)
if __name__ == "__main__":
app.run(port=8080, debug=True, threaded=True)
| 30.726392
| 132
| 0.619307
|
79523eedb4e3585cf02cda1512a11b90da2d1239
| 1,248
|
py
|
Python
|
cgi-bin/registro.py
|
raulsenaferreira/Projeto-Myosotis
|
b51cc4d7970f8647bcfde8f6dc9fa3d0c96093b6
|
[
"Apache-2.0"
] | 5
|
2015-09-15T15:30:49.000Z
|
2021-07-16T23:11:35.000Z
|
cgi-bin/registro.py
|
raulsenaferreira/Projeto-Myosotis
|
b51cc4d7970f8647bcfde8f6dc9fa3d0c96093b6
|
[
"Apache-2.0"
] | 6
|
2015-08-08T22:47:19.000Z
|
2015-09-08T01:37:49.000Z
|
cgi-bin/registro.py
|
raulsenaferreira/Projeto-Myosotis
|
b51cc4d7970f8647bcfde8f6dc9fa3d0c96093b6
|
[
"Apache-2.0"
] | 2
|
2016-07-11T19:56:26.000Z
|
2018-04-08T14:07:52.000Z
|
class Registro(object):
def __init__(self, nome, imagem, sexo, olhos, corDaPele, cabelo, pesoAproximado, alturaAproximada, tipoFisico, transtornoMental, idade, dataNascimento, diasDesaparecido, dataDesaparecimento, bairroDesaparecimento, cidadeDesaparecimento, ufDesaparecimento, marcaCaracteristica, status, informacoes, boletimOcorrencia, fonte):
self.nome = nome
self.imagem = imagem
self.sexo = sexo
self.olhos = olhos
self.corDaPele = corDaPele
self.cabelo = cabelo
self.pesoAproximado = pesoAproximado
self.alturaAproximada = alturaAproximada
        self.tipoFisico = tipoFisico
self.transtornoMental = transtornoMental
self.idade = idade
self.dataNascimento = dataNascimento
self.diasDesaparecido = diasDesaparecido
self.dataDesaparecimento = dataDesaparecimento
self.bairroDesaparecimento = bairroDesaparecimento
self.cidadeDesaparecimento = cidadeDesaparecimento
self.ufDesaparecimento = ufDesaparecimento
self.marcaCaracteristica = marcaCaracteristica
self.status = status
self.informacoes = informacoes
self.boletimOcorrencia = boletimOcorrencia
self.fonte = fonte
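# Illustrative usage sketch: every value below is a hypothetical placeholder, not data
# from any real record; it only shows the positional order the Registro constructor expects.
if __name__ == "__main__":
    exemplo = Registro(
        "Fulano de Tal", "foto_001.jpg", "M", "castanhos", "parda", "preto",
        70, 1.75, "magro", "nao", 30, "1990-01-01",
        10, "2020-01-01", "Centro", "Rio de Janeiro", "RJ",
        "tatuagem no braco esquerdo", "desaparecido",
        "Visto pela ultima vez perto da estacao.", "BO-0000/2020", "fonte de exemplo")
    print(exemplo.nome, exemplo.status)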
| 49.92
| 328
| 0.72516
|
79523fdc5318bd999f7805caf1edfefeb50d75b7
| 859
|
py
|
Python
|
scam/types/coin_solution.py
|
grayfallstown/scam-blockchain
|
2183020cc74bbd1a63dda6eb0d0e73c2a3429594
|
[
"Apache-2.0"
] | 12
|
2021-08-04T14:35:02.000Z
|
2022-02-09T04:31:44.000Z
|
scam/types/coin_solution.py
|
grayfallstown/scam-blockchain
|
2183020cc74bbd1a63dda6eb0d0e73c2a3429594
|
[
"Apache-2.0"
] | 8
|
2021-08-04T20:58:10.000Z
|
2021-09-11T17:08:28.000Z
|
scam/types/coin_solution.py
|
grayfallstown/scam-blockchain
|
2183020cc74bbd1a63dda6eb0d0e73c2a3429594
|
[
"Apache-2.0"
] | 4
|
2021-07-28T09:50:55.000Z
|
2022-03-15T08:43:53.000Z
|
from dataclasses import dataclass
from typing import List
from scam.types.blockchain_format.coin import Coin
from scam.types.blockchain_format.program import SerializedProgram, INFINITE_COST
from scam.util.chain_utils import additions_for_solution
from scam.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class CoinSolution(Streamable):
"""
This is a rather disparate data structure that validates coin transfers. It's generally populated
with data from different sources, since burned coins are identified by name, so it is built up
    more often than it is streamed.
"""
coin: Coin
puzzle_reveal: SerializedProgram
solution: SerializedProgram
def additions(self) -> List[Coin]:
return additions_for_solution(self.coin.name(), self.puzzle_reveal, self.solution, INFINITE_COST)
| 34.36
| 105
| 0.784633
|
7952408e177f6ebf5dc3b4321167f07018bd57ba
| 7,476
|
py
|
Python
|
web_scraper.py
|
lschimmel/scraper
|
1937e0a4b1f7206b8871b3e93314e67635a4012c
|
[
"MIT"
] | null | null | null |
web_scraper.py
|
lschimmel/scraper
|
1937e0a4b1f7206b8871b3e93314e67635a4012c
|
[
"MIT"
] | null | null | null |
web_scraper.py
|
lschimmel/scraper
|
1937e0a4b1f7206b8871b3e93314e67635a4012c
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import os
import pathlib
import re
import csv
N = 100
# scrapes webpage's search results
# saves a subset of the webpage, and images to a folder
def main():
#test conditions
# url = 'URL GOES HERE'
# i = 1
# scrape_how_to(url, i)
# log = ['test', 'item']
# failed = ['fone', 'f2']
# i = 0
# savelog(failed, log, i)
# run conditions
log = []
failed = []
for i in range(0, N):
if i == (N/2):
savelog(log, failed, i)
else:
print('\npage: ', i, '\n____________________________________________________________________________________')
try:
                filename = scrape_search_results(i, failed)
url = next_url(filename, i, log)
except Exception:
pass
print('failed:\n', failed)
savelog(log, failed, i)
def savelog(log, failed, i):
filename = 'log' + str(i) + '.txt'
filename_csv = 'log' + str(i) + '.csv'
with open(filename, 'w', encoding='utf-8') as f:
f.write('log\n')
for item in log:
f.write(str(item))
f.write('\n')
f.write('\n\nfailed\n')
for item in failed:
f.write(str(item))
f.write('\n')
    with open(filename_csv, 'w', newline='') as e:
writer = csv.writer(e)
for item in log:
writer.writerow([item])
def scrape_search_results(i, failed):
url = 'URL GOES HERE' + str(i*24)
html_content = requests.get(url).text
soup = BeautifulSoup(html_content, "lxml")
# print(soup)
soupa = soup.find_all('a')
filename = 'searchresults' + str(i) + '.txt'
link_set = set()
for link in soupa:
link_txt = str(link.get('href')) + "\n"
link_set.add(link_txt)
# print('link set:', link_set)
with open(filename, 'w') as f:
for item in link_set:
# print(item)
link_txt1 = str(item)
link_txt2 = link_txt1.replace('\\', '')
link_txt3 = link_txt2.replace('"n', '')
link_txt4 = link_txt3.replace('"', '')
# print('link text 4:', link_txt4)
if 'URL GOES HERE' in link_txt4:
if not "specialoffers" in link_txt4 and not "sale" in link_txt4:
f.write(link_txt4)
else:
failed.append(link_txt4)
return filename
def next_url(filename, i, log):
#iterates over each url in a .txt file, scrapes the webpage for instructions and saves the result in a txt file (file name is the url, first element is the url, second element is either the instructions or the full scrape
with open(filename, 'r') as f:
for row in f:
url = str(row)
# print(url)
scrape_how_to(url, i, log)
# return url
def scrape_how_to(urlstringname, i, log):
#test conditions
url = urlstringname.strip('\n')
html_content = requests.get(url).text
soup = BeautifulSoup(html_content, "lxml")
soup_content = soup.find_all('div', attrs={'class':'loc content-body'})
#find subsections of the soup
#breadcrumbs for subfolders
breadcrumbs = soup.find('div', attrs= {'class': 'breadcrumbs'})
i = 0
pathlevel0 = 'output_dir'
if breadcrumbs:
for child in breadcrumbs:
if child.string != '\n' and child.string != None:
if i == 0:
i += 1
# print(i, child.string)
foldername = 'lvl' + str(i) + '_' + child.string
pathlevel1 = pathlib.Path(pathlevel0, foldername)
                    if pathlevel1.exists():
                        # print('folder level', i, 'exists')
                        pathlevelj = pathlevel1
                        pathlevelk = pathlib.Path(pathlevel1)
                    else:
                        pathlevel1.mkdir(parents=True, exist_ok=False)
                        pathlevelj = pathlib.Path(pathlevel1)
                        pathlevelk = pathlib.Path(pathlevel1)
else:
i += 1
foldername = 'lvl' + str(i) + '_' + child.string
                    pathlevelj = pathlib.Path(pathlevelk, foldername)
if pathlevelj.exists():
# print('folder level', i, 'exists')
pathlevelk = pathlib.Path(pathlevelj)
else:
# print('creating new level 1 folder: ', child.string)
pathlevelj.mkdir(parents=True, exist_ok=False)
pathlevelk = pathlib.Path(pathlevelj)
else:
pathlevelj = pathlib.Path(pathlevel0, 'Uncategorized')
#body content
soupa = soup.find_all('div', attrs={'class': 'loc content'})
url_name = url.replace('https://', '-')
url_name2 = url_name.replace('/', '-')
#make folders and name them + files according to headers
try:
        valid_file_name = re.sub(r'[^\w_.)( -]', '', soup.h1.string)
except Exception:
try:
            valid_file_name = re.sub(r'[^\w_.)( -]', '', soup.h2.string)
except Exception:
valid_file_name = 'scrapedarticle_' + str(i) + '.txt'
foldername = valid_file_name.strip(' ')
new_dir_name = 'a_' + foldername
if i == 1:
project_dir_name = pathlib.Path(pathlevel1, new_dir_name)
else:
project_dir_name = pathlib.Path(pathlevelj, new_dir_name)
new_dir = pathlib.Path(project_dir_name)
new_dir.mkdir(parents=True, exist_ok=True)
img_tags = soup.find_all('img')
try:
# save image urls as txt files
urls = [img['data-src'] for img in img_tags]
i = 0
for url in urls:
i += 1
image_filename = 'image_' + str(i) + '.txt'
with open(project_dir_name/image_filename, 'w') as f:
f.write(url)
except Exception:
image_filename = 'image_' + str(i) + '.txt'
# print(url)
with open(project_dir_name / image_filename, 'w', encoding='utf-8') as f:
f.write(str(urlstringname))
f.write('\nerror, saved img_tags instead\n')
f.write(str(img_tags))
pass
# checks if there is an article container content section, else if there is an instruction section class, else returns full soup
# save contents as txt file
filename = foldername + '.txt'
log.append(filename)
log.append(urlstringname)
try:
if soup_content:
with open(new_dir/filename, 'w', encoding="utf-8") as f:
f.write(str(urlstringname))
f.write(str(soup_content))
elif soupa:
with open(new_dir/filename, 'w', encoding="utf-8") as f:
f.write(str(urlstringname))
f.write('\naloc content-body not found, pulled loc content instead. \n\n')
f.write(str(soupa))
else:
with open(new_dir/filename, 'w', encoding="utf-8") as f:
f.write(str(urlstringname))
f.write('\nloc content-body not found, pulled soup instead. \n\n')
f.write(str(soup))
except Exception:
pass
# print(log)
return log
if __name__ == "__main__":
main()
| 35.942308
| 226
| 0.535447
|
795240d1b6fde8dbab5e8d0739943a699c504a76
| 2,693
|
py
|
Python
|
data/plugins/npcs/dialogue/vorki_pet.py
|
FavyTeam/Elderscape_server
|
38bf75396e4e13222be67d5f15eb0b9862dca6bb
|
[
"MIT"
] | 3
|
2019-05-09T16:59:13.000Z
|
2019-05-09T18:29:57.000Z
|
data/plugins/npcs/dialogue/vorki_pet.py
|
FavyTeam/Elderscape_server
|
38bf75396e4e13222be67d5f15eb0b9862dca6bb
|
[
"MIT"
] | null | null | null |
data/plugins/npcs/dialogue/vorki_pet.py
|
FavyTeam/Elderscape_server
|
38bf75396e4e13222be67d5f15eb0b9862dca6bb
|
[
"MIT"
] | 7
|
2019-07-11T23:04:40.000Z
|
2021-08-02T14:27:13.000Z
|
#Plugin to handle dialogue for Vorki (Vorkath) pet
#Written by Owain
#15/08/18
import random
dialogue_ids = [8501, 8502, 8503, 8504, 8503, 8504, 8505, 8506, 8507, 8508, 8509, 8510]
def first_click_npc_8025(player):
player.getDH().sendDialogues(8500)
def chat_8500(player):
player.getDH().sendPlayerChat("Hey Vorki, got any interesting dragon facts?", 591)
dialogue = random.choice(dialogue_ids)
    player.nextDialogue = dialogue
def chat_8501(player):
player.getDH().sendNpcChat("Although they have wings, dragons rarely fly. This is because" , "the animals they prey on are all ground dwelling.", 591)
def chat_8502(player):
player.getDH().sendNpcChat("Unlike their creators, dragons have the ability to reproduce." , "Like most reptiles, they are oviparous. This means that they" , "lay eggs rather than birthing live young.", 591)
def chat_8503(player):
player.getDH().sendNpcChat("Dragons have a very long lifespan and can live for thousands" , "of years. With a lifespan that long, most dragons die to" , "combat instead of age.", 591)
def chat_8504(player):
player.getDH().sendNpcChat("While very closely related, dragons and wyverns are" , "actually different species. You can easily tell the difference" , "between them by counting the number of legs, dragons" , "have four while wyverns have two.", 591)
def chat_8505(player):
player.getDH().sendNpcChat("Metallic dragons were created by inserting molten metal" , "into the eggs of other dragons. Very few eggs survived" , "this process.", 591)
def chat_8506(player):
player.getDH().sendNpcChat("The dragonkin created dragons by fusing their own lifeblood" , "with that of a lizard. The dragonkin created other species" , "in similar ways by using different types of reptile.", 591)
def chat_8507(player):
player.getDH().sendNpcChat("Dragons have the ability to speak. However, most dragons" , "don't have the brain capacity to do it very well.", 591)
def chat_8508(player):
player.getDH().sendNpcChat("Dragons share their name with dragon equipment, which" , "was also created by the dragonkin. This equipment is" , "fashioned out of Orikalkum.", 591)
def chat_8509(player):
player.getDH().sendNpcChat("Although very aggressive, dragons do not typically stray from" , "their own territory. They instead make their homes in places" , "where there is plenty of prey to be found.", 591)
def chat_8510(player):
player.getDH().sendNpcChat("Dragons have a duct in their mouth from which they" , "can expel various internally produced fluids. The most common" , "of these is a fluid which ignites when it reacts" , "with air. This is how dragons breathe fire.", 591)
| 53.86
| 256
| 0.738953
|
7952417bbbe7ba6bc326dcfe2f03b6078660121a
| 60,255
|
py
|
Python
|
alphax/factor_alpha101.py
|
wangjiehui11235/panther
|
cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07
|
[
"Apache-2.0"
] | 3
|
2020-01-23T22:23:08.000Z
|
2020-10-12T20:02:16.000Z
|
alphax/factor_alpha101.py
|
wangjiehui11235/panther
|
cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07
|
[
"Apache-2.0"
] | 1
|
2019-10-28T05:53:08.000Z
|
2019-10-28T05:53:08.000Z
|
alphax/factor_alpha101.py
|
wangjiehui11235/panther
|
cf1ca2f0c7107c5cdacf2f7ff4002d43427d9b07
|
[
"Apache-2.0"
] | 9
|
2019-08-21T07:48:32.000Z
|
2020-04-04T09:17:54.000Z
|
# -*- coding: utf-8 -*-
import pdb
import numba
import six
import pandas as pd
import numpy as np
import inspect
import datetime
from sklearn import preprocessing
from numpy import log
import statsmodels.api as sm
from utilities.singleton import Singleton
# rolling corr of two pandas dataframes
def rolling_corr(x, y, win):
corr_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)
for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):
corr_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end])
return corr_df
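# Illustrative usage sketch on made-up toy data: rolling_corr lines up two
# dates-by-columns frames and fills each row with the trailing-window,
# column-wise correlation (rows before the first full window stay NaN).
def _demo_rolling_corr():
    dates = pd.date_range('2020-01-01', periods=6, freq='D')
    x = pd.DataFrame(np.random.rand(6, 2), index=dates, columns=['A', 'B'])
    y = pd.DataFrame(np.random.rand(6, 2), index=dates, columns=['A', 'B'])
    return rolling_corr(x, y, win=3)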
# rolling cov of two pandas dataframes
def rolling_cov(x, y, win):
cov_df = pd.DataFrame(data=np.NaN, index=x.index, columns=x.columns)
for begin, end in zip(x.index[:-win + 1], x.index[win - 1:]):
x_std = x.loc[begin:end].std()
y_std = y.loc[begin:end].std()
cov_df.loc[end] = x.loc[begin:end].corrwith(y.loc[begin:end]) * x_std * y_std
return cov_df
# rolling rank of a pandas dataframe
def rolling_rank(df, win):
rank_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)
for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):
rank_df.loc[end] = df.loc[begin:end].rank(axis=0, pct=True).iloc[-1]
return rank_df
# rolling dot of a pandas dataframe
def rolling_dot(df, x, win):
dot_df = pd.DataFrame(data=np.NaN, index=df.index, columns=df.columns)
for begin, end in zip(df.index[:-win + 1], df.index[win - 1:]):
# dot_df.loc[end] = x.dot(df.loc[begin:end])
dot_df.loc[end] = np.dot(x, df.loc[begin:end].values)
return dot_df
# rolling regression residual
def rolling_regresi(y, x, win):
resi_df = pd.DataFrame(data=np.NaN, index=y.index, columns=y.columns)
for begin, end in zip(y.index[:-win + 1], y.index[win - 1:]):
yy = y.loc[begin:end]
xx = x.loc[begin:end]
resi_df.loc[end] = sm.OLS(yy, sm.add_constant(xx)).fit().resid.loc[end]
return resi_df
# columns covariance of two dataframes
def df_covariance(x, y):
y = y[x.columns]
corr_se = x.corrwith(y)
x_cov, y_cov = np.diag(np.cov(x.T)), np.diag(np.cov(y.T))
cov_se = (corr_se * np.sqrt(x_cov) * np.sqrt(y_cov))
return cov_se
# return a series of decay linear sum value of last win rows of dataframe df.
def decay_linear(df, win):
weights = list(range(1, win + 1))
weights = [x * 1. / np.sum(weights) for x in weights]
dot_df = rolling_dot(df.iloc[-win:], weights, win)
return dot_df.iloc[-1]
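# Illustrative usage sketch on made-up toy data: decay_linear weights the last
# `win` rows linearly (most recent row heaviest, weight win / sum(1..win)) and
# returns one weighted sum per column.
def _demo_decay_linear():
    dates = pd.date_range('2020-01-01', periods=5, freq='D')
    df = pd.DataFrame({'A': [1.0, 2.0, 3.0, 4.0, 5.0]}, index=dates)
    # weights for win=3 are [1/6, 2/6, 3/6] over the last three rows,
    # giving (3*1 + 4*2 + 5*3) / 6 = 26/6 for column 'A'
    return decay_linear(df, win=3)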
# return a dataframe of rolling decay linear sum value of dataframe df.
def rolling_decay(df, win):
weights = list(range(1, win + 1))
weights = [x * 1. / np.sum(weights) for x in weights]
dot_df = rolling_dot(df, weights, win)
return dot_df
# return winsorized series
def se_winsorize(se, method='sigma', limits=(3.0, 3.0), drop=False):
se = se.copy(deep=True)
if method == 'quantile':
down, up = se.quantile([limits[0], 1.0 - limits[1]])
elif method == 'sigma':
std, mean = se.std(), se.mean()
down, up = mean - limits[0]*std, mean + limits[1]*std
if drop:
se[se<down] = np.NaN
se[se>up] = np.NaN
else:
se[se<down] = down
se[se>up] = up
return se
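# Illustrative usage sketch on made-up toy data: with method='sigma', values
# outside mean +/- limit*std are clipped to those bounds (or set to NaN when
# drop=True); method='quantile' clips at the given quantiles instead.
def _demo_se_winsorize():
    se = pd.Series([1.0, 2.0, 3.0, 2.0, 100.0])
    clipped = se_winsorize(se, method='sigma', limits=(1.0, 1.0))
    # the 100.0 outlier is pulled down to mean + 1*std of the original series
    return clipped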
# return standardized series
def se_standardize(se):
try:
res = (se - se.mean()) / se.std()
except:
res = pd.Series(data=np.NaN, index=se.index)
return res
# return indneutralized series
def se_indneutralize(se, indu_dict):
date = se.name[0] if type(se.name) is tuple else se.name
indu = indu_dict[date]
try:
se = se_winsorize(se, method='quantile', limits=(0.05, 0.05))
se = se_winsorize(se, method='sigma', limits=(3.0, 3.0))
se = se.dropna()
if se.empty:
return se
codes = se.index.intersection(indu.index)
se = se.loc[codes]
indu = indu.loc[codes]
x = np.linalg.lstsq(indu.values, np.matrix(se).T)[0]
y = se - indu.dot(x)[0]
    except:
        print(date, ': neutralize error!')
        y = pd.Series(data=np.NaN, index=se.index)
    return y
# return indneutralized pandas dataframe
def df_indneutralize(df, indu_dict):
neu_dict = {}
for bar_id in df.index:
se = df.loc[bar_id]
neu_dict[bar_id] = se_indneutralize(se, indu_dict)
neu_df = pd.DataFrame(neu_dict).T
return neu_df
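# Illustrative usage sketch on made-up toy data: indu_dict maps each date to a
# 0/1 industry-dummy frame (stock codes in rows, industries in columns), while
# the factor frame has dates in rows and the same stock codes in columns.
def _demo_df_indneutralize():
    dates = pd.date_range('2020-01-06', periods=2, freq='D')
    codes = ['000001', '000002', '000003', '000004']
    indu = pd.DataFrame([[1, 0], [1, 0], [0, 1], [0, 1]],
                        index=codes, columns=['bank', 'tech'])
    indu_dict = {d: indu for d in dates}
    factor = pd.DataFrame(np.random.rand(2, 4), index=dates, columns=codes)
    # each row of the result is the (winsorized) cross-section with its industry
    # component regressed out
    return df_indneutralize(factor, indu_dict)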
@six.add_metaclass(Singleton)
class FactorAlpha101(object):
def __init__(self):
__str__ = 'factor_alpha101'
self.name = 'Alpha101'
self.factor_type1 = 'Features'
self.factor_type2 = 'Features'
self.description = 'price and volumns features'
def alpha101_2(self, data, param1=2, param2=6, dependencies=['turnover_vol', 'close_price', 'open_price'],
max_window=10):
# -1 * correlation(rank(delta(LOG(VOLUME), 2)), rank(((CLOSE - OPEN) / OPEN)), 6)
        # both price and volume enter as first-order differences, so adjusted prices can be used for both
rank_price = ((data['close_price'] - data['open_price']) / data['open_price']).rank(axis=1, pct=True)
rank_volume = (np.log(data['turnover_vol'])).diff(periods=param1).rank(axis=1, pct=True)
corr_win = param2
id_begin = rank_price.index[-corr_win]
alpha = rank_price.loc[id_begin:].corrwith(rank_volume.loc[id_begin:])
return -1.0 * alpha
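    def _demo_alpha101_2(self):
        # Illustrative usage sketch on made-up toy data: every entry of `data` is a
        # dates-by-stock-codes DataFrame covering at least `max_window` bars, and
        # each alpha method returns one value per stock code.
        dates = pd.date_range('2020-01-01', periods=10, freq='D')
        codes = ['000001', '000002', '000003']
        def rand_df(scale=1.0):
            return pd.DataFrame((np.random.rand(10, 3) + 1.0) * scale,
                                index=dates, columns=codes)
        data = {'open_price': rand_df(), 'close_price': rand_df(),
                'turnover_vol': rand_df(1.0e6)}
        return self.alpha101_2(data)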
def alpha101_3(self, data, param1=10, param2=-1, dependencies=['open_price', 'turnover_vol'], max_window=11):
# -1 * correlation(rank(OPEN), rank(VOLUME), 10)
rank_open_df = data['open_price'].rank(axis=1, pct=True)
rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param1
id_begin = rank_open_df.index[-corr_win]
alpha = rank_open_df.loc[id_begin:].corrwith(rank_volume_df.loc[id_begin:])
return float(param2) * alpha
def alpha101_4(self, data, param1=9, param2=-1, dependencies=['lowest_price'], max_window=10):
# -1 * ts_rank(rank(LOW), 9)
rank_low = data['lowest_price'].rank(axis=1, pct=True)
ts_win = param1
id_begin = rank_low.index[-ts_win]
alpha = rank_low.loc[id_begin:].rank(axis=0, pct=True).iloc[-1]
return float(param2) * alpha
def alpha101_5(self, data, param1=10, dependencies=['open_price',
'close_price', 'vwap'], max_window=10):
# rank((OPEN - (sum(VWAP, 10) / 10))) * (-1 * abs(rank((CLOSE - VWAP))))
mean_win = param1
open_vwap = data['open_price'] - data['vwap'].rolling(window=mean_win).mean()
rank_open = open_vwap.rank(axis=1, pct=True)
abs_rank = (data['close_price'] - data['vwap']).rank(axis=1, pct=True).abs() * (-1.0)
alpha = (rank_open * abs_rank).iloc[-1]
return alpha
def alpha101_6(self, data, param1=10, param2=-1, dependencies=['open_price', 'turnover_vol'], max_window=11):
# -1 * correlation(OPEN, VOLUME, 10)
# correlation of window history price and volume, use adjusted data here.
corr_win = param1
id_begin = data['open_price'].index[-corr_win]
alpha = data['open_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])
return float(param2) * alpha
def alpha101_11(self, data, param1=3, param2=3,
dependencies=['close_price', 'vwap', 'turnover_vol'], max_window=5):
# (rank(ts_max((VWAP - CLOSE), 3)) + rank(ts_min((VWAP - CLOSE), 3))) * rank(delta(VOLUME, 3))
ts_max = (data['vwap'] - data['close_price']).rolling(window=param1).max()
ts_min = (data['vwap'] - data['close_price']).rolling(window=param1).min()
delta_volume = data['turnover_vol'].diff(periods=param2)
rank_ts_max = ts_max.rank(axis=1, pct=True)
rank_ts_min = ts_min.rank(axis=1, pct=True)
rank_vol = delta_volume.rank(axis=1, pct=True)
alpha = ((rank_ts_max + rank_ts_min) * rank_vol).iloc[-1]
return alpha
def alpha101_12(self, data, param1=1, param2=-1, dependencies=['close_price', 'turnover_vol'], max_window=2):
# sign(delta(VOLUME, 1)) * (-1 * delta(CLOSE, 1))
alpha = np.sign(data['turnover_vol'].diff(periods=param1)) * data['close_price'].diff(periods=param1)
alpha = alpha.iloc[-1] * float(param2)
return alpha
def alpha101_13(self, data, param1=5, param2=-1, dependencies=['close_price', 'turnover_vol'], max_window=6):
# -1 * rank(covariance(rank(CLOSE), rank(VOLUME), 5))
rank_close_df = data['close_price'].rank(axis=1, pct=True)
rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param1
id_begin = rank_close_df.index[-corr_win]
alpha = df_covariance(rank_close_df.loc[id_begin:], rank_volume_df.loc[id_begin:])
return float(param2) * alpha.rank(pct=True)
def alpha101_14(self, data, param1=10, param2=3, param3=-1,
dependencies=['open_price', 'turnover_vol', 'returns'], max_window=10):
# (-1 * rank(delta(RETURNS, 3))) * correlation(OPEN, VOLUME, 10)
corr_win = param1
id_begin = data['open_price'].index[-corr_win]
corr_se = data['open_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])
rank_ret_se = (param3) * data['returns'].diff(periods=param2).rank(axis=1, pct=True).iloc[-1]
alpha = rank_ret_se * corr_se
return alpha
def alpha101_15(self, data, param1=3, param2=3, param3=-1,
dependencies=['highest_price', 'turnover_vol'], max_window=6):
# -1 * sum(rank(correlation(rank(HIGH), rank(VOLUME), 3)), 3)
rank_high_df = data['highest_price'].rank(axis=1, pct=True)
rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param1
corr_df = rolling_corr(rank_high_df, rank_volume_df, win=corr_win)
sum_win = param2
id_begin = corr_df.index[-sum_win]
alpha = corr_df.loc[id_begin:].rank(axis=1, pct=True).sum()
return float(param3) * alpha
def alpha101_16(self, data, param1=5, param2=-1,
dependencies=['highest_price', 'turnover_vol'], max_window=6):
# -1 * rank(covariance(rank(HIGH), rank(VOLUME), 5))
rank_high_df = data['highest_price'].rank(axis=1, pct=True)
rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param1
id_begin = rank_high_df.index[-corr_win]
alpha = df_covariance(rank_high_df.loc[id_begin:], rank_volume_df.loc[id_begin:])
return float(param2) * alpha.rank(pct=True)
def alpha101_18(self, data, param1=10, param2=5, param3=-1,
dependencies=['open_price', 'close_price'], max_window=10):
# -1 * rank((stddev(abs((CLOSE - OPEN)), 5) + (CLOSE - OPEN)) + correlation(CLOSE, OPEN, 10))
corr_win = param1
id_begin = data['open_price'].index[-corr_win]
corr_se = data['open_price'].loc[id_begin:].corrwith(data['close_price'].loc[id_begin:])
price = data['close_price'] - data['open_price']
price_se = (price.abs().rolling(window=param2).std() + price).iloc[-1]
alpha = float(param3) * (price_se + corr_se).rank(pct=True)
return alpha
def alpha101_19(self, data, param1=7, param2=70, param3=1, param4=-1,
dependencies=['close_price', 'returns'], max_window=70):
# -1 * sign(((CLOSE - delay(CLOSE, 7)) + delta(CLOSE, 7))) * (1 + rank(1 + sum(RETURNS, 250)))
sign_se = np.sign(data['close_price'].diff(param1)).iloc[-1]
# rank_se = (1.0 + data['returns'].rolling(window=250).sum()).iloc[-1].rank(pct=True) + 1.0
ret_win = param2
rank_se = (float(param3) + data['returns'].rolling(window=ret_win).sum()).iloc[-1].rank(pct=True)
alpha = float(param4) * sign_se * rank_se
return alpha
def alpha101_22(self, data, param1=5, param2=20, param3=-1,
dependencies=['close_price', 'highest_price', 'turnover_vol'], max_window=20):
# -1 * (delta(correlation(HIGH, VOLUME, 5), 5) * rank(stddev(CLOSE, 20)))
corr_df = rolling_corr(data['highest_price'], data['turnover_vol'], win=param1)
delta_corr_se = corr_df.diff(periods=param1).iloc[-1]
rank_std_se = data['close_price'].rolling(window=param2).std().rank(axis=1, pct=True).iloc[-1]
alpha = float(param3) * delta_corr_se * rank_std_se
return alpha
def alpha101_23(self, data, param1=20, param2=-1, param3=2, param4=0.25,
dependencies=['highest_price', 'highest_price'], max_window=20):
# ((sum(HIGH, 20) / 20) < HIGH) ? (-1 * delta(HIGH, 2)) : 0
# # original factor calc method
# mark = data['high'].rolling(window=20).mean() < data['high']
# delta_high = -1.0 * data['high_raw'].diff(2)
# delta_high[mark==False] = 0.0
# alpha = delta_high.iloc[-1]
# adjusted factor calc method
mark = data['highest_price'].rolling(window=param1).mean() < data['highest_price']
delta_high = float(param2) * data['highest_price'].diff(param3)
delta_high[mark==False] = delta_high[mark==False] * param4
alpha = delta_high.iloc[-1]
return alpha
def alpha101_24(self, data, param1=40, param2=20, param3=6, param4=-1,
dependencies=['close_price'], max_window=70):
# (((delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100)) < 0.05) ||
# ((delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100))== 0.05)) ?
# (-1 * (CLOSE - ts_min(CLOSE, 100))) : (-1 * delta(CLOSE, 3))
# # rearranged
# mask = (delta((sum(CLOSE, 100) / 100), 100) / delay(CLOSE, 100))
# mask > 0.05 ? (-1 * delta(CLOSE, 3)) : (-1 * (CLOSE - ts_min(CLOSE, 100)))
# # original factor calc method
# delta_close = data['close'].rolling(window=100).mean().diff(periods=100)
# delay_close = data['close'].shift(periods=100)
# mask = delta_close / delay_close
# mask_se = mask.iloc[-1] > 0.05
# true_se = -1.0 * data['close_raw'].diff(periods=3).iloc[-1]
# false_se = -1.0 * (data['close_raw'] - data['close_raw'].rolling(window=100).min()).iloc[-1]
# true_se = true_se.reindex(mask_se.index)
# false_index = mask_se[mask_se==False].index
# true_se.loc[false_index] = false_se.loc[false_index]
# # adjusted factor calc method
delta_close = data['close_price'].rolling(window=param1).mean().diff(periods=param2)
delay_close = data['close_price'].shift(periods=param2)
mask = delta_close / delay_close
mask_se = mask.iloc[-1] > mask.iloc[-1].median()
true_se = float(param4) * data['close_price'].diff(periods=param3).iloc[-1]
false_se = float(param4) * (data['close_price'] - data['close_price'].rolling(
window=param1).min()).iloc[-1]
true_se = true_se.reindex(mask_se.index)
false_index = mask_se[mask_se==False].index
true_se.loc[false_index] = false_se.loc[false_index]
return true_se
def alpha101_26(self, data, param1=10, param2=10, param3=5, param4=-1,
dependencies=['highest_price', 'turnover_vol'], max_window=30):
# -1 * ts_max(correlation(ts_rank(VOLUME, 5), ts_rank(HIGH, 5), 5), 3)
ts_rank_vol = rolling_rank(data['turnover_vol'], win=param1)
ts_rank_high = rolling_rank(data['highest_price'], win=param1)
corr_df = rolling_corr(ts_rank_vol, ts_rank_high, win=param2)
alpha = float(param4) * corr_df.rolling(window=param3).max().iloc[-1]
return alpha
def alpha101_27(self, data, param1=10, param2=2, param3=-1,
dependencies=['vwap', 'turnover_vol'], max_window=12):
# (0.5 < rank((sum(correlation(rank(VOLUME), rank(VWAP), 6), 2) / 2.0))) ? (-1 * 1) : 1
rank_vol = data['turnover_vol'].rank(axis=1, pct=True)
rank_vwap = data['vwap'].rank(axis=1, pct=True)
# # original factor calc method
# corr_df = rolling_corr(rank_vol, rank_vwap, win=10)
# corr_mean = corr_df.rolling(window=2).mean()
# alpha = corr_mean.rank(axis=1, pct=True).iloc[-1]
# alpha = -1.0 * np.sign((alpha - 0.5))
# adjusted factor calc method
# sum(correlation(rank(VOLUME), rank(VWAP), 6), 2) / 2.0
corr_df = rolling_corr(rank_vol, rank_vwap, win=param1)
corr_mean = corr_df.rolling(window=param2).mean()
alpha = float(param3)* corr_mean.iloc[-1]
return alpha
def alpha101_29(self, data, param1=5, param2=4, param3=3,param4=-1,param5=6,param7=20,
dependencies=['close_price', 'returns'], max_window=30):
# # original formula
# min(product(rank(sum(ts_min(rank(-1 * rank(delta(CLOSE, 5))), 2), 1)), 1), 5) +
# ts_rank(delay((-1 * RETURNS), 6), 5)
# # adjusted formula
# min(product(rank(sum(ts_min(rank(-1 * rank(delta(CLOSE, 5))), 4), 3)), 3), 5) +
# ts_rank(delay((-1 * RETURNS), 6), 20)
df = (float(param4) * data['close_price'].diff(periods=param1).rank(axis=1, pct=True)).rank(axis=1, pct=True)
df = np.log(df.rolling(window=param3).min().rolling(window=param3).sum()).rank(axis=1, pct=True)
df = df.rolling(window=param3).apply(lambda x: np.prod(x)).rolling(window=param1).min()
delay_ret = (float(param4) * data['returns']).shift(periods=param5)
rank_win = param7
id_begin = data['returns'].index[-rank_win]
ts_rank_ret = delay_ret.loc[id_begin:].rank(axis=0, pct=True)
alpha = df.iloc[-1] + ts_rank_ret.iloc[-1]
return alpha
'''
def alpha101_32(self, data, param1=7, param2=40, param3=5, param4=20,
dependencies=['close_price', 'vwap'], max_window=50):
# # original formula
# scale((sum(CLOSE, 7) / 7) - CLOSE) + 20 * scale(correlation(VWAP, delay(CLOSE, 5), 230))
# # adjusted formula
# scale((sum(CLOSE, 7) / 7) - CLOSE) + 20 * scale(correlation(VWAP, delay(CLOSE, 5), 40))
close_se = (data['close_price'].rolling(window=param1).mean() - data['close_price']).iloc[-1]
scale_close_se = close_se / close_se.abs().sum()
corr_win = param2
id_begin = data['close_price'].index[-corr_win]
corr_se = data['close_price'].shift(periods=param3).loc[id_begin:].corrwith(data['vwap'].loc[id_begin:])
scale_corr_se = corr_se / corr_se.abs().sum()
alpha = scale_close_se + param4 * scale_corr_se
return alpha
'''
def alpha101_36(self, data, param1=15, param2=6, param3=10, param4=20, param5=50,
param6=2.21, param7=0.7, param8=0.73, param9=0.6, param10=-1,
dependencies=['close_price', 'open_price', 'close_price',
'vwap', 'turnover_vol', 'returns'], max_window=60):
# # original formula
# 2.21 * rank(correlation((CLOSE - OPEN), delay(VOLUME, 1), 15)) +
# 0.7 * rank(OPEN - CLOSE) + 0.73 * rank(ts_rank(delay((-1 * RETURNS), 6), 5)) +
# rank(abs(correlation(VWAP, ADV20, 6))) + 0.6 * rank((sum(CLOSE, 200) / 200 - OPEN) * (CLOSE - OPEN))
# rank(correlation((CLOSE - OPEN), delay(VOLUME, 1), 15))
corr_win = param1
id_begin = data['close_price'].index[-corr_win]
corr_se = data['turnover_vol'].shift(periods=1
).loc[id_begin:].corrwith((data['close_price'] - data['open_price']).loc[id_begin:])
part1 = corr_se.rank(pct=True)
# rank(OPEN - CLOSE)
part2 = (data['open_price'] - data['close_price']).iloc[-1].rank(pct=True)
# rank(ts_rank(delay((-1 * RETURNS), 6), 5))
ts_rank_win = param1 # change from orignal 5 to 15
id_begin = data['returns'].index[-ts_rank_win]
ts_rank_df = (float(param10) * data['returns']).shift(periods=param2).loc[id_begin:].rank(axis=0, pct=True)
part3 = ts_rank_df.iloc[-1].rank(pct=True)
# rank(abs(correlation(VWAP, ADV20, 6)))
corr_win = param3 # change from orignal 6 to 10
id_begin = data['vwap'].index[-corr_win]
adv20 = data['turnover_vol'].rolling(window=param4).mean()
corr_se = data['vwap'].loc[id_begin:].corrwith(adv20.loc[id_begin:])
part4 = corr_se.abs().rank(pct=True)
# rank((sum(CLOSE, 200) / 200 - OPEN) * (CLOSE - OPEN))
sum_win = param5 # change from orignal 200 to 50
sum_close = data['close_price'].rolling(window=sum_win).mean() - data['open_price']
close_open = data['close_price'] - data['open_price']
part5 = (sum_close * close_open).iloc[-1].rank(pct=True)
alpha = param6 * part1 + param7 * part2 + param8 * part3 + part4 + param9 * part5
return alpha
def alpha101_40(self, data, param1=10, param2=10, param3=-1,
dependencies=['highest_price', 'turnover_vol'], max_window=12):
# (-1 * rank(stddev(HIGH, 10))) * correlation(HIGH, VOLUME, 10)
part1 = float(param3) * data['highest_price'].rolling(window=param1).std().iloc[-1].rank(pct=True)
corr_win = param2
id_begin = data['highest_price'].index[-corr_win]
part2 = data['highest_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])
alpha = part1 * part2
return alpha
def alpha101_44(self, data, param1=5, param2=-1,
dependencies=['highest_price', 'turnover_value'], max_window=11):
# -1 * correlation(HIGH, rank(VOLUME), 5)
high_df = data['highest_price']
rank_volume_df = data['turnover_value'].rank(axis=1, pct=True)
corr_win = param1
id_begin = high_df.index[-corr_win]
alpha = high_df.loc[id_begin:].corrwith(rank_volume_df.loc[id_begin:])
return float(param2) * alpha
def alpha101_45(self, data, param1=5, param2=20, param3=6, param4=6, param5=5, param6=20, param7=-1,
dependencies=['close_price', 'turnover_vol'], max_window=30):
# -1 * rank(sum(delay(CLOSE, 5), 20) / 20) * correlation(CLOSE, VOLUME, 2) * rank(correlation(sum(CLOSE, 5), sum(CLOSE, 20), 2))
# rank(sum(delay(CLOSE, 5), 20) / 20)
part1 = data['close_price'].shift(periods=param1).rolling(window=param2).mean().iloc[-1].rank(pct=True)
# correlation(CLOSE, VOLUME, 2)
corr_win = param3 # change from orignal 2 to 6
id_begin = data['close_price'].index[-corr_win]
part2 = data['close_price'].loc[id_begin:].corrwith(data['turnover_vol'].loc[id_begin:])
# rank(correlation(sum(CLOSE, 5), sum(CLOSE, 20), 2))
corr_win = param4 # change from orignal 2 to 6
id_begin = data['close_price'].index[-corr_win]
close_sum5 = data['close_price'].rolling(window=param5).sum()
close_sum20 = data['close_price'].rolling(window=param6).sum()
part3 = (close_sum5.loc[id_begin:].corrwith(close_sum20.loc[id_begin:])).rank(pct=True)
alpha = float(param7) * part1 * part2 * part3
return alpha
def alpha101_50(self, data, param1=5, param2=5, param3=-1,
dependencies=['vwap', 'turnover_vol'], max_window=10):
# -1 * ts_max(rank(correlation(rank(VOLUME), rank(VWAP), 5)), 5)
rank_vwap_df = data['vwap'].rank(axis=1, pct=True)
rank_volume_df = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param1
corr_df = rolling_corr(rank_vwap_df, rank_volume_df, win=corr_win)
ts_max_win = param2
id_begin = corr_df.index[-ts_max_win]
alpha = corr_df.loc[id_begin:].rank(axis=1, pct=True).max()
return float(param3) * alpha
def alpha101_52(self, data, param1=8, param2=8, param3=80, param4=8, param5=8,
dependencies=['lowest_price', 'returns', 'turnover_vol'], max_window=10):
# (-ts_min(LOW, 5) + delay(ts_min(LOW, 5), 5)) *
# rank(((sum(RETURNS, 240) - sum(RETURNS, 20)) / 220)) * ts_rank(VOLUME, 5)
# (-ts_min(LOW, 5) + delay(ts_min(LOW, 5), 5))
ts_max_win = param1
id_begin = data['lowest_price'].index[-ts_max_win]
part1 = data['lowest_price'].shift(periods=param2
).loc[id_begin:].min() - data['lowest_price'].loc[id_begin:].min()
# rank(((sum(RETURNS, 240) - sum(RETURNS, 20)) / 220))
long_win, short_win = param3, param4 # change from original 240,20 to 80,8
ret_se = data['returns'].iloc[-long_win:].sum() - data['returns'].iloc[-short_win:].sum()
part2 = (ret_se / (1.0 * (long_win - short_win))).rank(pct=True)
# ts_rank(VOLUME, 5)
ts_rank_win = param5
part3 = data['turnover_vol'].iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha = part1 * part2 * part3
return alpha
def alpha101_53(self, data, param1=2, param2=9, param3=0.001,
dependencies=['close_price', 'lowest_price', 'highest_price'], max_window=12):
# -1 * delta((((CLOSE - LOW) - (HIGH - CLOSE)) / (CLOSE - LOW)), 9)
# rearranged formula
# -1 * delta(((2 * CLOSE - LOW - HIGH) / (CLOSE - LOW)), 9)
price_df = (data['close_price'] * float(param1) - data['lowest_price'] - data['highest_price']) / (
(data['close_price'] - data['lowest_price']) + param3)
alpha = price_df.diff(periods=param2).iloc[-1]
return alpha
def alpha101_54(self, data, param1=0.001, param2=5, param3=4, param5=-1,
dependencies=['close_price', 'lowest_price', 'highest_price', 'open_price'], max_window=5):
# (-1 * ((LOW - CLOSE) * (OPEN^5))) / ((LOW - HIGH) * (CLOSE^5))
numerator = (data['lowest_price'] - data['close_price'] + param1) * (data['open_price'] ** param2)
denominator = (data['lowest_price'] - data['highest_price'] + param1) * (data['close_price'] ** param2)
# use mean average factor of ma_win bars
ma_win = param3
alpha = (float(param5)* numerator / denominator).iloc[-ma_win:].mean()
alpha[alpha==float(param5)] = np.NaN
return alpha
def alpha101_55(self, data, param1=12, param2=12, param3=6, param4=-1,
dependencies=['close_price','lowest_price', 'highest_price',
'turnover_value'], max_window=20):
# -1 * correlation(rank((CLOSE - ts_min(LOW, 12)) / (ts_max(HIGH, 12) - ts_min(LOW, 12))), rank(VOLUME), 6)
        # (CLOSE - ts_min(LOW, 12)) / (ts_max(HIGH, 12) - ts_min(LOW, 12)) is a dimensionless price ratio,
        # so adjusted prices are used here; turnover value stands in for volume below
ts_min_low = data['lowest_price'].rolling(window=param1).min()
ts_max_high = data['highest_price'].rolling(window=param2).max()
price_df = (data['close_price'] - ts_min_low) / (ts_max_high - ts_min_low)
rank_price = price_df.rank(axis=1, pct=True)
rank_volume = data['turnover_value'].rank(axis=1, pct=True)
corr_win = param3
corr_df = rolling_corr(rank_price, rank_volume, win=corr_win)
return float(param4) * corr_df.iloc[-1]
def alpha101_57(self, data, param1=2, param2=30, param3=4, param4=-1,
dependencies=['close_price', 'vwap'], max_window=35):
# -1 * (CLOSE - VWAP) / decay_linear(rank(ts_argmax(CLOSE, 30)), 2)
# (CLOSE - VWAP)
ma_win = param1
numerator = (data['close_price'] - data['vwap']).iloc[-ma_win:].mean()
# decay_linear(rank(ts_argmax(CLOSE, 30)), 2)
rank_df = data['close_price'].rolling(window=param2).apply(lambda x: x.argmax()).rank(axis=1, pct=True)
denominator = decay_linear(rank_df, win=param3) # change win from original 2 to 4
alpha = (float(param4) * numerator / denominator)
return alpha
def alpha101_58(self, data, param1=9, param2=8, param3=7, param4=-1,
dependencies=['vwap', 'turnover_vol','indu'], max_window=25):
# -1 * ts_rank(decay_linear(correlation(indneutralize(VWAP, indclass), VOLUME, 3.92795), 7.89291), 5.50322)
# indneutralize(VWAP, indclass)
neu_df = df_indneutralize(data['vwap'], data['indu'])
# # original formula
# corr_win, decay_win, ts_rank_win = 9, 8, 7
# decay_df = rolling_decay(rolling_corr(neu_df, data['volume_raw'], win=corr_win), win=decay_win)
# ts_rank_se = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# alpha = -1.0 * ts_rank_se
# adjusted formula --- use a new method instead of ts rank.
corr_win, decay_win, ts_mean_win = param1, param2, param3
decay_df = rolling_decay(rolling_corr(neu_df, data['turnover_vol'], win=corr_win), win=decay_win)
data_se = decay_df.iloc[-1] - decay_df.iloc[-ts_mean_win:].mean(axis=0)
alpha = float(param4)* data_se
return alpha
def alpha101_59(self, data, param1=0.7, param2=0.3, param3=9, param4=12, param5=10, param6=-1,
dependencies=['vwap', 'close_price', 'turnover_vol','indu'], max_window=30):
# -1 * ts_rank(decay_linear(correlation(indneutralize(((VWAP * 0.728317) + (VWAP * (1 - 0.728317))),
# indclass), VOLUME, 4.25197), 16.2289), 8.19648)
# Caution: the original formula is almost same with alpha101_58 (
# ((VWAP * 0.728317) + (VWAP * (1 - 0.728317))) == VWAP), so we take an adjusted formula here.
# adjusted formula
# -1 * ts_rank(decay_linear(correlation(indneutralize(((VWAP * 0.728317) + (CLOSE * (1 - 0.728317))),
# indclass), VOLUME, 4.25197), 16.2289), 8.19648)
# indneutralize(VWAP, indclass)
neu_df = df_indneutralize(data['vwap'] * param1 + data['close_price'] * param2, data['indu'])
# # original formula
# corr_win, decay_win, ts_rank_win = 9, 12, 10
# decay_df = rolling_decay(rolling_corr(neu_df, data['volume_raw'], win=corr_win), win=decay_win)
# ts_rank_se = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# alpha = -1.0 * ts_rank_se
# adjusted formula --- use a new method instead of ts rank.
corr_win, decay_win, ts_mean_win = param3, param4, param5
decay_df = rolling_decay(rolling_corr(neu_df, data['turnover_vol'], win=corr_win), win=decay_win)
data_se = decay_df.iloc[-1] - decay_df.iloc[-ts_mean_win:].mean(axis=0)
alpha = float(param6) * data_se
return alpha
def alpha101_62(self, data, param1=20, param2=10, param3=10, param4=2, param5=-1,
dependencies=['turnover_vol', 'vwap', 'open_price', 'highest_price',
'lowest_price'], max_window=40):
# -1.0 * (rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009)) <
# rank(((rank(OPEN) * 2) < (rank(((HIGH + LOW) / 2)) + rank(HIGH)))))
# adjusted formula: between two parts, use - instead of <; between two parts
# in the second condition, use - instead of < too;
# -1.0 * (rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009)) -
# rank(((rank(OPEN) * 2) - (rank(((HIGH + LOW) / 2)) + rank(HIGH)))))
# rank(correlation(VWAP, sum(ADV20, 22.4101), 9.91009))
adv_win, sum_adv_win, corr_win = param1, param2, param3
sum_adv = data['turnover_vol'].rolling(window=adv_win).mean().rolling(window=sum_adv_win).mean()
part1 = data['vwap'].iloc[-corr_win:].corrwith(sum_adv.iloc[-corr_win:]).rank(pct=True)
# rank(((rank(OPEN) * 2) - (rank(((HIGH + LOW) / 2)) + rank(HIGH))))
rank_open = data['open_price'].rank(axis=1, pct=True)
rank_high_low = ((data['highest_price'] + data['lowest_price']) / float(param4)).rank(axis=1, pct=True)
rank_high = data['highest_price'].rank(axis=1, pct=True)
part2 = (rank_open - rank_high_low - rank_high).rank(axis=1, pct=True).iloc[-1]
alpha = float(param5) * (part1 - part2)
return alpha
def alpha101_66(self, data, param1=0.001, param2=2, param3=12, param4=7, param5=-1,
dependencies=['vwap', 'lowest_price', 'open_price', 'highest_price'], max_window=20):
# -1 * (rank(decay_linear(delta(VWAP, 3.51013), 7.23052)) +
# ts_rank(decay_linear((((LOW * 0.96633) + (LOW * (1 - 0.96633))) - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611))
# rank(decay_linear(delta(VWAP, 3.51013), 7.23052))
part1 = decay_linear(data['vwap'].diff(periods=4), win=8).rank(pct=True)
# ts_rank(decay_linear((((LOW * 0.96633) + (LOW * (1 - 0.96633))) - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611)
# rearranged
# ts_rank(decay_linear((LOW - VWAP) / (OPEN - ((HIGH + LOW) / 2)), 11.4157), 6.72611)
        price_df = (data['lowest_price'] - data['vwap']) / (
            data['open_price'] - (data['highest_price'] + data['lowest_price']) / float(param2))
decay_win, ts_win = param3, param4
part2 = rolling_decay(price_df, win=decay_win).iloc[-ts_win:].rank(axis=0, pct=True).iloc[-1]
alpha = float(param5) * (part1 + part2)
return alpha
def alpha101_67(self, data, param1=20, param2=10, param3=5, param4=8, param5=-1,
dependencies=['highest_price', 'vwap', 'turnover_vol','indu'], max_window=30):
# -1.0 * rank(HIGH - ts_min(HIGH, 2.14593))^
# rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))
# rank(HIGH - ts_min(HIGH, 2.14593))
# use adjusted formula: mean(rank(HIGH - ts_min(HIGH, 10)), 10)
high_df = data['highest_price'] - data['highest_price'].rolling(window=param1).min()
part1 = high_df.rank(axis=1, pct=True).iloc[-param2:].mean()
# rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))
neu_vwap = df_indneutralize(data['vwap'], data['indu'])
neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=param3).mean(), data['indu'])
corr_win = param4
part2 = neu_vwap.iloc[-corr_win:].corrwith(neu_adv.iloc[-corr_win:]).rank(pct=True)
alpha = float(param5) * part1 ** part2
return alpha
def alpha101_69(self, data, param1=3, param2=5, param3=8, param4=-1,
dependencies=['vwap', 'turnover_vol','indu'], max_window=15):
# -1 * rank(ts_max(delta(indneutralize(VWAP, indclass), 2.72412), 4.79344))^
# ts_rank(correlation(((CLOSE * 0.490655) + (VWAP * (1 - 0.490655))), ADV20, 4.92416), 9.0615)
neu_vwap = df_indneutralize(data['vwap'], data['indu'])
neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=5).mean(), data['indu'])
# rank(ts_max(delta(indneutralize(VWAP, indclass), 2.72412), 4.79344))
diff_win, ts_max_win = param1, param2
ts_max_df = neu_vwap.diff(periods=diff_win).rolling(window=ts_max_win).max()
part1 = ts_max_df.iloc[-1].rank(pct=True)
# rank(correlation(indneutralize(VWAP, indclass), indneutralize(ADV20, indclass), 6.02936))
corr_win = param3
part2 = neu_vwap.iloc[-corr_win:].corrwith(neu_adv.iloc[-corr_win:]).rank(pct=True)
alpha = float(param4) * (part1 ** part2)
return alpha
def alpha101_72(self, data, param1=5, param2=1.0e6, param3=9, param4=10, param5=2,
param6=8, param7=20, param8=7,param9=3,
dependencies=['turnover_vol', 'lowest_price', 'highest_price', 'vwap'], max_window=30):
# rank(decay_linear(correlation(((HIGH + LOW) / 2), ADV40, 8.93345), 10.1519)) /
# rank(decay_linear(correlation(ts_rank(VWAP, 3.72469), ts_rank(VOLUME, 18.5188), 6.86671), 2.95011))
# rank(decay_linear(correlation(((HIGH + LOW) / 2), ADV40, 8.93345), 10.1519))
ma_vol_win = param1
avg_vol = data['turnover_vol'].rolling(window=ma_vol_win).mean() / param2
corr_win, decay_win = param3, param4
part1 = decay_linear(rolling_corr((data['highest_price'] + data['lowest_price'])/float(param5),
avg_vol, win=corr_win), win=decay_win).rank(pct=True)
# rank(decay_linear(correlation(ts_rank(VWAP, 3.72469), ts_rank(VOLUME, 18.5188), 6.86671), 2.95011))
ts_rank_vwap = rolling_rank(data['vwap'], win=param6)
ts_rank_vol = rolling_rank(data['turnover_vol'], win=param7)
corr_win, decay_win = param8, param9
part2 = decay_linear(rolling_corr(ts_rank_vwap, ts_rank_vol, win=corr_win), win=decay_win).rank(pct=True)
alpha = part1 / part2
return alpha
def alpha101_73(self, data, param1=5, param2=3, param3=0.147155, param4=0.147155,
param5=2, param6=4, param7=17,param8=-1,param9=-1,
dependencies=['vwap', 'lowest_price', 'open_price'], max_window=25):
# -1 * max(rank(decay_linear(delta(VWAP, 4.72775), 2.91864)),
# ts_rank(decay_linear((delta((OPEN * 0.147155 + LOW * (1 - 0.147155)), 2.03608) /
# (OPEN * 0.147155 + LOW * (1 - 0.147155))) * -1, 3.33829), 16.7411))
# rank(decay_linear(delta(VWAP, 4.72775), 2.91864))
diff_win, decay_win = param1, param2
part1 = decay_linear(data['vwap'].diff(periods=diff_win), win=decay_win).rank(pct=True)
# (OPEN * 0.147155 + LOW * (1 - 0.147155))
price = data['open_price'] * param3 + data['lowest_price'] * (1 - param4)
# ts_rank(decay_linear((delta((OPEN * 0.147155 + LOW * (1 - 0.147155)), 2.03608) /
# (OPEN * 0.147155 + LOW * (1 - 0.147155))) * -1, 3.33829), 16.7411)
diff_win, decay_win, ts_rank_win = param5, param6, param7
decay_df = rolling_decay(float(param8) * price.diff(periods=diff_win) / price, win=decay_win)
part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha = (param9) * pd.DataFrame({'part1': part1, 'part2': part2}).max(axis=1)
return alpha
def alpha101_74(self, data, param1=10, param2=16, param3=15, param4=0.0261661, param5=12, param6=-1,
dependencies=['turnover_vol', 'close_price', 'highest_price', 'vwap'], max_window=40):
# -1 * (rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365)) <
# rank(correlation(rank(((HIGH * 0.0261661) + (VWAP * (1 - 0.0261661)))), rank(VOLUME), 11.4791)))
# rearranged formula: between two parts, use - instead of <
# -1 * (rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365)) -
# rank(correlation(rank(((HIGH * 0.0261661) + (VWAP * (1 - 0.0261661)))), rank(VOLUME), 11.4791)))
# rank(correlation(CLOSE, sum(ADV30, 37.4843), 15.1365))
mean_win, sum_win = param1, param2 # change from 30, 37.48 to 10, 16
adv_sum = data['turnover_vol'].rolling(window=mean_win).mean().rolling(window=sum_win).sum()
corr_win = param3 # change from orignal 15.13 to 15
part1 = data['close_price'].iloc[-corr_win:].corrwith(adv_sum.iloc[-corr_win:]).rank(pct=True)
# rank(correlation(rank(HIGH * 0.0261661 + VWAP * (1 - 0.0261661)), rank(VOLUME), 11.4791))
rank_price = (data['highest_price'] * param4 + data['vwap'] * (1 - param4)).rank(axis=1, pct=True)
rank_vol = data['turnover_vol'].rank(axis=1, pct=True)
corr_win = param5 # change from orignal 11.4791 to 12
part2 = rank_price.iloc[-corr_win:].corrwith(rank_vol.iloc[-corr_win:]).rank(pct=True)
alpha = float(param6) * (part1 - part2)
return alpha
def alpha101_75(self, data, param1=8, param2=12, param3=12,
dependencies=['vwap', 'turnover_vol', 'lowest_price', 'turnover_vol'], max_window=30):
# rank(correlation(VWAP, VOLUME, 4.24304)) < rank(correlation(rank(LOW), rank(ADV50), 12.4413))
# rearranged formula: between two parts, use - instead of <
# rank(correlation(VWAP, VOLUME, 4.24304)) - rank(correlation(rank(LOW), rank(ADV50), 12.4413))
# rank(correlation(VWAP, VOLUME, 4.24304))
corr_win = param1 # change from orignal 4.243 to 8
part1 = data['vwap'].iloc[-corr_win:].corrwith(data['turnover_vol'].iloc[-corr_win:]).rank(pct=True)
# rank(correlation(rank(LOW), rank(ADV50), 12.4413))
mean_win = param2 # change from orignal 50 to 12
rank_price = data['lowest_price'].rank(axis=1, pct=True)
rank_adv = data['turnover_vol'].rolling(window=mean_win).mean().rank(axis=1, pct=True)
corr_win = param3 # change from orignal 12.4413 to 12
part2 = rank_price.iloc[-corr_win:].corrwith(rank_adv.iloc[-corr_win:]).rank(pct=True)
alpha = part1 - part2
return alpha
def alpha101_76(self, data, param1=5, param2=1, param3=5, param4=8, param5=20, param6=5,
param7=20, param8=-1, param9=0.5,
dependencies=['close_price', 'vwap', 'lowest_price', 'turnover_vol','indu'],
max_window=50):
# -1 * max(rank(decay_linear(delta(VWAP, 1.24383), 11.8259)),
# ts_rank(decay_linear(ts_rank(correlation(indneutralize(LOW, indclass), ADV81, 8.14941), 19.569), 17.1543), 19.383))
neu_low = df_indneutralize(data['lowest_price'], data['indu'])
adv = data['turnover_vol'].rolling(window=param1).mean()
# rank(decay_linear(delta(VWAP, 1.24383), 11.8259))
diff_win, decay_win = param2, param3
decay_df = rolling_decay(data['vwap'].diff(periods=diff_win), win=decay_win)
part1 = decay_df.iloc[-1].rank(pct=True)
# ts_rank(decay_linear(ts_rank(correlation(indneutralize(LOW, indclass), ADV81, 8.14941), 19.569), 17.1543), 19.383)
corr_win, ts_rank_win1, decay_win, ts_rank_win2 = param4, param5, param6, param7
corr_df = rolling_corr(neu_low, adv, win=corr_win)
decay_df = rolling_decay(rolling_rank(corr_df, win=ts_rank_win1), win=decay_win)
part2 = decay_df.iloc[-ts_rank_win2:].rank(axis=0, pct=True).iloc[-1]
res_df = pd.DataFrame({'part1': part1, 'part2': part2})
# alpha = -1.0 * res_df.max(axis=1)
# # use adjusted formula
alpha = float(param8) * (res_df.max(axis=1) - param9 * res_df.min(axis=1))
return alpha
def alpha101_80(self, data, param1=0.85, param2=0.15, param3=5, param4=4, param5=5,
param6=6, param7=6, param8=-1,
dependencies=['open_price', 'highest_price', 'turnover_vol', 'highest_price','indu'], max_window=20):
# -1 * (rank(sign(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545)))^
# ts_rank(correlation(HIGH, ADV10, 5.11456), 5.53756))
neu_price = df_indneutralize(data['open_price'] * param1 + data['highest_price'] * param2, data['indu'])
adv = data['turnover_vol'].rolling(window=param3).mean()
# rank(sign(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545)))
# use decay_linear instead of sign in part1 formula
# rank(decay_linear(delta(indneutralize(((OPEN * 0.868128) + (HIGH * (1 - 0.868128))), indclass), 4.04545), 5))
diff_win, decay_win = param4, param5
part1 = decay_linear(neu_price.diff(periods=diff_win), win=decay_win).rank(pct=True)
# ts_rank(correlation(HIGH, ADV10, 5.11456), 5.53756)
corr_win, ts_rank_win = param6, param7
corr_df = rolling_corr(data['highest_price'], adv, win=corr_win)
part2 = corr_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha = float(param8) * part1 ** part2
return alpha
def alpha101_81(self, data, param1=10, param2=10, param3=8, param4=10, param5=4,
param6=8, param7=-1,
dependencies=['vwap', 'turnover_vol', 'vwap'], max_window=32):
# -1 * (rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655))) <
# rank(correlation(rank(VWAP), rank(VOLUME), 5.07914)))
# rearranged formula: between two parts, use - instead of <
# -1 * (rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655))) -
# rank(correlation(rank(VWAP), rank(VOLUME), 5.07914)))
# rank(LOG(product(rank((rank(correlation(VWAP, sum(ADV10, 49.6054), 8.47743))^4)), 14.9655)))
mean_win, sum_win = param1, param2 # change from 10, 49.6054 to 10, 10
adv_sum = data['turnover_vol'].rolling(window=mean_win).mean().rolling(window=sum_win).sum()
        corr_win, prod_win = param3, param4 # change from original 8.47743, 14.9655 to 8, 10
corr_df = rolling_corr(data['vwap'], adv_sum, corr_win)
prod_se = ((corr_df.rank(axis=1, pct=True)) ** param5).rank(axis=1, pct=True).iloc[-prod_win:].cumprod().iloc[-1]
part1 = np.log(prod_se).rank(pct=True)
# rank(correlation(rank(VWAP), rank(VOLUME), 5.07914))
rank_price = data['vwap'].rank(axis=1, pct=True)
rank_vol = data['turnover_vol'].rank(axis=1, pct=True)
        corr_win = param6 # change from original 5.07914 to 8
part2 = rank_price.iloc[-corr_win:].corrwith(rank_vol.iloc[-corr_win:]).rank(pct=True)
alpha = float(param7) * (part1 - part2)
return alpha
def alpha101_82(self, data, param1=1, param2=10, param3=16, param4=6, param5=14, param6=-1, param7=0.5,
dependencies=['open_price', 'turnover_vol','indu'], max_window=40):
# -1 * min(rank(decay_linear(delta(OPEN, 1.46063), 14.8717)),
# ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), ((OPEN * 0.634196) + (OPEN * (1 - 0.634196))), 17.4842), 6.92131), 13.4283))
# rearranged formula
# -1 * min(rank(decay_linear(delta(OPEN, 1.46063), 14.8717)),
# ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), OPEN, 17.4842), 6.92131), 13.4283))
# rank(decay_linear(delta(OPEN, 1.46063), 14.8717))
diff_win, decay_win = param1, param2
part1 = decay_linear(data['open_price'].diff(periods=diff_win), win=decay_win).rank(pct=True)
# ts_rank(decay_linear(correlation(indneutralize(VOLUME, indclass), OPEN, 17.4842), 6.92131), 13.4283)
neu_vol = df_indneutralize(data['turnover_vol'], data['indu'])
corr_win, decay_win, ts_rank_win = param3, param4, param5
decay_df = rolling_decay(rolling_corr(neu_vol, data['open_price'], win=corr_win), win=decay_win)
part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha101_df = pd.DataFrame({'part1': part1, 'part2': part2})
# # original alpha formula
# alpha = -1.0 * alpha101_df.min(axis=1)
# adjusted alpha formula
alpha = float(param6) * (alpha101_df.min(axis=1) - float(param7) * alpha101_df.max(axis=1))
return alpha
def alpha101_83(self, data, param1=10, param2=2,
dependencies=['highest_price', 'lowest_price',
'close_price', 'turnover_vol',
'vwap'], max_window=20):
# (rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2)) * rank(VOLUME)) /
# (((HIGH - LOW) / (sum(CLOSE, 5) / 5)) / (VWAP - CLOSE))
# rearranged formula
# rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2)) * rank(VOLUME) * (VWAP - CLOSE) /
# ((HIGH - LOW) / (sum(CLOSE, 5) / 5))
# rank(delay(((HIGH - LOW) / (sum(CLOSE, 5) / 5)), 2))
mean_win, delay_win = param1, param2
price_df = ((data['highest_price'] - data['lowest_price']) / data['close_price'].rolling(window=mean_win).mean())
        # delay(x, n) is the value n periods ago, i.e. a shift of the series
        part1 = price_df.shift(periods=delay_win).iloc[-1].rank(pct=True)
# rank(VOLUME) * (VWAP - CLOSE)
part2 = (data['turnover_vol'].rank(axis=1, pct=True) * (data['vwap'] - data['close_price'])).iloc[-1]
# ((HIGH - LOW) / (sum(CLOSE, 5) / 5))
part3 = price_df.iloc[-1]
alpha = part1 * part2 / part3
return alpha
def alpha101_84(self, data, param1=15, param2=20, param3=6,
dependencies=['vwap', 'close_price'], max_window=40):
# signedpower(ts_rank((VWAP - ts_max(VWAP, 15.3217)), 20.7127), delta(CLOSE, 4.96796))
# ts_rank((VWAP - ts_max(VWAP, 15.3217)), 20.7127)
max_win, rank_win = param1, param2
price_df = data['vwap'] - data['vwap'].rolling(window=max_win).max()
part1 = price_df.iloc[-rank_win:].rank(axis=0, pct=True).iloc[-1]
# delta(CLOSE, 4.96796)
diff_win = param3
        # the cross-sectionally ranked price change is used as the exponent
        part2 = data['close_price'].diff(periods=diff_win).iloc[-1].rank(pct=True)
alpha = np.sign(part1) * (part1.abs() ** part2)
return alpha
def alpha101_87(self, data, param1=2, param2=3, param3=0.37, param4=0.63, param5=12, param6=5,
param7=14,
dependencies=['close_price', 'vwap', 'turnover_vol','indu'], max_window=30):
# -1 * max(rank(decay_linear(delta(((CLOSE * 0.369701) + (VWAP * (1 - 0.369701))), 1.91233), 2.65461)),
# ts_rank(decay_linear(abs(correlation(indneutralize(ADV81, indclass), CLOSE, 13.4132)), 4.89768), 14.4535))
# rank(decay_linear(delta(((CLOSE * 0.369701) + (VWAP * (1 - 0.369701))), 1.91233), 2.65461))
diff_win, decay_win = param1, param2
price_df = data['close_price'] * param3 + data['vwap'] * param4
part1 = decay_linear(price_df.diff(periods=diff_win), win=decay_win).rank(pct=True)
# ts_rank(decay_linear(abs(correlation(indneutralize(ADV81, indclass), CLOSE, 13.4132)), 4.89768), 14.4535)
neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=8).mean(), data['indu'])
        corr_win, decay_win, ts_rank_win = param5, param6, param7  # 12, 5, 14
decay_df = rolling_decay(rolling_corr(neu_adv, data['close_price'], win=corr_win).abs(), win=decay_win)
part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha101_df = pd.DataFrame({'part1': part1, 'part2': part2})
# # original alpha formula
# alpha = -1.0 * alpha101_df.max(axis=1)
# adjusted alpha formula
alpha = -1.0 * (alpha101_df.max(axis=1) - 0.5 * alpha101_df.min(axis=1))
return alpha
def alpha101_88(self, data, param1=8, param2=20, param3=9, param4=20, param5=9, param6=6, param7=20,
dependencies=['open_price', 'highest_price',
'lowest_price', 'close_price',
'turnover_vol'], max_window=50):
# min(rank(decay_linear(((rank(OPEN) + rank(LOW)) - (rank(HIGH) + rank(CLOSE))), 8.06882)),
# ts_rank(decay_linear(correlation(ts_rank(CLOSE, 8.44728), ts_rank(ADV60, 20.6966), 8.01266), 6.65053), 2.61957))
# rank(decay_linear(((rank(OPEN) + rank(LOW)) - (rank(HIGH) + rank(CLOSE))), 8.06882))
decay_win = param1
open_low = data['open_price'].rank(axis=1, pct=True) + data['lowest_price'].rank(axis=1, pct=True)
high_close = data['highest_price'].rank(axis=1, pct=True) + data['close_price'].rank(axis=1, pct=True)
part1 = decay_linear(open_low - high_close, win=decay_win).rank(pct=True)
# ts_rank(decay_linear(correlation(ts_rank(CLOSE, 8.44728), ts_rank(ADV60, 20.6966), 8.01266), 6.65053), 2.61957)
adv_win, ts_close_win, ts_adv_win = param2, param3, param4
adv_df = data['turnover_vol'].rolling(window=adv_win).mean()
rank_close = rolling_rank(data['close_price'], win=ts_close_win)
rank_adv = rolling_rank(adv_df, win=ts_adv_win)
corr_win, decay_win, ts_rank_win = param5, param6, param7 # change from original 8.01266, 6.65053, 2.61957 to 9, 6, 10
decay_df = rolling_decay(rolling_corr(rank_close, rank_adv, win=corr_win), win=decay_win)
part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# original
alpha = pd.DataFrame({'part1': part1, 'part2': part2}).min(axis=1)
# # adjusted formula
# alpha = pd.DataFrame({'part1': part1, 'part2': part2}).mean(axis=1)
return alpha
def alpha101_90(self, data, param1=5, param2=4, param3=8, param4=8, param5=6, param6=-1,
dependencies=['close_price', 'lowest_price', 'turnover_vol','indu'], max_window=20):
# -1 *(rank((CLOSE - ts_max(CLOSE, 4.66719)))^
# ts_rank(correlation(indneutralize(ADV40, indclass), LOW, 5.38375), 3.21856))
# rank((CLOSE - ts_max(CLOSE, 4.66719)))
# add a decay linear
close_df = data['close_price'] - data['close_price'].rolling(window=param1).max()
part1 = decay_linear(close_df.rank(axis=1, pct=True), win=param2).rank(pct=True)
# ts_rank(correlation(indneutralize(ADV40, indclass), LOW, 5.38375), 3.21856)
neu_adv = df_indneutralize(data['turnover_vol'].rolling(window=param3).mean(), data['indu'])
corr_win, ts_rank_win = param4, param5
corr_df = rolling_corr(neu_adv, data['lowest_price'], win=corr_win)
part2 = corr_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
alpha = float(param6) * part1 ** part2
return alpha
def alpha101_91(self, data, param1=5, param2=10, param3=10, param4=3, param5=10,
param6=8, param7=3, param8=-1,
dependencies=['close_price', 'turnover_vol',
'vwap','indu'], max_window=32):
# -1 * (ts_rank(decay_linear(decay_linear(correlation(indneutralize(CLOSE, indclass), VOLUME, 9.74928), 16.398), 3.83219), 4.8667) -
# rank(decay_linear(correlation(VWAP, ADV30, 4.01303), 2.6809)))
neu_close = df_indneutralize(data['close_price'], data['indu'])
adv = data['turnover_vol'].rolling(window=param1).mean()
# ts_rank(decay_linear(decay_linear(correlation(indneutralize(CLOSE, indclass), VOLUME, 9.74928), 16.398), 3.83219), 4.8667)
corr_win, decay_win1, decay_win2, ts_rank_win = param2, param3, param4, param5
decay_df = rolling_decay(rolling_decay(rolling_corr(neu_close, data['turnover_vol'],
win=corr_win), win=decay_win1), win=decay_win2)
part1 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# rank(decay_linear(correlation(VWAP, ADV30, 4.01303), 2.6809))
corr_win, decay_win = param6, param7
        part2 = decay_linear(rolling_corr(data['vwap'], adv, win=corr_win), win=decay_win).rank(pct=True)
alpha = float(param8) * (part1 - part2)
return alpha
def alpha101_96(self, data, param1=6, param2=4, param3=14, param4=20, param5=8, param6=6,
param7=8, param8=12, param9=6, param10=13, param11=-1,
dependencies=['vwap', 'turnover_vol',
'close_price'], max_window=50):
# -1.0 * max(ts_rank(decay_linear(correlation(rank(VWAP), rank(VOLUME), 3.83878), 4.16783), 8.38151),
# ts_rank(decay_linear(ts_argmax(correlation(ts_rank(CLOSE, 7.45404), ts_rank(ADV60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143))
# ts_rank(decay_linear(correlation(rank(VWAP), rank(VOLUME), 3.83878), 4.16783), 8.38151)
rank_vwap = data['vwap'].rank(axis=1, pct=True)
rank_vol = data['turnover_vol'].rank(axis=1, pct=True)
corr_win, decay_win, ts_rank_win = param1, param2, param3
decay_df = rolling_decay(rolling_corr(rank_vwap, rank_vol, win=corr_win), win=decay_win)
part1 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# ts_rank(decay_linear(ts_argmax(correlation(ts_rank(CLOSE, 7.45404), ts_rank(ADV60, 4.13242), 3.65459), 12.6556), 14.0365), 13.4143)
adv_win, ts_close_win, ts_adv_win = param4, param5, param6
adv_df = data['turnover_vol'].rolling(window=adv_win).mean()
rank_close = rolling_rank(data['close_price'], win=ts_close_win)
rank_adv = rolling_rank(adv_df, win=ts_adv_win)
# change from original 3.65459, 12.6556, 14.0365, 13.4143 to 8, 12, 6, 13
corr_win, ts_max_win, decay_win, ts_rank_win = param7, param8, param9, param10
corr_df = rolling_corr(rank_close, rank_adv, win=corr_win)
ts_argmax_df = corr_df.rolling(window=ts_max_win).apply(lambda x: x.argmax())
decay_df = rolling_decay(ts_argmax_df, win=decay_win)
part2 = decay_df.iloc[-ts_rank_win:].rank(axis=0, pct=True).iloc[-1]
# original formula
alpha = float(param11) * pd.DataFrame({'part1': part1, 'part2': part2}).max(axis=1)
# # adjusted formula
# alpha = -1 * pd.DataFrame({'part1': part1, 'part2': part2}).mean(axis=1)
return alpha
def alpha101_97(self, data, param1=4, param2=12, param3=0.7, param4=10, param5=17,
param6=8, param7=18, param8=5, param9=16, param10=-1,
dependencies=['lowest_price', 'vwap', 'turnover_vol', 'lowest_price','indu'], max_window=45):
# -(rank(decay_linear(delta(indneutralize(((LOW * 0.721001) + (VWAP * (1 - 0.721001))), indclass), 3.3705), 20.4523)) -
# ts_rank(decay_linear(ts_rank(correlation(ts_rank(LOW, 7.87871), ts_rank(ADV60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659))
# rank(decay_linear(delta(indneutralize(((LOW * 0.721001) + (VWAP * (1 - 0.721001))), indclass), 3.3705), 20.4523))
diff_win, decay_win = param1, param2
price_df = data['lowest_price'] * param3 + data['vwap'] * (1 - param3)
part1 = decay_linear(df_indneutralize(price_df, data['indu']).diff(periods=diff_win), win=decay_win).rank(pct=True)
# ts_rank(decay_linear(ts_rank(correlation(ts_rank(LOW, 7.87871), ts_rank(ADV60, 17.255), 4.97547), 18.5925), 15.7152), 6.71659)
ts_rank_low = rolling_rank(data['lowest_price'], win=param4)
ts_rank_adv = rolling_rank(data['turnover_vol'].rolling(window=param4).mean(), win=param5)
corr_win, ts_win1, decay_win, ts_win2 = param6, param7, param8, param9
decay_df = rolling_decay(rolling_rank(rolling_corr(ts_rank_low, ts_rank_adv, win=corr_win), win=ts_win1), win=decay_win)
part2 = decay_df.iloc[-ts_win2:].rank(axis=0, pct=True).iloc[-1]
alpha = float(param10) * (part1 - part2)
return alpha
def alpha101_99(self, data, param1=20, param2=16, param3=16, param4=9, param5=2, param6=7, param7=-1,
dependencies=['highest_price', 'lowest_price', 'turnover_vol'], max_window=50):
# -1 * (rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 19.8975), 8.8136)) <
# rank(correlation(LOW, VOLUME, 6.28259)))
# rearranged formula: between two parts, use - instead of <
# -1 * (rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 19.8975), 8.8136)) -
# rank(correlation(LOW, VOLUME, 6.28259)))
# rank(correlation(sum(((HIGH + LOW) / 2), 19.8975), sum(ADV60, 19.8975), 8.8136))
adv_win, sum_price_win, sum_adv_win, corr_win = param1, param2, param3, param4
sum_price = ((data['highest_price'] + data['lowest_price']) / float(param5)).rolling(window=sum_price_win).mean()
sum_adv = data['turnover_vol'].rolling(window=adv_win).mean().rolling(window=sum_adv_win).mean()
part1 = sum_price.iloc[-corr_win:].corrwith(sum_adv.iloc[-corr_win:]).rank(pct=True)
# rank(correlation(LOW, VOLUME, 6.28259))
corr_win = param6
part2 = data['lowest_price'].iloc[-corr_win:].corrwith(data['turnover_vol'].iloc[-corr_win:]).rank(pct=True)
alpha = float(param7) * (part1 - part2)
return alpha
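    # Example usage (a sketch: `data` is assumed to be a dict-like of DataFrames indexed by
    # time with one column per stock, plus the `indu` industry-dummy frame; `factors` stands
    # for an instance of this class and all names here are illustrative only):
    #
    #     data = {
    #         'open_price': open_df, 'close_price': close_df,
    #         'highest_price': high_df, 'lowest_price': low_df,
    #         'vwap': vwap_df, 'turnover_vol': volume_df, 'indu': indu_df,
    #     }
    #     score = factors.alpha101_99(data)   # one cross-sectional pd.Series per call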
| 52.078652
| 152
| 0.614422
|
795241b7895e6ede3731f43193a16154039353c7
| 5,834
|
py
|
Python
|
onlinecourse/views.py
|
acromafireair/final-onlinecourseapp
|
1c2a0e8304548b5e612d487545d7aeb757150d78
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/views.py
|
acromafireair/final-onlinecourseapp
|
1c2a0e8304548b5e612d487545d7aeb757150d78
|
[
"Apache-2.0"
] | null | null | null |
onlinecourse/views.py
|
acromafireair/final-onlinecourseapp
|
1c2a0e8304548b5e612d487545d7aeb757150d78
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponseRedirect
# <HINT> Import any new Models here
from .models import Course, Enrollment, Question, Choice, Submission
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import login, logout, authenticate
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def registration_request(request):
context = {}
if request.method == 'GET':
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
elif request.method == 'POST':
# Check if user exists
username = request.POST['username']
password = request.POST['psw']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
User.objects.get(username=username)
user_exist = True
        except User.DoesNotExist:
logger.error("New user")
if not user_exist:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
password=password)
login(request, user)
return redirect("onlinecourse:index")
else:
context['message'] = "User already exists."
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
def login_request(request):
context = {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['psw']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('onlinecourse:index')
else:
context['message'] = "Invalid username or password."
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
else:
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
def logout_request(request):
logout(request)
return redirect('onlinecourse:index')
def check_if_enrolled(user, course):
is_enrolled = False
if user.id is not None:
# Check if user enrolled
num_results = Enrollment.objects.filter(user=user, course=course).count()
if num_results > 0:
is_enrolled = True
return is_enrolled
# CourseListView
class CourseListView(generic.ListView):
template_name = 'onlinecourse/course_list_bootstrap.html'
context_object_name = 'course_list'
def get_queryset(self):
user = self.request.user
courses = Course.objects.order_by('-total_enrollment')[:10]
for course in courses:
if user.is_authenticated:
course.is_enrolled = check_if_enrolled(user, course)
return courses
class CourseDetailView(generic.DetailView):
model = Course
template_name = 'onlinecourse/course_detail_bootstrap.html'
def enroll(request, course_id):
course = get_object_or_404(Course, pk=course_id)
user = request.user
is_enrolled = check_if_enrolled(user, course)
if not is_enrolled and user.is_authenticated:
# Create an enrollment
Enrollment.objects.create(user=user, course=course, mode='honor')
course.total_enrollment += 1
course.save()
return HttpResponseRedirect(reverse(viewname='onlinecourse:course_details', args=(course.id,)))
# <HINT> Create a submit view to create an exam submission record for a course enrollment,
# you may implement it based on following logic:
# Get user and course object, then get the associated enrollment object created when the user enrolled the course
# Create a submission object referring to the enrollment
# Collect the selected choices from exam form
# Add each selected choice object to the submission object
# Redirect to show_exam_result with the submission id
def submit(request, course_id):
    user = request.user
    course = get_object_or_404(Course, pk=course_id)
    enrollment = Enrollment.objects.get(user=user, course=course)
    # Create a submission for this enrollment and attach the choices selected in the exam form
    submission = Submission.objects.create(enrollment=enrollment)
    selected_answers = extract_answers(request)
    submission.choices.set(selected_answers)
    submission.save()
    return HttpResponseRedirect(reverse(viewname='onlinecourse:show_exam_result', args=(course_id, submission.pk)))
# <HINT> A example method to collect the selected choices from the exam form from the request object
def extract_answers(request):
submitted_anwsers = []
for key in request.POST:
if key.startswith('choice'):
value = request.POST[key]
choice_id = int(value)
submitted_anwsers.append(choice_id)
return submitted_anwsers
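# Example: a POST body such as
#   {'csrfmiddlewaretoken': '...', 'choice_3': '12', 'choice_4': '17'}
# makes extract_answers() return [12, 17] -- the ids of the selected Choice objects,
# which submit() then attaches to the submission via submission.choices.set(...).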
# <HINT> Create an exam result view to check if learner passed exam and show their question results and result for each question,
# you may implement it based on the following logic:
# Get course and submission based on their ids
# Get the selected choice ids from the submission record
# For each selected choice, check if it is a correct answer or not
# Calculate the total score
def show_exam_result(request, course_id, submission_id):
    course = get_object_or_404(Course, pk=course_id)
    submission = get_object_or_404(Submission, pk=submission_id)
    # The choices the learner selected for this submission
    answers = submission.choices.all()
    grade = 0.0
    for answer in answers:
        if answer.is_correct:
            # each correct choice contributes its question's grade (accessed via `questions` as modelled)
            grade += answer.questions.grade
    context = {
        'course': course,
        'grade': grade,
    }
    return render(request, 'onlinecourse/exam_result_bootstrap.html', context)
| 38.381579
| 129
| 0.69592
|
7952436a9c3207148bd6d7e5075d21bb3cbe02a4
| 1,430
|
py
|
Python
|
setup.py
|
tcordiello/electrumx
|
f62c982f9b5ace30bdb962d07c4c9c8e7d87032a
|
[
"MIT"
] | 1
|
2021-08-14T13:46:27.000Z
|
2021-08-14T13:46:27.000Z
|
setup.py
|
denza/electrumx
|
2ab1dce09152ff917881c2b5e45c15d0efe8e31a
|
[
"MIT"
] | null | null | null |
setup.py
|
denza/electrumx
|
2ab1dce09152ff917881c2b5e45c15d0efe8e31a
|
[
"MIT"
] | null | null | null |
import setuptools
version = '1.8.6'
setuptools.setup(
name='electrumX',
version=version,
scripts=['electrumx_server', 'electrumx_rpc'],
python_requires='>=3.6',
# via environment variables, in which case I've tested with 15.0.4
# "x11_hash" package (1.4) is required to sync DASH network.
# "tribus_hash" package is required to sync Denarius network.
# "blake256" package is required to sync Decred network.
# "xevan_hash" package is required to sync Xuez network.
# "groestlcoin_hash" package is required to sync Groestlcoin network.
install_requires=['aiorpcX>=0.7.3,<0.8', 'attrs',
'plyvel', 'pylru', 'aiohttp >= 2', 'uvloop'],
packages=setuptools.find_packages(include=('electrumx*',)),
description='ElectrumX Server',
author='Neil Booth',
author_email='kyuupichan@gmail.com',
license='MIT Licence',
url='https://github.com/kyuupichan/electrumx',
long_description='Server implementation for the Electrum protocol',
download_url=('https://github.com/kyuupichan/electrumX/archive/'
f'{version}.tar.gz'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: AsyncIO',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
"Programming Language :: Python :: 3.6",
"Topic :: Database",
'Topic :: Internet',
],
)
| 39.722222
| 73
| 0.645455
|
79524407262885df9a9939287a346d65a627c7a9
| 3,456
|
bzl
|
Python
|
test/generate_tests.bzl
|
raytracer/as-tree
|
991629712f43429874bbf11089d9efcff9c04c45
|
[
"BlueOak-1.0.0"
] | 333
|
2020-05-10T15:44:48.000Z
|
2022-03-30T15:10:20.000Z
|
test/generate_tests.bzl
|
raytracer/as-tree
|
991629712f43429874bbf11089d9efcff9c04c45
|
[
"BlueOak-1.0.0"
] | 19
|
2020-05-13T11:02:34.000Z
|
2021-11-19T08:22:02.000Z
|
test/generate_tests.bzl
|
raytracer/as-tree
|
991629712f43429874bbf11089d9efcff9c04c45
|
[
"BlueOak-1.0.0"
] | 18
|
2020-05-13T10:57:09.000Z
|
2021-12-28T01:34:09.000Z
|
_update_tags = [
# Avoid being caught with `//...`
"manual",
# Forces the test to be run locally, without sandboxing
"local",
# Unconditionally run this rule, and don't run in the sandbox
"external",
]
def fixture_tests(input_files):
tests = []
updates = []
for input_txt_file in input_files:
test_name = "test/{}".format(input_txt_file)
input_txt_exp_file = "{}.exp".format(input_txt_file)
native.sh_test(
name = test_name,
srcs = ["run_one_fixture.sh"],
args = [
"test",
"$(location {})".format(input_txt_file),
"$(location {})".format(input_txt_exp_file),
],
data = [
"//src:as-tree",
input_txt_file,
input_txt_exp_file,
],
size = "small",
)
tests.append(test_name)
update_name = "update_test/{}".format(input_txt_file)
native.sh_test(
name = update_name,
srcs = ["run_one_fixture.sh"],
args = [
"update",
"$(location {})".format(input_txt_file),
"$(location {})".format(input_txt_exp_file),
],
data = [
"//src:as-tree",
input_txt_file,
input_txt_exp_file,
],
tags = _update_tags,
size = "small",
)
updates.append(update_name)
native.test_suite(
name = "test/fixture",
tests = tests,
)
native.test_suite(
name = "update_test/fixture",
tests = updates,
tags = ["manual"],
)
def cli_tests(input_files):
tests = []
updates = []
for run_sh_file in input_files:
input_folder, _slash, _file = run_sh_file.rpartition('/')
sh_binary_name = run_sh_file[:-3]
native.sh_binary(
name = sh_binary_name,
srcs = [run_sh_file],
data = native.glob([
"{}/**/*.txt".format(input_folder)
]) + ["//src:as-tree"],
)
test_name = "test/{}".format(run_sh_file)
run_sh_exp_file = "{}.exp".format(run_sh_file)
native.sh_test(
name = test_name,
srcs = ["run_one_cli.sh"],
args = [
"test",
"$(location {})".format(sh_binary_name),
"$(location {})".format(run_sh_exp_file),
],
data = [
run_sh_exp_file,
":{}".format(sh_binary_name),
],
size = "small",
)
tests.append(test_name)
update_name = "update_test/{}".format(run_sh_file)
native.sh_test(
name = update_name,
srcs = ["run_one_cli.sh"],
args = [
"update",
"$(location {})".format(sh_binary_name),
"$(location {})".format(run_sh_exp_file),
],
data = [
run_sh_exp_file,
":{}".format(sh_binary_name),
],
tags = _update_tags,
size = "small",
)
updates.append(update_name)
native.test_suite(
name = "test/cli",
tests = tests,
)
native.test_suite(
name = "update_test/cli",
tests = updates,
tags = ["manual"],
)
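# Example BUILD usage (a sketch; the load label and glob patterns are illustrative and
# depend on how the fixture and CLI inputs are laid out in this package):
#
#     load(":generate_tests.bzl", "cli_tests", "fixture_tests")
#
#     fixture_tests(glob(["fixture/*.txt"]))
#     cli_tests(glob(["cli/*/run.sh"]))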
| 27.870968
| 65
| 0.471065
|
79524441bb74895ed09d43e8732db5ef6aa5db26
| 2,678
|
py
|
Python
|
drf_encrypt_content/mixins.py
|
oguzhancelikarslan/drf-encrypt-content
|
ec9912dd0938d727374c7215bfc3c5040322e72c
|
[
"BSD-3-Clause"
] | 8
|
2020-10-03T18:45:29.000Z
|
2022-02-17T10:21:36.000Z
|
drf_encrypt_content/mixins.py
|
oguzhancelikarslan/drf-encrypt-content
|
ec9912dd0938d727374c7215bfc3c5040322e72c
|
[
"BSD-3-Clause"
] | 1
|
2021-04-20T08:02:42.000Z
|
2021-12-20T10:39:00.000Z
|
drf_encrypt_content/mixins.py
|
oguzhancelikarslan/drf-encrypt-content
|
ec9912dd0938d727374c7215bfc3c5040322e72c
|
[
"BSD-3-Clause"
] | 1
|
2022-02-17T10:21:39.000Z
|
2022-02-17T10:21:39.000Z
|
from collections import OrderedDict
from django.core.exceptions import ImproperlyConfigured
from drf_encrypt_content.rest_encrypt_content import RestEncryptContent
class RestEncryptContentMixin(RestEncryptContent):
__ALL__ = '__all__'
def to_representation(self, instance):
representation = super(RestEncryptContentMixin, self).to_representation(instance)
encrypted_fields = getattr(self.Meta, 'encrypted_fields', self.__ALL__)
excluded_fields = getattr(self.Meta, 'excluded_fields', None)
fields_list = list()
model_class = getattr(self.Meta, 'model')
if encrypted_fields and encrypted_fields != self.__ALL__ and not isinstance(encrypted_fields, (list, tuple)):
raise TypeError(
'The `encrypted_fields` option must be a list or tuple or "__all__". '
'Got %s.' % type(encrypted_fields).__name__
)
if excluded_fields and not isinstance(excluded_fields, (list, tuple)):
raise TypeError(
'The `excluded_fields` option must be a list or tuple. Got %s.' %
type(excluded_fields).__name__
)
assert not ((encrypted_fields != self.__ALL__) and excluded_fields), (
"Cannot set both 'encrypted_fields' and 'excluded_fields' options on "
"serializer {serializer_class}.".format(
serializer_class=self.__class__.__name__
)
)
if encrypted_fields == self.__ALL__:
fields_list = [key for key, value in representation.items()]
else:
for field in encrypted_fields:
if not (field in representation.keys()):
raise ImproperlyConfigured(
'Field name `%s` is not valid for model `%s`.' %
(field, model_class.__name__)
)
for key in representation.keys():
if key in encrypted_fields:
fields_list.append(key)
if excluded_fields is not None:
for field in excluded_fields:
if not (field in fields_list):
raise ImproperlyConfigured(
'Field name `%s` is not valid for model `%s`.' %
(field, model_class.__name__)
)
else:
fields_list.remove(field)
for key, value in representation.items():
if key in fields_list:
if type(representation[key]) is not OrderedDict:
representation[key] = self.encrypt_data(str(value))
return representation
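# Example usage (a sketch; `Book` and `BookSerializer` are illustrative names and the
# field lists depend on your own models):
#
#     class BookSerializer(RestEncryptContentMixin, serializers.ModelSerializer):
#         class Meta:
#             model = Book
#             fields = '__all__'
#             excluded_fields = ('id',)          # encrypt everything except `id`
#             # encrypted_fields = ('title',)    # ...or encrypt only selected fields instead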
| 39.970149
| 117
| 0.59186
|
7952467e84ec1fb8c8bda205ccd35954409ff891
| 3,032
|
py
|
Python
|
setup.py
|
technige/pansi
|
8014f67f14372bb6d88ca522d40744fff575814a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
technige/pansi
|
8014f67f14372bb6d88ca522d40744fff575814a
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
technige/pansi
|
8014f67f14372bb6d88ca522d40744fff575814a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import dirname, join as path_join
from setuptools import setup, find_packages
from pansi import (
__author__,
__email__,
__license__,
__package__,
__version__,
)
source_url = "https://github.com/technige/pansi"
with open(path_join(dirname(__file__), "README.rst")) as f:
README = f.read().replace(".. image :: art/",
".. image :: {}/raw/master/art/".format(source_url))
packages = find_packages(exclude=("demo", "docs", "test"))
package_metadata = {
"name": __package__,
"version": __version__,
"description": "ANSI escape code library for Python",
"long_description": README,
"author": __author__,
"author_email": __email__,
"url": source_url,
"project_urls": {
"Bug Tracker": "{}/issues".format(source_url),
"Source Code": source_url,
},
"entry_points": {
"console_scripts": [
],
},
"packages": packages,
"py_modules": [
],
"install_requires": [
"six",
],
"extras_require": {
},
"license": __license__,
"classifiers": [
"Development Status :: 1 - Planning",
"Environment :: Console",
"Environment :: Console :: Curses",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: System :: Console Fonts",
"Topic :: System :: Shells",
"Topic :: Terminals",
"Topic :: Terminals :: Terminal Emulators/X Terminals",
"Topic :: Text Processing",
"Topic :: Text Processing :: Markup",
"Topic :: Utilities",
],
}
setup(**package_metadata)
| 30.938776
| 82
| 0.615435
|
795246857529c74f1c239c7c6dcedfa69df6398b
| 911
|
py
|
Python
|
BOJ/graph_boj/game_develop.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
BOJ/graph_boj/game_develop.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
BOJ/graph_boj/game_develop.py
|
mrbartrns/swacademy_structure
|
778f0546030385237c383d81ec37d5bd9ed1272d
|
[
"MIT"
] | null | null | null |
# BOJ 1516
from collections import deque
import sys
sys.stdin = open("../input.txt", "r")
si = sys.stdin.readline
def topological_sort(indegree):
que = deque()
ret = [0] * (N + 1)
for i in range(1, N + 1):
if indegree[i] == 0:
ret[i] = time[i]
que.append(i)
while que:
node = que.popleft()
for nxt in graph[node]:
indegree[nxt] -= 1
if indegree[nxt] == 0:
que.append(nxt)
ret[nxt] = max(ret[nxt], ret[node] + time[nxt])
return ret
N = int(si())
indegree = [0] * (N + 1)
graph = [[] for _ in range(N + 1)]
time = [0] * (N + 1)
for i in range(1, N + 1):
arr = list(map(int, si().split(" ")))
time[i] = arr[0]
for j in range(1, len(arr) - 1):
graph[arr[j]].append(i)
indegree[i] += 1
ret = topological_sort(indegree)
for i in range(1, N + 1):
print(ret[i])
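# Example: with ../input.txt holding the classic BOJ 1516 sample
#   5
#   10 -1
#   10 1 -1
#   4 1 -1
#   4 3 1 -1
#   3 3 -1
# the script prints 10, 20, 14, 18, 17 -- the earliest completion time of each building.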
| 23.358974
| 59
| 0.507135
|
795247061340c63f7bb7c956d6bf7cf15a1b9162
| 3,084
|
py
|
Python
|
src/run_directory.py
|
zhutianpeng/ImgProcessPlatform_pose
|
748be1b98a44450671809e1d6040fd9aa0ff7998
|
[
"Apache-2.0"
] | 4
|
2019-03-05T08:58:18.000Z
|
2019-09-16T08:30:31.000Z
|
src/run_directory.py
|
zhutianpeng/ImgProcessPlatform_pose
|
748be1b98a44450671809e1d6040fd9aa0ff7998
|
[
"Apache-2.0"
] | null | null | null |
src/run_directory.py
|
zhutianpeng/ImgProcessPlatform_pose
|
748be1b98a44450671809e1d6040fd9aa0ff7998
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import logging
import time
import glob
import ast
import os
# import dill
import redis
from RedisQueue import RedisQueue
import dealimg
import common
import cv2
import numpy as np
from estimator import TfPoseEstimator
from networks import get_graph_path, model_wh
from lifting.prob_model import Prob3dPose
from lifting.draw import plot_pose
# needed so that the Redis hash set below can be used
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation run by folder')
parser.add_argument('--folder', type=str, default='../images/')
parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')
parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')
parser.add_argument('--scales', type=str, default='[None]', help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
args = parser.parse_args()
scales = ast.literal_eval(args.scales)
w, h = model_wh(args.resolution)
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    # read from redis
    # q_face = RedisQueue('face_forward_list')  # receiving queue
    q_pose = RedisQueue('pose_forward_list')  # receiving queue
    q2 = RedisQueue('test_return_list')  # sending queue
    DB_set = RedisQueue('return_set')  # cached result set
i = 0
while True:
if q_pose.empty() == False:
            # 1. receive the string and convert it to a dict
            dic_receive = eval(q_pose.get())
            id_receive = dic_receive['id']  # received ID
            task_receive = dic_receive['task']
            jpg = dic_receive['img']  # the image arrives in binary form
            print("receive-set check: id_receive:" + str(id_receive) + " ;task_receive: " + str(task_receive))  # test -> passed
            imgweb = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), cv2.IMREAD_COLOR)  # convert to Mat format
# print(imgweb)
# image = dealimg.image_decode_openpose(base64_data, i)
            # 2. run OpenPose on the image:
# humans = e.inference(imgweb, scales=scales)
# image = TfPoseEstimator.draw_humans(imgweb, humans, imgcopy=False) #mat
humans = e.inference(imgweb, scales=scales)
resultDic = TfPoseEstimator.draw_humans_json(imgweb, humans, imgcopy=False) # dic
print(str(resultDic))
# cv2.imwrite('../outimg/ %s.jpg ' % (i), image)
# print("现在处理的图片:%s" % i)
# i = i + 1
# 将 mat 转成 二进制,存进 hashset 里面
# imghtml = cv2.imencode('.jpg', image)[1].tostring() #二进制格式
# 找到的task;img(原图);处理结果;存成 Dic形式,转成String
valueDic = {'task': task_receive, 'img': jpg,'result_pose':str(resultDic)}
string_send_list = str(valueDic)
# q2.put(imghtml)
DB_set.setSet("imgset", id_receive, string_send_list)
| 34.651685
| 118
| 0.646887
|
7952490068a9005138fea4fef6a6f9e7c5bf0836
| 6,430
|
py
|
Python
|
tests/mpd/test_tokenizer.py
|
rdbhost/mopidy
|
cb15ccef08b15c30ab6cbbac2419622d0804e7ac
|
[
"Apache-2.0"
] | null | null | null |
tests/mpd/test_tokenizer.py
|
rdbhost/mopidy
|
cb15ccef08b15c30ab6cbbac2419622d0804e7ac
|
[
"Apache-2.0"
] | null | null | null |
tests/mpd/test_tokenizer.py
|
rdbhost/mopidy
|
cb15ccef08b15c30ab6cbbac2419622d0804e7ac
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from mopidy.mpd import exceptions, tokenize
class TestTokenizer(unittest.TestCase):
def assertTokenizeEquals(self, expected, line): # noqa: N802
self.assertEqual(expected, tokenize.split(line))
def assertTokenizeRaises(self, exception, message, line): # noqa: N802
with self.assertRaises(exception) as cm:
tokenize.split(line)
self.assertEqual(cm.exception.message, message)
def test_empty_string(self):
ex = exceptions.MpdNoCommand
msg = "No command given"
self.assertTokenizeRaises(ex, msg, "")
self.assertTokenizeRaises(ex, msg, " ")
self.assertTokenizeRaises(ex, msg, "\t\t\t")
def test_command(self):
self.assertTokenizeEquals(["test"], "test")
self.assertTokenizeEquals(["test123"], "test123")
self.assertTokenizeEquals(["foo_bar"], "foo_bar")
def test_command_trailing_whitespace(self):
self.assertTokenizeEquals(["test"], "test ")
self.assertTokenizeEquals(["test"], "test\t\t\t")
def test_command_leading_whitespace(self):
ex = exceptions.MpdUnknownError
msg = "Letter expected"
self.assertTokenizeRaises(ex, msg, " test")
self.assertTokenizeRaises(ex, msg, "\ttest")
def test_invalid_command(self):
ex = exceptions.MpdUnknownError
msg = "Invalid word character"
self.assertTokenizeRaises(ex, msg, "foo/bar")
self.assertTokenizeRaises(ex, msg, "æøå")
self.assertTokenizeRaises(ex, msg, "test?")
self.assertTokenizeRaises(ex, msg, 'te"st')
def test_unquoted_param(self):
self.assertTokenizeEquals(["test", "param"], "test param")
self.assertTokenizeEquals(["test", "param"], "test\tparam")
def test_unquoted_param_leading_whitespace(self):
self.assertTokenizeEquals(["test", "param"], "test param")
self.assertTokenizeEquals(["test", "param"], "test\t\tparam")
def test_unquoted_param_trailing_whitespace(self):
self.assertTokenizeEquals(["test", "param"], "test param ")
self.assertTokenizeEquals(["test", "param"], "test param\t\t")
def test_unquoted_param_invalid_chars(self):
ex = exceptions.MpdArgError
msg = "Invalid unquoted character"
self.assertTokenizeRaises(ex, msg, 'test par"m')
self.assertTokenizeRaises(ex, msg, "test foo\bbar")
self.assertTokenizeRaises(ex, msg, 'test foo"bar"baz')
self.assertTokenizeRaises(ex, msg, "test foo'bar")
def test_unquoted_param_numbers(self):
self.assertTokenizeEquals(["test", "123"], "test 123")
self.assertTokenizeEquals(["test", "+123"], "test +123")
self.assertTokenizeEquals(["test", "-123"], "test -123")
self.assertTokenizeEquals(["test", "3.14"], "test 3.14")
def test_unquoted_param_extended_chars(self):
self.assertTokenizeEquals(["test", "æøå"], "test æøå")
self.assertTokenizeEquals(["test", "?#$"], "test ?#$")
self.assertTokenizeEquals(["test", "/foo/bar/"], "test /foo/bar/")
self.assertTokenizeEquals(["test", "foo\\bar"], "test foo\\bar")
def test_unquoted_params(self):
self.assertTokenizeEquals(["test", "foo", "bar"], "test foo bar")
def test_quoted_param(self):
self.assertTokenizeEquals(["test", "param"], 'test "param"')
self.assertTokenizeEquals(["test", "param"], 'test\t"param"')
def test_quoted_param_leading_whitespace(self):
self.assertTokenizeEquals(["test", "param"], 'test "param"')
self.assertTokenizeEquals(["test", "param"], 'test\t\t"param"')
def test_quoted_param_trailing_whitespace(self):
self.assertTokenizeEquals(["test", "param"], 'test "param" ')
self.assertTokenizeEquals(["test", "param"], 'test "param"\t\t')
def test_quoted_param_invalid_chars(self):
ex = exceptions.MpdArgError
msg = "Space expected after closing '\"'"
self.assertTokenizeRaises(ex, msg, 'test "foo"bar"')
self.assertTokenizeRaises(ex, msg, 'test "foo"bar" ')
self.assertTokenizeRaises(ex, msg, 'test "foo"bar')
self.assertTokenizeRaises(ex, msg, 'test "foo"bar ')
def test_quoted_param_numbers(self):
self.assertTokenizeEquals(["test", "123"], 'test "123"')
self.assertTokenizeEquals(["test", "+123"], 'test "+123"')
self.assertTokenizeEquals(["test", "-123"], 'test "-123"')
self.assertTokenizeEquals(["test", "3.14"], 'test "3.14"')
def test_quoted_param_spaces(self):
self.assertTokenizeEquals(["test", "foo bar"], 'test "foo bar"')
self.assertTokenizeEquals(["test", "foo bar"], 'test "foo bar"')
self.assertTokenizeEquals(["test", " param\t"], 'test " param\t"')
def test_quoted_param_extended_chars(self):
self.assertTokenizeEquals(["test", "æøå"], 'test "æøå"')
self.assertTokenizeEquals(["test", "?#$"], 'test "?#$"')
self.assertTokenizeEquals(["test", "/foo/bar/"], 'test "/foo/bar/"')
def test_quoted_param_escaping(self):
self.assertTokenizeEquals(["test", "\\"], r'test "\\"')
self.assertTokenizeEquals(["test", '"'], r'test "\""')
self.assertTokenizeEquals(["test", " "], r'test "\ "')
self.assertTokenizeEquals(["test", "\\n"], r'test "\\\n"')
def test_quoted_params(self):
self.assertTokenizeEquals(["test", "foo", "bar"], 'test "foo" "bar"')
def test_mixed_params(self):
self.assertTokenizeEquals(["test", "foo", "bar"], 'test foo "bar"')
self.assertTokenizeEquals(["test", "foo", "bar"], 'test "foo" bar')
self.assertTokenizeEquals(["test", "1", "2"], 'test 1 "2"')
self.assertTokenizeEquals(["test", "1", "2"], 'test "1" 2')
self.assertTokenizeEquals(
["test", "foo bar", "baz", "123"], 'test "foo bar" baz 123'
)
self.assertTokenizeEquals(
["test", 'foo"bar', "baz", "123"], r'test "foo\"bar" baz 123'
)
def test_unbalanced_quotes(self):
ex = exceptions.MpdArgError
msg = "Invalid unquoted character"
self.assertTokenizeRaises(ex, msg, 'test "foo bar" baz"')
def test_missing_closing_quote(self):
ex = exceptions.MpdArgError
msg = "Missing closing '\"'"
self.assertTokenizeRaises(ex, msg, 'test "foo')
self.assertTokenizeRaises(ex, msg, 'test "foo a ')
| 43.445946
| 77
| 0.630638
|
795249480cc0f47f2a08c7ca9cdcc4b29850734b
| 11,295
|
py
|
Python
|
Mobility Algorithms/Queues at Known Locations/Machine_Learning/train_test_export_ML_model_offline.py
|
James-OHara/NCHRP-BSM-Traffic-Measures
|
d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56
|
[
"Apache-2.0"
] | null | null | null |
Mobility Algorithms/Queues at Known Locations/Machine_Learning/train_test_export_ML_model_offline.py
|
James-OHara/NCHRP-BSM-Traffic-Measures
|
d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56
|
[
"Apache-2.0"
] | null | null | null |
Mobility Algorithms/Queues at Known Locations/Machine_Learning/train_test_export_ML_model_offline.py
|
James-OHara/NCHRP-BSM-Traffic-Measures
|
d6842c9dc63de8c2d470482fbfd1ec91a9c2ae56
|
[
"Apache-2.0"
] | null | null | null |
"""This python script trains supervised machine learning Random Forest models on the complete data provided,
outputs performance metrics, and exports the trained Random Forest to a pickle file.
Test with: python train_test_export_ML_model_offline.py sample_BSMs_X_file.csv sample_max_queues_Y_file.csv sample_vehicle_length_by_type_file.csv sample_stoplines_file.csv sample_signal_timing_file.csv sample_link_corner_points_file.csv
"""
# load additional libraries for this script (the rest are in queue_fx.py)
import argparse
import numpy as np
import pandas as pd
from datetime import timedelta
from datetime import datetime
from sklearn.model_selection import train_test_split
# ML Model
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import joblib
# python module in the Machine_Learning folder
import queue_fx
# define path names
supporting_files_path = "../Supporting_Files/"
# Queue values from definition
QUEUE_START_SPEED = 0.00
QUEUE_FOLLOWING_SPEED = (10.0 *0.681818) # convert ft/sec to mph
QUEUE_HEADWAY_DISTANCE = 20.0 #ft, in queue definition
QUEUE_DISTANCE_WITHIN_STOP_POINT = 20 #ft
def format_queues(y_df_name):
"""Bin the number of vehicles in queue into pairs.
Assumption: This reduces the number of classes for multiclass classification by half without losing too much information."""
# Creating a queue indicator column
# add a column to y_df that is an indicator 1/0 for queue at intersection yes/no
y_df_name['queue_indicator'] = 0
mask_queue = (y_df_name['queue_count_max']>0)
y_df_name.loc[mask_queue,'queue_indicator'] = 1
# Creating a queue count binned column, pairs of # vehs
# bin the queue counts into pairs as high as your max queue count observed in your training data
binned_queues = [-np.inf,0,2,4,6,8,10,12,14,16,18,20,22,24,26,28]
bin_labels = ["no_queue","1-2", "3-4", "5-6", "7-8", "9-10", "11-12", "13-14", "15-16",
"17-18", "19-20", "21-22", "23-24", "25-26", "27-28"]
# convert the categorically binned queue_count_binned column to int with .cat.codes
y_df_name['queue_count_binned']=pd.cut(x=y_df_name['queue_count_max'], bins=binned_queues,
labels = bin_labels, include_lowest =True).cat.codes
#print(y_df_name.head())
return y_df_name
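# Illustrative only (not called anywhere in this script): a tiny worked example of the
# pair binning performed by format_queues.
def _example_format_queues():
    """Show how max queue counts are binned: 0 -> code 0 ("no_queue"), 3 -> code 2 ("3-4"), 11 -> code 6 ("11-12")."""
    example = pd.DataFrame({'time': ['07:00:00', '07:00:30', '07:01:00'],
                            'link': [1, 1, 1],
                            'queue_count_max': [0, 3, 11]})
    binned = format_queues(example)
    # queue_indicator -> [0, 1, 1]; queue_count_binned -> [0, 2, 6]
    return binned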
def join_features_and_labels(base_df_name, y_df_name, stopline_avg_df_name, signals_df_name):
"""Join the aggregated features (created from BSMs and supporting files) and their labels (queue count and length).
Stopline_avg_df is outputted from the create_avg_stoplines_df function."""
# join the labels (y_df) to the featuers (base_df)
df_xy = y_df_name.merge(base_df_name, how= 'left', left_on=['time','link'],right_on=['time_30', 'assigned_linkID'])
df_xy = df_xy.drop(['assigned_linkID', 'time_30'], axis=1)
# Bring in a few link-specific features (e.g., # lanes, direction) by joining stopline_avg_df to base_df
# This will add two features based on link: n_lanes and link_direction
df_xy = df_xy.merge(stopline_avg_df_name, how='left',
left_on=['link'],
right_on=['Link_ID'])
df_xy = df_xy.drop(['Link_ID','mean_X', 'mean_Y'], axis=1)
# join signals df columns to base_df
# issue of link ID 16:15 since accidentally coded as 15:16 in signals file! whoops.
df_xy = df_xy.merge(signals_df_name, how='left',
left_on=['time','link'],
right_on=['time_30','Link_ID'])
df_xy = df_xy.drop(['Link_ID', 'time_30'], axis=1)
#print(df_xy.columns)
return df_xy
def add_previous_time_queue_count_col(df_xy_name):
"""Creating a column that captures the previous 30 seconds queue_count for each link as a new feature"""
# to datetime
df_xy_name['time_30_dt']= pd.to_datetime(df_xy_name['time'], format="%H:%M:%S")
# add a new column that is 30 secs prior to current time
df_xy_name['previous_time_30sec'] = df_xy_name['time_30_dt'] - timedelta(seconds=30)
# now remove the date from the datetime
df_xy_name['time_30_dt'] = df_xy_name['time_30_dt'].dt.time
df_xy_name['previous_time_30sec'] = df_xy_name['previous_time_30sec'].dt.time
# self inner join, left on current time, right on previous time 30sec (same link!)
base = pd.merge(df_xy_name, df_xy_name, left_index = True,
left_on=['previous_time_30sec','link'],
right_on=['time_30_dt','link'],
how = 'inner', copy=False, suffixes=('', '_previous'))
# columns to keep in base
cols_keep = df_xy_name.columns.tolist()
cols_keep.append('queue_count_max_previous')
# keep only the original columns plus the queue_count_max_previous
base = base.loc[:,base.columns.isin(['time','link','queue_count_max_previous'])]
df_xy = df_xy_name.merge(base, how='left',
left_on=['time','link'],
right_on=['time','link'])
df_xy.drop(['previous_time_30sec', 'time_30_dt'], axis=1, inplace=True)
#print(df_xy.columns)
return df_xy
def split_into_X_and_Y(df_xy_name, label_selection = 'queue_count_binned'):
"""Separate the features (X) and the labels (Y). The default label selection (Y) is queue_count_binned.
However, you could use queue_count_max (not binned) or queue_indicator for the classifier instead."""
# preparing X and y
col_lst = ['queue_count_max', 'queue_len_max', 'queue_indicator', 'queue_count_binned','time']
X = df_xy_name.loc[:,~df_xy_name.columns.isin(col_lst)] #.to_numpy()
#print(X.shape, "shape of features X")
y = df_xy_name[label_selection] #.to_numpy()
#print(y.shape, "shape of labels y")
return X, y
def train_RF_model(X_train, X_test, y_train):
"""Train the Random Forest Classifier and make predictions on held out test data.
Model parameters are set to those that worked well for testing and validation in this project."""
model_rf = RandomForestClassifier(n_estimators=150, max_depth=50, random_state=0)
model_rf.fit(X_train, y_train)
# make predictions
# no changes to 33% test set other than scaling
predicted_rf = model_rf.predict(X_test)
return predicted_rf, model_rf
def evaluate_RF_model(expected, predicted_rf):
"""Report out performance measures for the trained RF model on unseen test data.
Measures include accuracy, weighted F1-score, confusion matrix, False Positive Rate (FPR), and False Negative Rate (FNR)."""
# summarize the fit of the model
print("Accuracy:", metrics.accuracy_score(expected, predicted_rf))
# choose F1-score type
print("Weighted F1-Score:", metrics.f1_score(expected, predicted_rf, average ='weighted'))
#print(metrics.classification_report(expected, predicted_rf))
print("Confusion Matrix", metrics.confusion_matrix(expected, predicted_rf))
rf_conf_matrix = metrics.confusion_matrix(expected, predicted_rf)
# calculating FNR and FPR
all_0_preds = len(predicted_rf[predicted_rf ==0])
correct_0_preds = rf_conf_matrix[0,0]
FN = all_0_preds-correct_0_preds
FP = sum(rf_conf_matrix[0])-rf_conf_matrix[0,0]
TN = rf_conf_matrix[0,0]
total_n = np.sum(rf_conf_matrix)
TP = total_n-FN-FP-TN
print("FN:", FN, "FP:", FP, "TP:", TP, "TN:", TN)
FNR = FN/(FN+TP)
FPR = FP/(FP+TN)
print("FPR:",FPR*100,'%')
print("FNR:",FNR*100,'%')
def export_trained_RF_model(model_name, joblib_output_filename):
"""Export the trained RF model as a joblib pickel .pkl file"""
print("FB joblib file:", joblib_output_filename, "MUST END IN .pkl")
joblib.dump(model_name, joblib_output_filename)
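# Illustrative counterpart to export_trained_RF_model (not called anywhere in this script):
# reload the exported classifier with joblib and predict on already-scaled features.
def _example_load_trained_RF_model(joblib_input_filename, X_scaled):
    """Load a previously exported RF model from its .pkl file and return its predictions."""
    model_rf = joblib.load(joblib_input_filename)
    return model_rf.predict(X_scaled)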
def main():
"""Parse six command line arguments then run data cleaning, feature engineering, ML Random Forest training, and model export."""
parser = argparse.ArgumentParser(description='Script to output trained Supervised ML Classifier (Random Forest), for offline purposes.')
parser.add_argument('BSMs_X_filename') # CSV file of all BSMs for ML model training and testing. The BSMs will be used to create aggregated ML features (X).
    parser.add_argument('max_queues_Y_filename') # CSV file outputted from ground_truth_max_queue_counts_and_lengths.py. These are the supervised ML labels (Y).
parser.add_argument('veh_lengths_filename') # supporting CSV file of vehicle lengths in ft by type
parser.add_argument('stoplines_filename') # supporting CSV file of stop line X,Y coordinates for each link and lane
parser.add_argument('signal_timing_filename') # supporting CSV file of signal timing for each link every 30 seconds
parser.add_argument('link_corners_filename') # supporting CSV file with link corner points for BSM assignment
parser.add_argument('--out', help = 'Output pkl file (include .pkl)') # name of exported ML model, needs .pkl extension
args = parser.parse_args()
# read in the six files
df = queue_fx.read_BSMs_file(args.BSMs_X_filename)
y_df = format_queues(queue_fx.read_max_queues_Y_file(args.max_queues_Y_filename))
veh_len_df = queue_fx.read_veh_lengths_file(args.veh_lengths_filename)
stoplines_df = queue_fx.read_stoplines_file(args.stoplines_filename)
signals_df = queue_fx.read_signal_timing_file(args.signal_timing_filename)
link_points_df = queue_fx.read_link_corners_file(args.link_corners_filename)
# create the avg stoplines X,Y df
stopline_avg_df = queue_fx.create_avg_stoplines_df(stoplines_df)
# format the time
df.transtime = df.transtime.apply(queue_fx.format_result)
# create a new column that assigns BSM to 30 second time interval
df['transtime_30sec'] = df['transtime'].dt.round('30S')
# Assign BSMs to links
df = queue_fx.assign_BSMs_to_roadway_links(df, link_points_df)
# join columns from veh len to main BSMs df
df = queue_fx.join_veh_len_to_BSM_df(df, veh_len_df)
# Engineer the aggregated BSM features by assigned link and 30 secs
base_df = queue_fx.feature_engineering(df, stopline_avg_df)
# Join all features and labels
df_xy = join_features_and_labels(base_df, y_df, stopline_avg_df, signals_df)
# Add a column to the features for the previous time step's queue count for each link
df_xy = add_previous_time_queue_count_col(df_xy)
# Handle any missing values
df_xy = queue_fx.label_encode_categorical_features(queue_fx.handle_missing_data(df_xy, df))
X,y = split_into_X_and_Y(df_xy)
# scale the features X for classification
X = queue_fx.feature_scaling_X(X)
# Split the data into training and testing sets for ML.
# This code is for training and testing new ML models.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
# train the RF classifier and make predictions from the test set X's
predicted_rf, model_rf = train_RF_model(X_train, X_test, y_train)
# print model performance results
evaluate_RF_model(y_test, predicted_rf)
# export the trained RF model as a .pkl file
if args.out:
output_file = args.out
else:
output_file = "Exported_Trained_RF_Model.pkl"
export_trained_RF_model(model_rf, output_file)
if __name__ == "__main__":
main()
| 49.108696
| 237
| 0.723063
|
795249e8fde080574bbc776ca2d274f0a0a663dd
| 15,430
|
py
|
Python
|
desktop/libs/notebook/src/notebook/connectors/base.py
|
integrateai/hue
|
ca2ea6c5e3979b963b151931ac4a689f69131b54
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/notebook/src/notebook/connectors/base.py
|
integrateai/hue
|
ca2ea6c5e3979b963b151931ac4a689f69131b54
|
[
"Apache-2.0"
] | null | null | null |
desktop/libs/notebook/src/notebook/connectors/base.py
|
integrateai/hue
|
ca2ea6c5e3979b963b151931ac4a689f69131b54
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import uuid
from django.utils.translation import ugettext as _
from desktop.conf import has_multi_cluster
from desktop.lib.exceptions_renderable import PopupException
from desktop.lib.i18n import smart_unicode
from notebook.conf import get_ordered_interpreters
LOG = logging.getLogger(__name__)
class SessionExpired(Exception):
pass
class QueryExpired(Exception):
def __init__(self, message=None):
super(QueryExpired, self).__init__()
self.message = message
class AuthenticationRequired(Exception):
def __init__(self, message=None):
super(AuthenticationRequired, self).__init__()
self.message = message
class OperationTimeout(Exception):
pass
class OperationNotSupported(Exception):
pass
class QueryError(Exception):
def __init__(self, message, handle=None):
super(QueryError, self).__init__(message)
self.message = message or _('No error message, please check the logs.')
self.handle = handle
self.extra = {}
def __unicode__(self):
return smart_unicode(self.message)
class Notebook(object):
def __init__(self, document=None, **options):
self.document = None
if document is not None:
self.data = document.data
self.document = document
else:
_data = {
'name': 'My Notebook',
'uuid': str(uuid.uuid4()),
'description': '',
'type': 'notebook',
'isSaved': False,
'isManaged': False, # Aka isTask
'skipHistorify': False,
'sessions': [],
'snippets': [],
}
_data.update(options)
self.data = json.dumps(_data)
def get_json(self):
_data = self.get_data()
return json.dumps(_data)
def get_data(self):
_data = json.loads(self.data)
if self.document is not None:
_data['id'] = self.document.id
_data['is_history'] = self.document.is_history
return _data
def get_str(self):
return '\n\n\n'.join(['USE %s;\n\n%s' % (snippet['database'], Notebook.statement_with_variables(snippet)) for snippet in self.get_data()['snippets']])
@staticmethod
def statement_with_variables(snippet):
statement_raw = snippet['statement_raw']
hasCurlyBracketParameters = snippet['type'] != 'pig'
variables = {}
for variable in snippet['variables']:
variables[variable['name']] = variable
if variables:
variables_names = []
for variable in snippet['variables']:
variables_names.append(variable['name'])
variablesString = '|'.join(variables_names)
def replace(match):
p1 = match.group(1)
p2 = match.group(2)
variable = variables[p2]
value = str(variable['value'])
return p1 + (value if value is not None else variable['meta'].get('placeholder',''))
return re.sub("([^\\\\])\\$" + ("{(" if hasCurlyBracketParameters else "(") + variablesString + ")(=[^}]*)?" + ("}" if hasCurlyBracketParameters else ""), replace, statement_raw)
return statement_raw
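  # Example: with variables [{'name': 'n', 'value': '10', 'meta': {}}] a Hive snippet whose
  # statement_raw is "SELECT * FROM t LIMIT ${n}" is rendered as "SELECT * FROM t LIMIT 10";
  # for Pig snippets the same substitution applies to "$n" (no curly braces), and a
  # backslash before the dollar sign ("\$n") suppresses the substitution.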
def add_hive_snippet(self, database, sql):
_data = json.loads(self.data)
_data['snippets'].append(self._make_snippet({
'status': 'running',
'statement_raw': sql,
'statement': sql,
'type': 'hive',
'properties': {
'files': [],
'functions': [],
'settings': [],
},
'database': database,
}))
self._add_session(_data, 'hive')
self.data = json.dumps(_data)
def add_java_snippet(self, clazz, app_jar, arguments, files):
_data = json.loads(self.data)
_data['snippets'].append(self._make_snippet({
u'type': u'java',
u'status': u'running',
u'properties': {
u'files': files,
u'class': clazz,
u'app_jar': app_jar,
u'arguments': arguments,
u'archives': [],
}
}))
self._add_session(_data, 'java')
self.data = json.dumps(_data)
def add_sqoop_snippet(self, statement, arguments, files):
_data = json.loads(self.data)
_data['snippets'].append(self._make_snippet({
u'type': u'sqoop1',
u'status': u'running',
u'properties': {
u'files': files,
u'arguments': arguments,
u'archives': [],
u'statement': statement
}
}))
self._add_session(_data, 'java')
self.data = json.dumps(_data)
def add_spark_snippet(self, clazz, jars, arguments, files):
_data = json.loads(self.data)
_data['snippets'].append(self._make_snippet({
u'type': u'spark',
u'status': u'running',
u'properties': {
u'files': files,
u'class': clazz,
u'app_jar': jars,
u'arguments': arguments,
u'archives': [],
u'spark_opts': ''
}
}))
self._add_session(_data, 'spark')
self.data = json.dumps(_data)
def add_shell_snippet(self, shell_command, arguments=None, archives=None, files=None, env_var=None, last_executed=None, capture_output=True):
_data = json.loads(self.data)
if arguments is None:
arguments = []
if archives is None:
archives = []
if files is None:
files = []
if env_var is None:
env_var = []
_data['snippets'].append(self._make_snippet({
u'type': u'shell',
u'status': u'running',
u'properties': {
u'files': files,
u'shell_command': shell_command,
u'arguments': arguments,
u'archives': archives,
u'env_var': env_var,
u'command_path': shell_command,
u'capture_output': capture_output
},
u'lastExecuted': last_executed
}))
self._add_session(_data, 'shell')
self.data = json.dumps(_data)
def _make_snippet(self, _snippet):
return {
'status': _snippet.get('status', 'ready'),
'id': str(uuid.uuid4()),
'statement_raw': _snippet.get('statement', ''),
'statement': _snippet.get('statement', ''),
'type': _snippet.get('type'),
'properties': _snippet['properties'],
'name': _snippet.get('name', '%(type)s snippet' % _snippet),
'database': _snippet.get('database'),
'result': {},
'variables': [],
'lastExecuted': _snippet.get('lastExecuted'),
'capture_output': _snippet.get('capture_output', True)
}
def _add_session(self, data, snippet_type):
from notebook.connectors.hiveserver2 import HS2Api # Cyclic dependency
if snippet_type not in [_s['type'] for _s in data['sessions']]:
data['sessions'].append({
'type': snippet_type,
'properties': HS2Api.get_properties(snippet_type),
'id': None
}
)
def execute(self, request, batch=False):
from notebook.api import _execute_notebook # Cyclic dependency
notebook_data = self.get_data()
snippet = notebook_data['snippets'][0]
snippet['wasBatchExecuted'] = batch
return _execute_notebook(request, notebook_data, snippet)
def get_api(request, snippet):
from notebook.connectors.oozie_batch import OozieApi
if snippet.get('wasBatchExecuted'):
return OozieApi(user=request.user, request=request)
if snippet['type'] == 'report':
snippet['type'] = 'impala'
interpreter = [interpreter for interpreter in get_ordered_interpreters(request.user) if interpreter['type'] == snippet['type']]
if not interpreter:
if snippet['type'] == 'hbase':
interpreter = [{
'name': 'hbase',
'type': 'hbase',
'interface': 'hbase',
'options': {},
'is_sql': False
}]
elif snippet['type'] == 'kafka':
interpreter = [{
'name': 'kafka',
'type': 'kafka',
'interface': 'kafka',
'options': {},
'is_sql': False
}]
elif snippet['type'] == 'solr':
interpreter = [{
'name': 'solr',
'type': 'solr',
'interface': 'solr',
'options': {},
'is_sql': False
}]
elif snippet['type'] == 'custom':
interpreter = [{
'name': snippet['name'],
'type': snippet['type'],
'interface': snippet['interface'],
'options': snippet.get('options', {}),
'is_sql': False
}]
else:
raise PopupException(_('Snippet type %(type)s is not configured in hue.ini') % snippet)
interpreter = interpreter[0]
interface = interpreter['interface']
# Multi cluster
if has_multi_cluster():
cluster = json.loads(request.POST.get('cluster', '""')) # Via Catalog autocomplete API or Notebook create sessions
if cluster == '""' or cluster == 'undefined':
cluster = None
if not cluster and snippet.get('compute'): # Via notebook.ko.js
cluster = snippet['compute']
else:
cluster = None
cluster_name = cluster.get('id') if cluster else None
if cluster and 'altus:dataware:k8s' in cluster_name:
interface = 'hiveserver2'
elif cluster and 'crn:altus:dataware:' in cluster_name:
interface = 'altus-adb'
elif cluster and 'crn:altus:dataeng:' in cluster_name:
interface = 'dataeng'
LOG.info('Selected cluster %s %s interface %s' % (cluster_name, cluster, interface))
snippet['interface'] = interface
if interface == 'hiveserver2':
from notebook.connectors.hiveserver2 import HS2Api
return HS2Api(user=request.user, request=request, cluster=cluster)
elif interface == 'oozie':
return OozieApi(user=request.user, request=request)
elif interface == 'livy':
from notebook.connectors.spark_shell import SparkApi
return SparkApi(request.user)
elif interface == 'livy-batch':
from notebook.connectors.spark_batch import SparkBatchApi
return SparkBatchApi(request.user)
elif interface == 'text' or interface == 'markdown':
from notebook.connectors.text import TextApi
return TextApi(request.user)
elif interface == 'rdbms':
from notebook.connectors.rdbms import RdbmsApi
return RdbmsApi(request.user, interpreter=snippet['type'], query_server=snippet.get('query_server'))
elif interface == 'altus-adb':
from notebook.connectors.altus_adb import AltusAdbApi
return AltusAdbApi(user=request.user, cluster_name=cluster_name, request=request)
elif interface == 'dataeng':
from notebook.connectors.dataeng import DataEngApi
return DataEngApi(user=request.user, request=request, cluster_name=cluster_name)
elif interface == 'jdbc':
if interpreter['options'] and interpreter['options'].get('url', '').find('teradata') >= 0:
from notebook.connectors.jdbc_teradata import JdbcApiTeradata
return JdbcApiTeradata(request.user, interpreter=interpreter)
elif interpreter['options'] and interpreter['options'].get('url', '').find('presto') >= 0:
from notebook.connectors.jdbc_presto import JdbcApiPresto
return JdbcApiPresto(request.user, interpreter=interpreter)
elif interpreter['options'] and interpreter['options'].get('url', '').find('clickhouse') >= 0:
from notebook.connectors.jdbc_clickhouse import JdbcApiClickhouse
return JdbcApiClickhouse(request.user, interpreter=interpreter)
else:
from notebook.connectors.jdbc import JdbcApi
return JdbcApi(request.user, interpreter=interpreter)
elif interface == 'teradata':
from notebook.connectors.jdbc_teradata import JdbcApiTeradata
return JdbcApiTeradata(request.user, interpreter=interpreter)
elif interface == 'presto':
from notebook.connectors.jdbc_presto import JdbcApiPresto
return JdbcApiPresto(request.user, interpreter=interpreter)
elif interface == 'sqlalchemy':
from notebook.connectors.sqlalchemyapi import SqlAlchemyApi
return SqlAlchemyApi(request.user, interpreter=interpreter)
elif interface == 'solr':
from notebook.connectors.solr import SolrApi
return SolrApi(request.user, interpreter=interpreter)
elif interface == 'hbase':
from notebook.connectors.hbase import HBaseApi
return HBaseApi(request.user)
elif interface == 'kafka':
from notebook.connectors.kafka import KafkaApi
return KafkaApi(request.user)
elif interface == 'pig':
return OozieApi(user=request.user, request=request) # Backward compatibility until Hue 4
else:
raise PopupException(_('Notebook connector interface not recognized: %s') % interface)
def _get_snippet_session(notebook, snippet):
session = [session for session in notebook['sessions'] if session['type'] == snippet['type']]
if not session:
raise SessionExpired()
else:
return session[0]
# Base API
class Api(object):
def __init__(self, user, interpreter=None, request=None, cluster=None, query_server=None):
self.user = user
self.interpreter = interpreter
self.request = request
self.cluster = cluster
self.query_server = query_server
def create_session(self, lang, properties=None):
return {
'type': lang,
'id': None,
'properties': properties if properties is not None else []
}
def close_session(self, session):
pass
def fetch_result(self, notebook, snippet, rows, start_over):
pass
def fetch_result_size(self, notebook, snippet):
raise OperationNotSupported()
def download(self, notebook, snippet, format, user_agent=None, max_rows=None, store_data_type_in_header=False):
pass
def get_log(self, notebook, snippet, startFrom=None, size=None):
return 'No logs'
def autocomplete(self, snippet, database=None, table=None, column=None, nested=None):
return {}
def progress(self, snippet, logs=None):
return 50
def get_jobs(self, notebook, snippet, logs):
return []
def get_sample_data(self, snippet, database=None, table=None, column=None, async=False, operation=None): raise NotImplementedError()
def export_data_as_hdfs_file(self, snippet, target_file, overwrite): raise NotImplementedError()
def export_data_as_table(self, notebook, snippet, destination, is_temporary=False, location=None): raise NotImplementedError()
def export_large_data_to_hdfs(self, notebook, snippet, destination): raise NotImplementedError()
def statement_risk(self, notebook, snippet): raise NotImplementedError()
def statement_compatibility(self, notebook, snippet, source_platform, target_platform): raise NotImplementedError()
def statement_similarity(self, notebook, snippet, source_platform, target_platform): raise NotImplementedError()
def _get_snippet_name(notebook, unique=False, table_format=False):
name = (('%(name)s' + ('-%(id)s' if unique else '') if notebook.get('name') else '%(type)s-%(id)s') % notebook)
if table_format:
name = re.sub(r'[-|\s:]', '_', name)
return name
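# --- Illustrative usage sketch (not part of the original module) ---------
# Shows how Notebook.statement_with_variables() substitutes ${name}
# placeholders; the snippet dict and its values below are made up and only
# mirror the keys the method reads ('statement_raw', 'type', 'variables').
def _example_statement_with_variables():
  snippet = {
    'statement_raw': "SELECT * FROM logs WHERE day = '${day}'",
    'type': 'hive',
    'variables': [
      {'name': 'day', 'value': '20200101', 'meta': {'placeholder': ''}}
    ]
  }
  # Returns: SELECT * FROM logs WHERE day = '20200101'
  return Notebook.statement_with_variables(snippet)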
| 32.690678
| 184
| 0.663383
|
79524a683cc4cfed3e8143de34ed0d5259d60a0a
| 21,312
|
py
|
Python
|
third_party/protobuf/src/gtest/test/run_tests_test.py
|
apkbox/nano-rpc
|
d9d253b104399b443fb377cac2856b8997b1dab6
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/protobuf/src/gtest/test/run_tests_test.py
|
apkbox/nano-rpc
|
d9d253b104399b443fb377cac2856b8997b1dab6
|
[
"BSD-3-Clause"
] | null | null | null |
third_party/protobuf/src/gtest/test/run_tests_test.py
|
apkbox/nano-rpc
|
d9d253b104399b443fb377cac2856b8997b1dab6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for run_tests.py test runner script."""
__author__ = 'vladl@google.com (Vlad Losev)'
import os
import re
import sets
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), os.pardir))
import run_tests
GTEST_DBG_DIR = 'scons/build/dbg/scons'
GTEST_OPT_DIR = 'scons/build/opt/scons'
GTEST_OTHER_DIR = 'scons/build/other/scons'
def AddExeExtension(path):
"""Appends .exe to the path on Windows or Cygwin."""
if run_tests.IS_WINDOWS or run_tests.IS_CYGWIN:
return path + '.exe'
else:
return path
class FakePath(object):
"""A fake os.path module for testing."""
def __init__(self, current_dir=os.getcwd(), known_paths=None):
self.current_dir = current_dir
self.tree = {}
self.path_separator = os.sep
# known_paths contains either absolute or relative paths. Relative paths
# are absolutized with self.current_dir.
if known_paths:
self._AddPaths(known_paths)
def _AddPath(self, path):
ends_with_slash = path.endswith('/')
path = self.abspath(path)
if ends_with_slash:
path += self.path_separator
name_list = path.split(self.path_separator)
tree = self.tree
for name in name_list[:-1]:
if not name:
continue
if name in tree:
tree = tree[name]
else:
tree[name] = {}
tree = tree[name]
name = name_list[-1]
if name:
if name in tree:
assert tree[name] == 1
else:
tree[name] = 1
def _AddPaths(self, paths):
for path in paths:
self._AddPath(path)
def PathElement(self, path):
"""Returns an internal representation of directory tree entry for path."""
tree = self.tree
name_list = self.abspath(path).split(self.path_separator)
for name in name_list:
if not name:
continue
tree = tree.get(name, None)
if tree is None:
break
return tree
def normpath(self, path):
return os.path.normpath(path)
def abspath(self, path):
return self.normpath(os.path.join(self.current_dir, path))
def isfile(self, path):
return self.PathElement(self.abspath(path)) == 1
def isdir(self, path):
return type(self.PathElement(self.abspath(path))) == type(dict())
def basename(self, path):
return os.path.basename(path)
def dirname(self, path):
return os.path.dirname(path)
def join(self, *kargs):
return os.path.join(*kargs)
class FakeOs(object):
"""A fake os module for testing."""
P_WAIT = os.P_WAIT
def __init__(self, fake_path_module):
self.path = fake_path_module
# Some methods/attributes are delegated to the real os module.
self.environ = os.environ
def listdir(self, path):
assert self.path.isdir(path)
return self.path.PathElement(path).iterkeys()
def spawnv(self, wait, executable, *kargs):
assert wait == FakeOs.P_WAIT
return self.spawn_impl(executable, kargs)
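# --- Illustrative sketch (not part of the original test file) ------------
# Shows the nested-dict layout FakePath builds from known_paths; the paths
# and the POSIX-style separator are assumptions made for illustration only.
def _ExampleFakePathTree():
  fake_path = FakePath(current_dir='/d',
                       known_paths=['scons/build/dbg/scons/gtest_unittest',
                                    'test/'])
  # With '/' as the separator this yields roughly:
  #   {'d': {'scons': {'build': {'dbg': {'scons': {'gtest_unittest': 1}}}},
  #          'test': {}}}
  # where a value of 1 marks a file and a nested dict marks a directory.
  return fake_path.tree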
class GetTestsToRunTest(unittest.TestCase):
"""Exercises TestRunner.GetTestsToRun."""
def NormalizeGetTestsToRunResults(self, results):
"""Normalizes path data returned from GetTestsToRun for comparison."""
def NormalizePythonTestPair(pair):
"""Normalizes path data in the (directory, python_script) pair."""
return (os.path.normpath(pair[0]), os.path.normpath(pair[1]))
def NormalizeBinaryTestPair(pair):
"""Normalizes path data in the (directory, binary_executable) pair."""
directory, executable = map(os.path.normpath, pair)
# On Windows and Cygwin, the test file names have the .exe extension, but
# they can be invoked either by name or by name+extension. Our test must
# accommodate both situations.
if run_tests.IS_WINDOWS or run_tests.IS_CYGWIN:
executable = re.sub(r'\.exe$', '', executable)
return (directory, executable)
python_tests = sets.Set(map(NormalizePythonTestPair, results[0]))
binary_tests = sets.Set(map(NormalizeBinaryTestPair, results[1]))
return (python_tests, binary_tests)
def AssertResultsEqual(self, results, expected):
"""Asserts results returned by GetTestsToRun equal to expected results."""
self.assertEqual(self.NormalizeGetTestsToRunResults(results),
self.NormalizeGetTestsToRunResults(expected),
'Incorrect set of tests returned:\n%s\nexpected:\n%s' %
(results, expected))
def setUp(self):
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests.__file__)),
known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
'test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests.TestRunner(injected_os=self.fake_os,
injected_subprocess=None,
injected_script_dir='.')
def testBinaryTestsOnly(self):
"""Exercises GetTestsToRun with parameters designating binary tests only."""
# A default build.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# An explicitly specified directory.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# A particular configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'other',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_OTHER_DIR, GTEST_OTHER_DIR + '/gtest_unittest')]))
# All available configurations
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'all',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# All built configurations (unbuilt don't cause failure).
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
True,
available_configurations=self.fake_configurations + ['unbuilt']),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# A combination of an explicit directory and a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'opt',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
# Same test specified in an explicit directory and via a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'dbg',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# All built configurations + explicit directory + explicit configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_unittest'],
'opt',
True,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest'),
(GTEST_OPT_DIR, GTEST_OPT_DIR + '/gtest_unittest')]))
def testPythonTestsOnly(self):
"""Exercises GetTestsToRun with parameters designating Python tests only."""
# A default build.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# An explicitly specified directory.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'test/gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# A particular configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'other',
False,
available_configurations=self.fake_configurations),
([(GTEST_OTHER_DIR, 'test/gtest_color_test.py')],
[]))
# All available configurations
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['test/gtest_color_test.py'],
'all',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations (unbuilt don't cause failure).
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
True,
available_configurations=self.fake_configurations + ['unbuilt']),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# A combination of an explicit directory and a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
# Same test specified in an explicit directory and via a configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'dbg',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
# All built configurations + explicit directory + explicit configuration.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[GTEST_DBG_DIR, 'gtest_color_test.py'],
'opt',
True,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py'),
(GTEST_OPT_DIR, 'test/gtest_color_test.py')],
[]))
def testCombinationOfBinaryAndPythonTests(self):
"""Exercises GetTestsToRun with mixed binary/Python tests."""
# Use only default configuration for this test.
# Neither binary nor Python tests are specified so find all.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying both binary and Python tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest', 'gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying binary tests suppresses Python tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]))
# Specifying Python tests suppresses binary tests.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]))
def testIgnoresNonTestFiles(self):
"""Verifies that GetTestsToRun ignores non-test files in the filesystem."""
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests.__file__)),
known_paths=[AddExeExtension(GTEST_DBG_DIR + '/gtest_nontest'),
'test/']))
self.test_runner = run_tests.TestRunner(injected_os=self.fake_os,
injected_subprocess=None,
injected_script_dir='.')
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
True,
available_configurations=self.fake_configurations),
([], []))
def testWorksFromDifferentDir(self):
"""Exercises GetTestsToRun from a directory different from run_test.py's."""
# Here we simulate an test script in directory /d/ called from the
# directory /a/b/c/.
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath('/a/b/c'),
known_paths=[
'/a/b/c/',
AddExeExtension('/d/' + GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension('/d/' + GTEST_OPT_DIR + '/gtest_unittest'),
'/d/test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests.TestRunner(injected_os=self.fake_os,
injected_subprocess=None,
injected_script_dir='/d/')
# A binary test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_unittest'],
'',
False,
available_configurations=self.fake_configurations),
([],
[('/d/' + GTEST_DBG_DIR, '/d/' + GTEST_DBG_DIR + '/gtest_unittest')]))
# A Python test.
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
['gtest_color_test.py'],
'',
False,
available_configurations=self.fake_configurations),
([('/d/' + GTEST_DBG_DIR, '/d/test/gtest_color_test.py')], []))
def testNonTestBinary(self):
"""Exercises GetTestsToRun with a non-test parameter."""
self.assert_(
not self.test_runner.GetTestsToRun(
['gtest_unittest_not_really'],
'',
False,
available_configurations=self.fake_configurations))
def testNonExistingPythonTest(self):
"""Exercises GetTestsToRun with a non-existent Python test parameter."""
self.assert_(
not self.test_runner.GetTestsToRun(
['nonexistent_test.py'],
'',
False,
available_configurations=self.fake_configurations))
if run_tests.IS_WINDOWS or run_tests.IS_CYGWIN:
def testDoesNotPickNonExeFilesOnWindows(self):
"""Verifies that GetTestsToRun does not find _test files on Windows."""
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests.__file__)),
known_paths=['/d/' + GTEST_DBG_DIR + '/gtest_test', 'test/']))
self.test_runner = run_tests.TestRunner(injected_os=self.fake_os,
injected_subprocess=None,
injected_script_dir='.')
self.AssertResultsEqual(
self.test_runner.GetTestsToRun(
[],
'',
True,
available_configurations=self.fake_configurations),
([], []))
class RunTestsTest(unittest.TestCase):
"""Exercises TestRunner.RunTests."""
def SpawnSuccess(self, unused_executable, unused_argv):
"""Fakes test success by returning 0 as an exit code."""
self.num_spawn_calls += 1
return 0
def SpawnFailure(self, unused_executable, unused_argv):
"""Fakes test success by returning 1 as an exit code."""
self.num_spawn_calls += 1
return 1
def setUp(self):
self.fake_os = FakeOs(FakePath(
current_dir=os.path.abspath(os.path.dirname(run_tests.__file__)),
known_paths=[
AddExeExtension(GTEST_DBG_DIR + '/gtest_unittest'),
AddExeExtension(GTEST_OPT_DIR + '/gtest_unittest'),
'test/gtest_color_test.py']))
self.fake_configurations = ['dbg', 'opt']
self.test_runner = run_tests.TestRunner(injected_os=self.fake_os,
injected_subprocess=None)
self.num_spawn_calls = 0 # A number of calls to spawn.
def testRunPythonTestSuccess(self):
"""Exercises RunTests to handle a Python test success."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]),
0)
self.assertEqual(self.num_spawn_calls, 1)
def testRunBinaryTestSuccess(self):
"""Exercises RunTests to handle a binary test success."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 1)
def testRunPythonTestFailure(self):
"""Exercises RunTests to handle a Python test failure."""
self.fake_os.spawn_impl = self.SpawnFailure
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, 'test/gtest_color_test.py')],
[]),
1)
self.assertEqual(self.num_spawn_calls, 1)
def testRunBinaryTestFailure(self):
"""Exercises RunTests to handle a binary test failure."""
self.fake_os.spawn_impl = self.SpawnFailure
self.assertEqual(
self.test_runner.RunTests(
[],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
1)
self.assertEqual(self.num_spawn_calls, 1)
def testCombinedTestSuccess(self):
"""Exercises RunTests to handle a success of both Python and binary test."""
self.fake_os.spawn_impl = self.SpawnSuccess
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 2)
def testCombinedTestSuccessAndFailure(self):
"""Exercises RunTests to handle a success of both Python and binary test."""
def SpawnImpl(executable, argv):
self.num_spawn_calls += 1
# Simulates failure of a Python test and success of a binary test.
if '.py' in executable or '.py' in argv[0]:
return 1
else:
return 0
self.fake_os.spawn_impl = SpawnImpl
self.assertEqual(
self.test_runner.RunTests(
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')],
[(GTEST_DBG_DIR, GTEST_DBG_DIR + '/gtest_unittest')]),
0)
self.assertEqual(self.num_spawn_calls, 2)
if __name__ == '__main__':
unittest.main()
| 34.653659
| 80
| 0.637669
|
79524ad739458878d268280471fd6bc0f3019965
| 6,586
|
py
|
Python
|
S7/python_files/model.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | null | null | null |
S7/python_files/model.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | null | null | null |
S7/python_files/model.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | 1
|
2020-05-02T17:15:22.000Z
|
2020-05-02T17:15:22.000Z
|
from __future__ import print_function
import math
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR
import matplotlib.pyplot as plt
from torchsummary import summary
from tqdm import tqdm
class SeparableConv2d(nn.Module):
def __init__(self,in_channels,out_channels,kernel_size=1,stride=1,padding=0,dilation=1,bias=False):
super(SeparableConv2d,self).__init__()
self.conv1 = nn.Conv2d(in_channels,in_channels,kernel_size,stride,padding,dilation,groups=in_channels,bias=bias)
self.pointwise = nn.Conv2d(in_channels,out_channels,1,1,0,1,1,bias=bias)
def forward(self,x):
x = self.conv1(x)
x = self.pointwise(x)
return x
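# --- Illustrative sketch (not part of the original model) ----------------
# Quick parameter-count comparison showing why the depthwise 3x3 + pointwise
# 1x1 factorization above is cheaper than a full 3x3 convolution; the channel
# sizes are arbitrary examples.
def _separable_param_check(in_c=64, out_c=128):
    sep = SeparableConv2d(in_c, out_c, kernel_size=3, padding=1)
    full = nn.Conv2d(in_c, out_c, kernel_size=3, padding=1, bias=False)
    sep_params = sum(p.numel() for p in sep.parameters())    # 64*1*3*3 + 128*64*1*1 = 8768
    full_params = sum(p.numel() for p in full.parameters())  # 128*64*3*3 = 73728
    return sep_params, full_params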
class Net(nn.Module):
def __init__(self, dropout_value):
super(Net, self).__init__()
self.dropout_value = dropout_value
self.num_of_channels = 3
# Input Block
self.convblock1 = nn.Sequential(
nn.Conv2d(in_channels=self.num_of_channels, out_channels=32, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(32),
nn.Dropout(self.dropout_value)
) # input_size = 32x32x3, output_size = 32x32x32, RF = 3x3
# CONVOLUTION BLOCK 1
self.convblock2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Dropout(self.dropout_value)
) # input_size = 32x32x32, output_size = 32x32x64, RF = 5x5
self.convblock3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Dropout(self.dropout_value)
) # input_size = 32x32x64, output_size = 32x32x128, RF = 7x7
# TRANSITION BLOCK 1
self.convblock4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=32, kernel_size=(1, 1), padding=0, bias=False),
) # input_size = 32x32x128, output_size = 32x32x32, RF = 7x7
self.pool1 = nn.MaxPool2d(2, 2) # input_size = 32x32x32, output_size = 16x16x32, RF = 8x8
# CONVOLUTION BLOCK 2
self.convblock5 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(64),
nn.Dropout(self.dropout_value)
) # input_size = 16x16x32, output_size = 16x16x64, RF = 12x12
self.convblock6 = SeparableConv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1, dilation=1, bias=False)  # 3x3 depthwise + 1x1 pointwise; kernel_size=3 keeps the 16x16 size noted below
# input_size = 16x16x64, output_size = 16x16x128, RF = 16x16
# TRANSITION BLOCK 2
self.convblock7 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=64, kernel_size=(1, 1), padding=0, bias=False),
) # input_size = 16x16x128, output_size = 16x16x64, RF = 16x16
self.pool2 = nn.MaxPool2d(2, 2) # input_size = 16x16x64, output_size = 8x8x64, RF = 18x18
# CONVOLUTION BLOCK 3
self.convblock8 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=2, dilation=2, bias=False),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Dropout(self.dropout_value)
) # input_size = 8x8x64, output_size = 8x8x128, RF = [18+(5-1)*4] = 34x34
self.convblock9 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(256),
nn.Dropout(self.dropout_value)
) # input_size = 8x8x128, output_size = 8x8x256, RF = [34+(3-1)*4] = 42x42
# TRANSITION BLOCK 3
self.convblock10 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=(1, 1), padding=0, bias=False),
) # input_size = 8x8x256, output_size = 8x8x64, RF = 42x42
self.pool3 = nn.MaxPool2d(2, 2) # input_size = 8x8x64, output_size = 4x4x64, RF = 42+(2-1)*4 = 46x46
# CONVOLUTION BLOCK 4
self.convblock11 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(128),
nn.Dropout(self.dropout_value)
) # input_size = 4x4x64, output_size = 4x4x128, RF = 46+(3-1)*8 = 62x62
self.convblock12 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=(3, 3), padding=1, bias=False),
nn.ReLU(),
nn.BatchNorm2d(256),
nn.Dropout(self.dropout_value)
) # input_size = 4x4x128, output_size = 4x4x256, RF = 62+(3-1)*8 = 78x78
# OUTPUT BLOCK
self.gap = nn.Sequential(
nn.AvgPool2d(kernel_size=4)
) # input_size = 4x4x256, output_size = 1x1x256, RF = 78+(4-1)*8 = 102x102
self.convblock13 = nn.Sequential(
nn.Conv2d(in_channels=256, out_channels=64, kernel_size=(1, 1), padding=0, bias=False)
) # input_size = 1x1x256, output_size = 1x1x64, RF = 102x102
self.convblock14 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(1, 1), padding=0, bias=False)
) # input_size = 1x1x64, output_size = 1x1x32, RF = 102x102
self.convblock15 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=10, kernel_size=(1, 1), padding=0, bias=False)
) # input_size = 1x1x32, output_size = 1x1x10, RF = 102x102
def forward(self, x):
x = self.convblock1(x)
x = self.convblock2(x)
x = self.convblock3(x)
x = self.convblock4(x)
x = self.pool1(x)
x = self.convblock5(x)
x = self.convblock6(x)
x = self.convblock7(x)
x = self.pool2(x)
x = self.convblock8(x)
x = self.convblock9(x)
x = self.convblock10(x)
x = self.pool3(x)
x = self.convblock11(x)
x = self.convblock12(x)
x = self.gap(x)
x = self.convblock13(x)
x = self.convblock14(x)
x = self.convblock15(x)
x = x.view(-1, 10)
return F.log_softmax(x, dim=-1)
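# --- Illustrative sketch (not part of the original file) -----------------
# Quick shape check for the CIFAR-10-sized input assumed by the comments
# above: 3x32x32 images in, 10 log-probabilities per image out. The dropout
# value and batch size are arbitrary choices.
def _example_forward():
    model = Net(dropout_value=0.1)
    x = torch.randn(2, 3, 32, 32)  # batch of 2 fake RGB images
    out = model(x)
    assert out.shape == (2, 10)
    return out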
| 42.766234
| 136
| 0.600668
|
79524b60de15e6362b16730d5d05073917209179
| 4,829
|
py
|
Python
|
p3_collab-compet/maddpg_agentlab.py
|
francelico/deep-reinforcement-learning
|
65ea99a4ecf527cb9d8a703313da4815f4688ff0
|
[
"MIT"
] | null | null | null |
p3_collab-compet/maddpg_agentlab.py
|
francelico/deep-reinforcement-learning
|
65ea99a4ecf527cb9d8a703313da4815f4688ff0
|
[
"MIT"
] | 6
|
2020-01-28T22:59:05.000Z
|
2022-02-10T01:12:12.000Z
|
p3_collab-compet/maddpg_agentlab.py
|
francelico/deep-reinforcement-learning
|
65ea99a4ecf527cb9d8a703313da4815f4688ff0
|
[
"MIT"
] | null | null | null |
# main code that contains the neural network setup
# policy + critic updates
# see ddpg.py for other details in the network
from ddpg import DDPGAgent
import torch
from utilities import soft_update, transpose_to_tensor, transpose_list
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = 'cpu'
class MADDPG:
def __init__(self, discount_factor=0.95, tau=0.02):
super(MADDPG, self).__init__()
# critic input = obs_full + actions = 14+2+2+2=20
self.maddpg_agent = [DDPGAgent(14, 16, 8, 2, 20, 32, 16),
DDPGAgent(14, 16, 8, 2, 20, 32, 16),
DDPGAgent(14, 16, 8, 2, 20, 32, 16)]
self.discount_factor = discount_factor
self.tau = tau
self.iter = 0
def get_actors(self):
"""get actors of all the agents in the MADDPG object"""
actors = [ddpg_agent.actor for ddpg_agent in self.maddpg_agent]
return actors
def get_target_actors(self):
"""get target_actors of all the agents in the MADDPG object"""
target_actors = [ddpg_agent.target_actor for ddpg_agent in self.maddpg_agent]
return target_actors
def act(self, obs_all_agents, noise=0.0):
"""get actions from all agents in the MADDPG object"""
actions = [agent.act(obs, noise) for agent, obs in zip(self.maddpg_agent, obs_all_agents)]
return actions
def target_act(self, obs_all_agents, noise=0.0):
"""get target network actions from all the agents in the MADDPG object """
target_actions = [ddpg_agent.target_act(obs, noise) for ddpg_agent, obs in zip(self.maddpg_agent, obs_all_agents)]
return target_actions
def update(self, samples, agent_number, logger):
"""update the critics and actors of all the agents """
# need to transpose each element of the samples
# to flip obs[parallel_agent][agent_number] to
# obs[agent_number][parallel_agent]
obs, obs_full, action, reward, next_obs, next_obs_full, done = map(transpose_to_tensor, samples)
obs_full = torch.stack(obs_full)
next_obs_full = torch.stack(next_obs_full)
agent = self.maddpg_agent[agent_number]
agent.critic_optimizer.zero_grad()
#critic loss = batch mean of (y- Q(s,a) from target network)^2
#y = reward of this timestep + discount * Q(st+1,at+1) from target network
target_actions = self.target_act(next_obs)
target_actions = torch.cat(target_actions, dim=1)
target_critic_input = torch.cat((next_obs_full.t(),target_actions), dim=1).to(device)
with torch.no_grad():
q_next = agent.target_critic(target_critic_input)
y = reward[agent_number].view(-1, 1) + self.discount_factor * q_next * (1 - done[agent_number].view(-1, 1))
action = torch.cat(action, dim=1)
critic_input = torch.cat((obs_full.t(), action), dim=1).to(device)
q = agent.critic(critic_input)
huber_loss = torch.nn.SmoothL1Loss()
critic_loss = huber_loss(q, y.detach())
critic_loss.backward()
#torch.nn.utils.clip_grad_norm_(agent.critic.parameters(), 0.5)
agent.critic_optimizer.step()
#update actor network using policy gradient
agent.actor_optimizer.zero_grad()
# make input to agent
# detach the other agents to save computation
# saves some time for computing derivative
q_input = [ self.maddpg_agent[i].actor(ob) if i == agent_number \
else self.maddpg_agent[i].actor(ob).detach()
for i, ob in enumerate(obs) ]
q_input = torch.cat(q_input, dim=1)
# combine all the actions and observations for input to critic
# many of the obs are redundant, and obs[1] contains all useful information already
q_input2 = torch.cat((obs_full.t(), q_input), dim=1)
# get the policy gradient
actor_loss = -agent.critic(q_input2).mean()
actor_loss.backward()
#torch.nn.utils.clip_grad_norm_(agent.actor.parameters(),0.5)
agent.actor_optimizer.step()
al = actor_loss.cpu().detach().item()
cl = critic_loss.cpu().detach().item()
logger.add_scalars('agent%i/losses' % agent_number,
{'critic loss': cl,
'actor_loss': al},
self.iter)
def update_targets(self):
"""soft update targets"""
self.iter += 1
for ddpg_agent in self.maddpg_agent:
soft_update(ddpg_agent.target_actor, ddpg_agent.actor, self.tau)
soft_update(ddpg_agent.target_critic, ddpg_agent.critic, self.tau)
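# --- Illustrative sketch (not part of the original lab code) -------------
# The critic target computed in update() above is the usual one-step TD
# target, y = r + gamma * Q_target(s', a') * (1 - done). The tensors below
# are toy values that only demonstrate the shapes and broadcasting.
def _example_td_target(reward, q_next, done, gamma=0.95):
    reward = torch.as_tensor(reward, dtype=torch.float32).view(-1, 1)
    q_next = torch.as_tensor(q_next, dtype=torch.float32).view(-1, 1)
    done = torch.as_tensor(done, dtype=torch.float32).view(-1, 1)
    return reward + gamma * q_next * (1 - done)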
| 39.581967
| 122
| 0.625388
|
79524b941919fc9fe975167cb7a6682bf87cd63e
| 4,989
|
py
|
Python
|
test/vanilla/legacy/Expected/AcceptanceTests/BodyBinary/bodybinary/aio/operations/_upload_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/legacy/Expected/AcceptanceTests/BodyBinary/bodybinary/aio/operations/_upload_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | null | null | null |
test/vanilla/legacy/Expected/AcceptanceTests/BodyBinary/bodybinary/aio/operations/_upload_operations.py
|
cfculhane/autorest.python
|
8cbca95faee88d933a58bbbd17b76834faa8d387
|
[
"MIT"
] | 1
|
2022-03-28T08:58:03.000Z
|
2022-03-28T08:58:03.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, IO, Optional, TypeVar
import warnings
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from ..._vendor import _convert_request
from ...operations._upload_operations import build_binary_request, build_file_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UploadOperations:
"""UploadOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def file(self, file_param: IO, **kwargs: Any) -> None:
"""Uploading json file.
:param file_param: JSON file with payload { "more": "cowbell" }.
:type file_param: IO
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/json") # type: Optional[str]
_content = file_param
request = build_file_request(
content_type=content_type,
content=_content,
template_url=self.file.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
file.metadata = {"url": "/binary/file"} # type: ignore
@distributed_trace_async
async def binary(self, file_param: IO, **kwargs: Any) -> None:
"""Uploading binary file.
:param file_param: Non-empty binary file.
:type file_param: IO
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop("cls", None) # type: ClsType[None]
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}))
content_type = kwargs.pop("content_type", "application/octet-stream") # type: Optional[str]
_content = file_param
request = build_binary_request(
content_type=content_type,
content=_content,
template_url=self.binary.metadata["url"],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
binary.metadata = {"url": "/binary/octet"} # type: ignore
| 39.595238
| 106
| 0.670876
|
79524c05c8c18ef0440cd7afdeb79eec47f39750
| 408
|
py
|
Python
|
GCL/losses/losses.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 361
|
2021-07-09T15:15:23.000Z
|
2022-03-30T07:08:10.000Z
|
GCL/losses/losses.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 25
|
2021-08-21T11:06:26.000Z
|
2022-03-29T02:51:56.000Z
|
GCL/losses/losses.py
|
lem0nle/PyGCL
|
340b0201a5edf4236fef4c96b958ff373ceb7f28
|
[
"Apache-2.0"
] | 45
|
2021-08-07T02:59:45.000Z
|
2022-03-29T05:07:17.000Z
|
import torch
from abc import ABC, abstractmethod
class Loss(ABC):
@abstractmethod
def compute(self, anchor, sample, pos_mask, neg_mask, *args, **kwargs) -> torch.FloatTensor:
pass
def __call__(self, anchor, sample, pos_mask=None, neg_mask=None, *args, **kwargs) -> torch.FloatTensor:
loss = self.compute(anchor, sample, pos_mask, neg_mask, *args, **kwargs)
return loss
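# --- Illustrative sketch (not part of the original module) ---------------
# A minimal concrete Loss showing the interface: compute() receives anchor
# and sample embeddings plus positive/negative masks (assumed here to be
# float {0,1} tensors) and returns a scalar. The InfoNCE-style form below is
# a generic placeholder, not one of the library's shipped objectives.
class _ExampleDotProductLoss(Loss):
    def compute(self, anchor, sample, pos_mask, neg_mask, *args, **kwargs) -> torch.FloatTensor:
        sim = anchor @ sample.t()                  # [num_anchors, num_samples]
        exp_sim = torch.exp(sim)
        pos = (exp_sim * pos_mask).sum(dim=1)
        denom = (exp_sim * (pos_mask + neg_mask)).sum(dim=1)
        return -torch.log(pos / denom).mean()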
| 31.384615
| 107
| 0.683824
|
79524caab16dd28e6b2536bf40a39304e7d920cf
| 516
|
py
|
Python
|
tests/test_python.py
|
jayvdb/dephell_pythons
|
17afb28bd89652718410998dcd13bfdde9dfd865
|
[
"MIT"
] | 2
|
2019-03-20T14:39:55.000Z
|
2019-04-21T15:48:36.000Z
|
tests/test_python.py
|
jayvdb/dephell_pythons
|
17afb28bd89652718410998dcd13bfdde9dfd865
|
[
"MIT"
] | 4
|
2019-07-24T13:51:09.000Z
|
2020-05-28T14:28:14.000Z
|
tests/test_python.py
|
jayvdb/dephell_pythons
|
17afb28bd89652718410998dcd13bfdde9dfd865
|
[
"MIT"
] | 3
|
2019-12-19T01:38:56.000Z
|
2021-09-28T02:43:43.000Z
|
import sys
from dephell_pythons import Pythons
def test_lib_paths():
p = Pythons().current
assert len(p.lib_paths) > 2
# appveyor makes some hacks for python path
libs_found = {path for path in sys.path if 'Scripts' not in path and 'dephell' not in path}
libs_real = {str(path) for path in p.lib_paths if 'Scripts' not in str(path) and 'dephell' not in str(path)}
assert libs_found == libs_real
def test_lib_path():
p = Pythons().current
assert 'site-packages' in str(p.lib_path)
| 30.352941
| 112
| 0.699612
|
79524cdfda340a378ede662e359fa940dbd9655a
| 951
|
py
|
Python
|
setup.py
|
Arfaouim/moire-lattice-generator
|
c46065d27f64010d4bf4c8b0209c27369c316e1e
|
[
"MIT"
] | 1
|
2021-06-24T13:59:00.000Z
|
2021-06-24T13:59:00.000Z
|
setup.py
|
Arfaouim/moire-lattice-generator
|
c46065d27f64010d4bf4c8b0209c27369c316e1e
|
[
"MIT"
] | null | null | null |
setup.py
|
Arfaouim/moire-lattice-generator
|
c46065d27f64010d4bf4c8b0209c27369c316e1e
|
[
"MIT"
] | null | null | null |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
with open('requirements.txt') as f:
requirements = f.read().splitlines()
setuptools.setup(
name="latticegen",
version="0.0.1",
author="T.A. de Jong",
author_email="tobiasadejong@gmail.com",
description="A small package to create images of atomic lattices",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/TAdejong/moire-lattice-generator",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
python_requires='>=3.7',
install_requires=requirements,
tests_require=[
'pytest',
"hypothesis",
],
test_suite="pytest",
)
| 28.818182
| 70
| 0.6551
|
79524d9ebddca959d8ecc93fa7911704399810b4
| 826
|
py
|
Python
|
com/jefferson/bd.py
|
jefferson-paixao/devops_ac03_testes
|
a069be042de162ea19e973a157280b16d1c60d12
|
[
"Apache-2.0"
] | null | null | null |
com/jefferson/bd.py
|
jefferson-paixao/devops_ac03_testes
|
a069be042de162ea19e973a157280b16d1c60d12
|
[
"Apache-2.0"
] | null | null | null |
com/jefferson/bd.py
|
jefferson-paixao/devops_ac03_testes
|
a069be042de162ea19e973a157280b16d1c60d12
|
[
"Apache-2.0"
] | null | null | null |
class bancodedados():
def criando_tabela(self,coluna,campo,valor):
import sqlite3
conn = sqlite3.connect(':memory:')
c = conn.cursor()
with open('criando_tabela.sql', 'r') as content_file:
content = content_file.read()
c.execute(content)
with open('copiando_tabela.sql', 'r') as content_file:
content = content_file.read()
c.execute(content)
# PRAGMA table_info yields (cid, name, type, notnull, dflt_value, pk) per column.
c.execute('PRAGMA table_info(teste)')
notnull = pk = None
for row in c.fetchall():
    if row[1] == coluna:
        notnull = row[3]
        pk = row[5]
conn.commit()
conn.close()
if campo == "pk":
print (pk == valor)
return pk == valor
elif campo == "notnull":
print (notnull == valor)
return notnull == valor
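# --- Illustrative note (not part of the original class) ------------------
# criando_tabela() expects two SQL scripts next to the module:
# 'criando_tabela.sql' (assumed to hold a CREATE TABLE teste ... statement)
# and 'copiando_tabela.sql' (assumed to hold a follow-up statement). A call
# such as bancodedados().criando_tabela('id', 'pk', 1) would then return
# True when the 'id' column is the table's primary key.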
| 28.482759
| 62
| 0.521792
|
79524df2c022ab3e2c9a52de1ec6d746cdfffadf
| 4,179
|
py
|
Python
|
model_src/colour/bw_network/backward_model.py
|
imbyjuli/blackboard-tensorflow-nrp
|
93fb4b5ba18e5e3c55526670160ee3ec21626e43
|
[
"Apache-2.0"
] | null | null | null |
model_src/colour/bw_network/backward_model.py
|
imbyjuli/blackboard-tensorflow-nrp
|
93fb4b5ba18e5e3c55526670160ee3ec21626e43
|
[
"Apache-2.0"
] | null | null | null |
model_src/colour/bw_network/backward_model.py
|
imbyjuli/blackboard-tensorflow-nrp
|
93fb4b5ba18e5e3c55526670160ee3ec21626e43
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import numpy as np
import tensorflow as tf
import cv2
import csv
from backward_weights import init_locally_backward_weights, initiate_backward_weights
from backward_layers import ait_backwards,pit_backwards,v4_backwards,v2_backwards, create_direction_for_eval
tf.logging.set_verbosity(tf.logging.INFO)
def ventral_feed_backward(features, labels, mode):
#==========================================================================================
####### weights and biases described in v_layers_weights
#==========================================================================================
n_field_1 = 40 * 40 * 3
use_sparse = True
input_layer_size = {
#"classes": tf.argmax(input=logits, axis=1),
"v2_1": (40,40),
"v2_2": (40,40),
"v4": (40,40),
"PIT": (40,40),
"AIT": (5,1)
}
for degrees in [0,45,90,135]:
input_layer_size ["v1_"+str(degrees)] = (40,40)
weights, bias = initiate_backward_weights(input_layer_size)
packed_backward_weights = init_locally_backward_weights(input_layer_size)
#==========================================================================================
####### initiation
#==========================================================================================
input_layers = {}
for key in input_layer_size.keys():
img_x,img_y = input_layer_size [key]
features_float = tf.cast(features[key], tf.float32)
input_layers[key] = tf.reshape(features_float, [-1, img_x*img_y * 1])
#print(input_layers.keys())
#==========================================================================================
####### layers described in v_layers
#==========================================================================================
AIT_b = ait_backwards(input_layers)
PIT_b = pit_backwards(AIT_b,input_layers,packed_backward_weights, weights,bias, use_sparse = use_sparse)
v4_b = v4_backwards(PIT_b,input_layers,packed_backward_weights,weights,bias, use_sparse = use_sparse)
v2_b = v2_backwards(v4_b,input_layers,packed_backward_weights,weights,bias, use_sparse = use_sparse)
# final_dense = tf.layers.dense(v2_backward)
logits = tf.layers.dense(inputs = v2_b, units = 40 * 40)
tf.summary.image("v2_b",tf.reshape(v2_b,[-1,40,40,1]),1)
tf.summary.image("logits",tf.reshape(logits,[-1,40,40,1]),1)
#==========================================================================================
####### Prediction with Tensorflow
#==========================================================================================
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
#"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
#sess.run(print_activation_dict(return_tensors))
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
#splitting into two lists along dimension 1
#x_coord_label,y_coord_label = tf.split(labels,2,1)
#AdagradOptimizer
#loss = eucledian_distance(x_coord_pred,x_coord_label,y_coord_pred,y_coord_label)
loss = tf.losses.mean_squared_error(
labels=labels , predictions=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
# change optimiser if wanted
optimizer = tf.train.AdagradOptimizer(learning_rate=0.05)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=create_direction_for_eval(labels), predictions=create_direction_for_eval(logits))}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
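# --- Illustrative note (not part of the original file) -------------------
# ventral_feed_backward() follows the tf.estimator model_fn signature, so it
# would typically be wired up roughly as below; the model_dir and the
# train_input_fn are assumptions, not taken from this repository.
#   estimator = tf.estimator.Estimator(
#       model_fn=ventral_feed_backward, model_dir='/tmp/ventral_backward')
#   estimator.train(input_fn=train_input_fn, steps=1000)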
| 32.146154
| 108
| 0.581718
|
79524e14ef26f9e56c447ba4dd5e8b39e5a7bfac
| 873
|
py
|
Python
|
tests/test_jira_connection.py
|
Lee-Sutton/pyjournal
|
a91457ea327f7df3b865c3a8fce38dd580a4cb14
|
[
"MIT"
] | null | null | null |
tests/test_jira_connection.py
|
Lee-Sutton/pyjournal
|
a91457ea327f7df3b865c3a8fce38dd580a4cb14
|
[
"MIT"
] | 13
|
2019-03-30T01:52:51.000Z
|
2021-06-01T23:01:33.000Z
|
tests/test_jira_connection.py
|
Lee-Sutton/pyjournal
|
a91457ea327f7df3b865c3a8fce38dd580a4cb14
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
from unittest.mock import patch
from pyjournal.jira_connection import Jira
@patch('pyjournal.jira_connection.JIRA')
def test_jira(jira_api, fake):
url = fake.url()
email = fake.email()
password = 'password'
Jira(url, email, password)
jira_api.assert_called_with(url, auth=(email, password))
Sprint = namedtuple('Sprint', ['state', 'id'])
@patch('pyjournal.jira_connection.JIRA')
def test_active_issues(jira_api, fake):
url = fake.url()
email = fake.email()
password = 'password'
jira_api().sprints.return_value = [Sprint('ACTIVE', 1), Sprint('INACTIVE', 2)]
jira = Jira(url, email, password)
board_id = 2
issues = jira.active_issues(board_id)
assert issues is not None
jira_api().sprints.assert_called_with(board_id=board_id)
jira_api().search_issues.assert_called()
| 24.942857
| 82
| 0.706758
|
79524ecc2e51c375b32965f9dbd6cf66a839077d
| 3,510
|
py
|
Python
|
gilp/tests/test_graphic.py
|
xbrq/gilp
|
7fb1d2425d905aa43a5bcde25713b40878bc30d1
|
[
"MIT"
] | 18
|
2020-07-24T02:35:14.000Z
|
2022-01-28T18:51:46.000Z
|
gilp/tests/test_graphic.py
|
xbrq/gilp
|
7fb1d2425d905aa43a5bcde25713b40878bc30d1
|
[
"MIT"
] | 3
|
2021-01-25T11:18:45.000Z
|
2021-09-04T23:21:28.000Z
|
gilp/tests/test_graphic.py
|
xbrq/gilp
|
7fb1d2425d905aa43a5bcde25713b40878bc30d1
|
[
"MIT"
] | 3
|
2021-02-12T05:02:31.000Z
|
2021-09-04T22:29:58.000Z
|
import pytest
import numpy as np
import plotly.graph_objects as plt
from gilp._graphic import (Figure, num_format, linear_string, equation_string,
label)
# The following functions are not tested since they create visual objects:
# table, vector, scatter, line, equation, and polygon
@pytest.mark.parametrize("n,s",[
(1.0, '1'),
(1.0001, '1'),
(1.023, '1.023'),
(0.0, '0'),
(3.45667777, '3.457'),
(2.00000005, '2'),
(1.9999999, '2')])
def test_num_format(n,s):
assert num_format(n) == s
@pytest.mark.parametrize("A,i,c,s",[
(np.array([1,2,3]), [2,4,6], None,
'1x<sub>2</sub> + 2x<sub>4</sub> + 3x<sub>6</sub>'),
(np.array([1.000001,-1.9999999,0.00]), [1,2,3], None,
'1x<sub>1</sub> - 2x<sub>2</sub> + 0x<sub>3</sub>'),
(np.array([-1,-3,11]), [1,3,6], None,
'-1x<sub>1</sub> - 3x<sub>3</sub> + 11x<sub>6</sub>'),
(np.array([-3,4]), [1,3], 1,
'1 - 3x<sub>1</sub> + 4x<sub>3</sub>')])
def test_linear_string(A,i,c,s):
assert linear_string(A,i,c) == s
@pytest.mark.parametrize("A,b,comp,s",[
(np.array([1,2,3]), 4, " = ",
'1x<sub>1</sub> + 2x<sub>2</sub> + 3x<sub>3</sub> = 4'),
(np.array([2.8999999,1.66666,-3.33333]), 17, " ≤ ",
'2.9x<sub>1</sub> + 1.667x<sub>2</sub> - 3.333x<sub>3</sub> ≤ 17')])
def test_equation_string(A,b,comp,s):
assert equation_string(A,b,comp) == s
@pytest.mark.parametrize("d,s",[
(dict(BFS=[2.990,4.567,0.00001,1.0001],
B=[3,6,4],
Obj=1567.3456),
"<b>BFS</b>: (2.99, 4.567, 0, 1)<br>"
+ "<b>B</b>: (3, 6, 4)<br>"
+ "<b>Obj</b>: 1567.346"),
(dict(BFS=[1.0001,3.99999,0.00001,1.0001],
B=[4,5,6],
Obj=-17.8900),
"<b>BFS</b>: (1, 4, 0, 1)<br>"
+ "<b>B</b>: (4, 5, 6)<br>"
+ "<b>Obj</b>: -17.89")])
def test_label(d,s):
assert label(d) == s
def test_trace_map():
fig = Figure(subplots=False)
fig.add_trace(plt.Scatter(x=[1], y=[1]), name='abc3')
fig.add_trace(plt.Scatter(x=[2], y=[1]), name='abc1')
fig.add_trace(plt.Scatter(x=[1], y=[2]), name='test2')
fig.add_traces([plt.Scatter(x=[1], y=[2]),
plt.Scatter(x=[1], y=[2])], name='test4')
with pytest.raises(ValueError,match='.* trace name is already in use.'):
fig.add_trace(plt.Scatter(x=[1], y=[3]), name='test2')
assert fig.get_indices(name='test4') == [3,4]
assert fig.get_indices(name='abc1') == [1]
assert fig.get_indices(name='abc', containing=True) == [0,1]
assert fig.get_indices(name='test', containing=True) == [2,3,4]
# TODO: rework these test cases
# def test_axis_bad_inputs():
# fig = plt.Figure()
# with pytest.raises(ValueError, match='.*vectors of length 2 or 3'):
# st.set_axis_limits(fig,[np.array([[2],[3],[4],[5]])])
# with pytest.raises(ValueError, match='.*retrieve 2 or 3 axis limits'):
# st.get_axis_limits(fig,4)
# @pytest.mark.parametrize("x_list,n,limits",[
# ([np.array([[0],[1]]),
# np.array([[1],[1]]),
# np.array([[0.5],[1]]),
# np.array([[0.5],[0.5]]),
# np.array([[1],[2]])],
# 2,[1.3,2.6]),
# ([np.array([[0],[1],[1]]),
# np.array([[1],[0],[2]]),
# np.array([[0],[3],[1]]),
# np.array([[1],[1],[0]]),
# np.array([[0],[2],[1]])],
# 3,[1.3,3.9,2.6])])
# def test_axis_limits(x_list,n,limits):
# fig = plt.Figure()
# st.set_axis_limits(fig,x_list)
# assert np.allclose(st.get_axis_limits(fig,n),limits,atol=1e-7)
| 33.75
| 78
| 0.538462
|
79524fce4649054912206a3339e867a4b0d5ae8a
| 6,902
|
py
|
Python
|
tensorflow/python/data/experimental/kernel_tests/matching_files_dataset_test.py
|
AdaAlarm/tensorflow
|
e0db063159751276a92d88a4ad6d481b1199318c
|
[
"Apache-2.0"
] | 10
|
2021-05-25T17:43:04.000Z
|
2022-03-08T10:46:09.000Z
|
tensorflow/python/data/experimental/kernel_tests/matching_files_dataset_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 1,056
|
2019-12-15T01:20:31.000Z
|
2022-02-10T02:06:28.000Z
|
tensorflow/python/data/experimental/kernel_tests/matching_files_dataset_test.py
|
CaptainGizzy21/tensorflow
|
3457a2b122e50b4d44ceaaed5a663d635e5c22df
|
[
"Apache-2.0"
] | 6
|
2016-09-07T04:00:15.000Z
|
2022-01-12T01:47:38.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `MatchingFilesDataset`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import matching_files
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MatchingFilesDatasetTest(test_base.DatasetTestBase,
parameterized.TestCase):
def setUp(self):
super(MatchingFilesDatasetTest, self).setUp()
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
super(MatchingFilesDatasetTest, self).tearDown()
def _touchTempFiles(self, filenames):
for filename in filenames:
open(os.path.join(self.tmp_dir, filename), 'a').close()
@combinations.generate(test_base.default_test_combinations())
def testNonExistingDirectory(self):
"""Test the MatchingFiles dataset with a non-existing directory."""
self.tmp_dir = os.path.join(self.tmp_dir, 'nonexistingdir')
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
@combinations.generate(test_base.default_test_combinations())
def testEmptyDirectory(self):
"""Test the MatchingFiles dataset with an empty directory."""
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset, expected_error=(errors.NotFoundError, ''))
@combinations.generate(test_base.default_test_combinations())
def testSimpleDirectory(self):
"""Test the MatchingFiles dataset with a simple directory."""
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testFileSuffixes(self):
"""Test the MatchingFiles dataset using the suffixes of filename."""
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, '*.py'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:-1]
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testFileMiddles(self):
"""Test the MatchingFiles dataset using the middles of filename."""
filenames = ['aa.txt', 'bb.py', 'bbc.pyc', 'cc.pyc']
self._touchTempFiles(filenames)
dataset = matching_files.MatchingFilesDataset(
os.path.join(self.tmp_dir, 'b*.py*'))
self.assertDatasetProduces(
dataset,
expected_output=[
compat.as_bytes(os.path.join(self.tmp_dir, filename))
for filename in filenames[1:3]
],
assert_items_equal=True)
@combinations.generate(test_base.default_test_combinations())
def testNestedDirectories(self):
"""Test the MatchingFiles dataset with nested directories."""
filenames = []
width = 8
depth = 4
for i in range(width):
for j in range(depth):
new_base = os.path.join(self.tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
filenames.append(filename)
open(filename, 'w').close()
patterns = [
os.path.join(self.tmp_dir, os.path.join(*['**' for _ in range(depth)]),
suffix) for suffix in ['*.txt', '*.log']
]
dataset = matching_files.MatchingFilesDataset(patterns)
next_element = self.getNext(dataset)
expected_filenames = [
compat.as_bytes(filename)
for filename in filenames
if filename.endswith('.txt') or filename.endswith('.log')
]
actual_filenames = []
while True:
try:
actual_filenames.append(compat.as_bytes(self.evaluate(next_element())))
except errors.OutOfRangeError:
break
self.assertItemsEqual(expected_filenames, actual_filenames)
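# A small illustrative helper (hypothetical, not part of TensorFlow) showing how
# the nested glob patterns used in the test above are assembled; the directory
# name, depth and suffixes are arbitrary example values.
def _example_nested_patterns(base_dir='/tmp/example', depth=4):
  wildcards = os.path.join(*['**' for _ in range(depth)])
  return [os.path.join(base_dir, wildcards, suffix)
          for suffix in ['*.txt', '*.log']]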
class MatchingFilesDatasetCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
def _build_iterator_graph(self, test_patterns):
return matching_files.MatchingFilesDataset(test_patterns)
@combinations.generate(test_base.default_test_combinations())
def testMatchingFilesCore(self):
tmp_dir = tempfile.mkdtemp()
width = 16
depth = 8
for i in range(width):
for j in range(depth):
new_base = os.path.join(tmp_dir, str(i),
*[str(dir_name) for dir_name in range(j)])
if not os.path.exists(new_base):
os.makedirs(new_base)
child_files = ['a.py', 'b.pyc'] if j < depth - 1 else ['c.txt', 'd.log']
for f in child_files:
filename = os.path.join(new_base, f)
open(filename, 'w').close()
patterns = [
os.path.join(tmp_dir, os.path.join(*['**'
for _ in range(depth)]), suffix)
for suffix in ['*.txt', '*.log']
]
num_outputs = width * len(patterns)
self.run_core_tests(lambda: self._build_iterator_graph(patterns),
num_outputs)
shutil.rmtree(tmp_dir, ignore_errors=True)
if __name__ == '__main__':
test.main()
| 35.035533
| 80
| 0.67111
|
79524fe027206757c991850457d43905974adb25
| 40,616
|
py
|
Python
|
elephant/spike_train_generation.py
|
healther/elephant
|
b4525dcabc1f4c2204479f580100151eaebd8603
|
[
"BSD-3-Clause"
] | null | null | null |
elephant/spike_train_generation.py
|
healther/elephant
|
b4525dcabc1f4c2204479f580100151eaebd8603
|
[
"BSD-3-Clause"
] | null | null | null |
elephant/spike_train_generation.py
|
healther/elephant
|
b4525dcabc1f4c2204479f580100151eaebd8603
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Functions to generate spike trains from analog signals,
or to generate random spike trains.
Some functions are based on the NeuroTools stgen module, which was mostly
written by Eilif Muller, or from the NeuroTools signals.analogs module.
:copyright: Copyright 2015 by the Elephant team, see AUTHORS.txt.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division
import numpy as np
from quantities import ms, mV, Hz, Quantity, dimensionless
from neo import SpikeTrain
import random
from elephant.spike_train_surrogates import dither_spike_train
import warnings
def spike_extraction(signal, threshold=0.0 * mV, sign='above',
time_stamps=None, extr_interval=(-2 * ms, 4 * ms)):
"""
Return the peak times for all events that cross threshold and the
waveforms. Usually used for extracting spikes from a membrane
potential to calculate waveform properties.
Similar to spike_train_generation.peak_detection.
Parameters
----------
signal : neo AnalogSignal object
'signal' is an analog signal.
threshold : A quantity, e.g. in mV
'threshold' contains a value that must be reached for an event
to be detected. Default: 0.0 * mV.
sign : 'above' or 'below'
        'sign' determines whether to count threshold crossings
        above or below the threshold. Default: 'above'.
time_stamps: None, quantity array or Object with .times interface
        if 'time_stamps' is a quantity array or exposes one through the
        .times interface, it provides the time_stamps
around which the waveform is extracted. If it is None, the
function peak_detection is used to calculate the time_stamps
from signal. Default: None.
extr_interval: unpackable time quantities, len == 2
'extr_interval' specifies the time interval around the
time_stamps where the waveform is extracted. The default is an
interval of '6 ms'. Default: (-2 * ms, 4 * ms).
Returns
-------
result_st : neo SpikeTrain object
'result_st' contains the time_stamps of each of the spikes and
the waveforms in result_st.waveforms.
"""
# Get spike time_stamps
if time_stamps is None:
time_stamps = peak_detection(signal, threshold, sign=sign)
elif hasattr(time_stamps, 'times'):
time_stamps = time_stamps.times
    elif not isinstance(time_stamps, Quantity):
        raise TypeError("time_stamps must be None, a quantity array or" +
                        " expose the .times interface")
if len(time_stamps) == 0:
return SpikeTrain(time_stamps, units=signal.times.units,
t_start=signal.t_start, t_stop=signal.t_stop,
waveforms=np.array([]),
sampling_rate=signal.sampling_rate)
# Unpack the extraction interval from tuple or array
extr_left, extr_right = extr_interval
if extr_left > extr_right:
raise ValueError("extr_interval[0] must be < extr_interval[1]")
if any(np.diff(time_stamps) < extr_interval[1]):
warnings.warn("Waveforms overlap.", UserWarning)
data_left = ((extr_left * signal.sampling_rate).simplified).magnitude
data_right = ((extr_right * signal.sampling_rate).simplified).magnitude
data_stamps = (((time_stamps - signal.t_start) *
signal.sampling_rate).simplified).magnitude
data_stamps = data_stamps.astype(int)
borders_left = data_stamps + data_left
borders_right = data_stamps + data_right
borders = np.dstack((borders_left, borders_right)).flatten()
waveforms = np.array(
np.split(np.array(signal), borders.astype(int))[1::2]) * signal.units
# len(np.shape(waveforms)) == 1 if waveforms do not have the same width.
# this can occur when extr_interval indexes beyond the signal.
    # Workaround: delete spikes shorter than the maximum length.
if len(np.shape(waveforms)) == 1:
max_len = (np.array([len(x) for x in waveforms])).max()
to_delete = np.array([idx for idx, x in enumerate(waveforms)
if len(x) < max_len])
waveforms = np.delete(waveforms, to_delete, axis=0)
waveforms = np.array([x for x in waveforms])
warnings.warn("Waveforms " +
("{:d}, " * len(to_delete)).format(*to_delete) +
"exceeded signal and had to be deleted. " +
"Change extr_interval to keep.")
waveforms = waveforms[:, np.newaxis, :]
return SpikeTrain(time_stamps, units=signal.times.units,
t_start=signal.t_start, t_stop=signal.t_stop,
sampling_rate=signal.sampling_rate, waveforms=waveforms,
left_sweep=extr_left)
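# An illustrative usage sketch of spike_extraction; the synthetic membrane
# potential below (a flat -65 mV trace with one artificial spike) and the
# threshold are made-up example values.
def _example_spike_extraction():
    import neo
    import quantities as pq
    v = np.full(1000, -65.0)
    v[500:505] = 20.0
    vm = neo.AnalogSignal(v, units='mV', sampling_rate=10 * pq.kHz)
    # Extract waveforms around every peak above -20 mV
    return spike_extraction(vm, threshold=-20.0 * pq.mV, sign='above')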
def threshold_detection(signal, threshold=0.0 * mV, sign='above'):
"""
Returns the times when the analog signal crosses a threshold.
Usually used for extracting spike times from a membrane potential.
Adapted from version in NeuroTools.
Parameters
----------
signal : neo AnalogSignal object
'signal' is an analog signal.
threshold : A quantity, e.g. in mV
'threshold' contains a value that must be reached
for an event to be detected. Default: 0.0 * mV.
sign : 'above' or 'below'
        'sign' determines whether to count threshold crossings
        above or below the threshold.
Returns
-------
result_st : neo SpikeTrain object
'result_st' contains the spike times of each of the events (spikes)
extracted from the signal.
"""
assert threshold is not None, "A threshold must be provided"
    if sign == 'above':
        cutout = np.where(signal > threshold)[0]
    elif sign == 'below':
        cutout = np.where(signal < threshold)[0]
    else:
        raise ValueError("sign must be 'above' or 'below'")
if len(cutout) <= 0:
events = np.zeros(0)
else:
take = np.where(np.diff(cutout) > 1)[0] + 1
take = np.append(0, take)
time = signal.times
events = time[cutout][take]
events_base = events.base
if events_base is None:
# This occurs in some Python 3 builds due to some
# bug in quantities.
events_base = np.array([event.base for event in events]) # Workaround
result_st = SpikeTrain(events_base, units=signal.times.units,
t_start=signal.t_start, t_stop=signal.t_stop)
return result_st
def peak_detection(signal, threshold=0.0 * mV, sign='above', format=None):
"""
Return the peak times for all events that cross threshold.
Usually used for extracting spike times from a membrane potential.
Similar to spike_train_generation.threshold_detection.
Parameters
----------
signal : neo AnalogSignal object
'signal' is an analog signal.
threshold : A quantity, e.g. in mV
'threshold' contains a value that must be reached
for an event to be detected.
sign : 'above' or 'below'
'sign' determines whether to count thresholding crossings that
cross above or below the threshold. Default: 'above'.
format : None or 'raw'
Whether to return as SpikeTrain (None) or as a plain array
of times ('raw'). Default: None.
Returns
-------
result_st : neo SpikeTrain object
'result_st' contains the spike times of each of the events
(spikes) extracted from the signal.
"""
assert threshold is not None, "A threshold must be provided"
    if sign == 'above':
cutout = np.where(signal > threshold)[0]
peak_func = np.argmax
    elif sign == 'below':
cutout = np.where(signal < threshold)[0]
peak_func = np.argmin
else:
raise ValueError("sign must be 'above' or 'below'")
if len(cutout) <= 0:
events_base = np.zeros(0)
else:
        # Select threshold crossings lasting at least 2 sample points (np.diff(cutout) > 1)
# This avoids empty slices
border_start = np.where(np.diff(cutout) > 1)[0]
border_end = border_start + 1
borders = np.concatenate((border_start, border_end))
borders = np.append(0, borders)
borders = np.append(borders, len(cutout)-1)
borders = np.sort(borders)
true_borders = cutout[borders]
right_borders = true_borders[1::2] + 1
true_borders = np.sort(np.append(true_borders[0::2], right_borders))
# Workaround for bug that occurs when signal goes below thr for 1 dtp,
        # Workaround eliminates empty slices from np.split
backward_mask = np.absolute(np.ediff1d(true_borders, to_begin=1)) > 0
forward_mask = np.absolute(np.ediff1d(true_borders[::-1],
to_begin=1)[::-1]) > 0
true_borders = true_borders[backward_mask * forward_mask]
split_signal = np.split(np.array(signal), true_borders)[1::2]
maxima_idc_split = np.array([peak_func(x) for x in split_signal])
max_idc = maxima_idc_split + true_borders[0::2]
events = signal.times[max_idc]
events_base = events.base
if events_base is None:
# This occurs in some Python 3 builds due to some
# bug in quantities.
events_base = np.array([event.base for event in events]) # Workaround
if format is None:
result_st = SpikeTrain(events_base, units=signal.times.units,
t_start=signal.t_start, t_stop=signal.t_stop)
    elif format == 'raw':
result_st = events_base
else:
raise ValueError("Format argument must be None or 'raw'")
return result_st
def _homogeneous_process(interval_generator, args, mean_rate, t_start, t_stop,
as_array):
"""
Returns a spike train whose spikes are a realization of a random process
generated by the function `interval_generator` with the given rate,
starting at time `t_start` and stopping `time t_stop`.
"""
def rescale(x):
return (x / mean_rate.units).rescale(t_stop.units)
n = int(((t_stop - t_start) * mean_rate).simplified)
number = np.ceil(n + 3 * np.sqrt(n))
if number < 100:
number = min(5 + np.ceil(2 * n), 100)
assert number > 4 # if positive, number cannot be less than 5
isi = rescale(interval_generator(*args, size=int(number)))
spikes = np.cumsum(isi)
spikes += t_start
i = spikes.searchsorted(t_stop)
if i == len(spikes):
# ISI buffer overrun
extra_spikes = []
t_last = spikes[-1] + rescale(interval_generator(*args, size=1))[0]
while t_last < t_stop:
extra_spikes.append(t_last)
t_last = t_last + rescale(interval_generator(*args, size=1))[0]
# np.concatenate does not conserve units
spikes = Quantity(
np.concatenate(
(spikes, extra_spikes)).magnitude, units=spikes.units)
else:
spikes = spikes[:i]
if as_array:
spikes = spikes.magnitude
else:
spikes = SpikeTrain(
spikes, t_start=t_start, t_stop=t_stop, units=spikes.units)
return spikes
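# A unit-free sketch of the idea behind `_homogeneous_process` above: draw
# exponential inter-spike intervals, accumulate them and truncate at the stop
# time. The rate and duration are arbitrary example values.
def _example_isi_cumsum(rate_hz=10.0, duration_s=2.0):
    expected = rate_hz * duration_s
    n = int(np.ceil(expected + 3 * np.sqrt(expected)))
    isi = np.random.exponential(1.0 / rate_hz, size=n)  # intervals in seconds
    spikes = np.cumsum(isi)                             # spike times in seconds
    return spikes[spikes < duration_s]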
def homogeneous_poisson_process(rate, t_start=0.0 * ms, t_stop=1000.0 * ms,
as_array=False):
"""
Returns a spike train whose spikes are a realization of a Poisson process
with the given rate, starting at time `t_start` and stopping time `t_stop`.
All numerical values should be given as Quantities, e.g. 100*Hz.
Parameters
----------
rate : Quantity scalar with dimension 1/time
The rate of the discharge.
t_start : Quantity scalar with dimension time
The beginning of the spike train.
t_stop : Quantity scalar with dimension time
The end of the spike train.
as_array : bool
If True, a NumPy array of sorted spikes is returned,
rather than a SpikeTrain object.
Raises
------
ValueError : If `t_start` and `t_stop` are not of type `pq.Quantity`.
Examples
--------
>>> from quantities import Hz, ms
>>> spikes = homogeneous_poisson_process(50*Hz, 0*ms, 1000*ms)
>>> spikes = homogeneous_poisson_process(
20*Hz, 5000*ms, 10000*ms, as_array=True)
"""
if not isinstance(t_start, Quantity) or not isinstance(t_stop, Quantity):
raise ValueError("t_start and t_stop must be of type pq.Quantity")
rate = rate.rescale((1 / t_start).units)
mean_interval = 1 / rate.magnitude
return _homogeneous_process(
np.random.exponential, (mean_interval,), rate, t_start, t_stop,
as_array)
def inhomogeneous_poisson_process(rate, as_array=False):
"""
Returns a spike train whose spikes are a realization of an inhomogeneous
Poisson process with the given rate profile.
Parameters
----------
rate : neo.AnalogSignal
A `neo.AnalogSignal` representing the rate profile evolving over time.
Its values have all to be `>=0`. The output spiketrain will have
`t_start = rate.t_start` and `t_stop = rate.t_stop`
as_array : bool
If True, a NumPy array of sorted spikes is returned,
rather than a SpikeTrain object.
Raises
------
ValueError : If `rate` contains any negative value.
"""
# Check rate contains only positive values
if any(rate < 0) or not rate.size:
raise ValueError(
            'rate must be a positive non-empty signal, representing the '
            'rate at time t')
else:
        # Generate a hidden Poisson SpikeTrain with rate equal to the peak rate
max_rate = max(rate)
homogeneous_poiss = homogeneous_poisson_process(
rate=max_rate, t_stop=rate.t_stop, t_start=rate.t_start)
# Compute the rate profile at each spike time by interpolation
rate_interpolated = _analog_signal_linear_interp(
signal=rate, times=homogeneous_poiss.magnitude *
homogeneous_poiss.units)
# Accept each spike at time t with probability rate(t)/max_rate
u = np.random.uniform(size=len(homogeneous_poiss)) * max_rate
spikes = homogeneous_poiss[u < rate_interpolated.flatten()]
if as_array:
spikes = spikes.magnitude
return spikes
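# A unit-free sketch of the thinning step used above. `rate_fn` is a
# hypothetical callable returning the instantaneous rate at each spike time;
# `max_rate` must bound it from above.
def _example_thinning(homogeneous_spikes, rate_fn, max_rate):
    u = np.random.uniform(size=len(homogeneous_spikes)) * max_rate
    return homogeneous_spikes[u < rate_fn(homogeneous_spikes)]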
def _analog_signal_linear_interp(signal, times):
'''
Compute the linear interpolation of a signal at desired times.
Given the `signal` (neo.AnalogSignal) taking value `s0` and `s1` at two
    consecutive time points `t0` and `t1` `(t0 < t1)`, for every time `t` in
    `times` such that `t0 < t <= t1`, the value of the linear interpolation
    is returned, given by:
    `s = s0 + ((s1 - s0) / (t1 - t0)) * (t - t0)`.
Parameters
----------
times : Quantity vector(time)
The time points for which the interpolation is computed
signal : neo.core.AnalogSignal
The analog signal containing the discretization of the function to
interpolate
Returns
------
out: Quantity array representing the values of the interpolated signal at the
times given by times
Notes
-----
If `signal` has sampling period `dt=signal.sampling_period`, its values
are defined at `t=signal.times`, such that `t[i] = signal.t_start + i * dt`
    The last of these times is lower than
    `signal.t_stop`: `t[-1] = signal.t_stop - dt`.
For the interpolation at times t such that `t[-1] <= t <= signal.t_stop`,
the value of `signal` at `signal.t_stop` is taken to be that
at time `t[-1]`.
'''
dt = signal.sampling_period
t_start = signal.t_start.rescale(signal.times.units)
t_stop = signal.t_stop.rescale(signal.times.units)
# Extend the signal (as a dimensionless array) copying the last value
# one time, and extend its times to t_stop
signal_extended = np.vstack(
[signal.magnitude, signal[-1].magnitude]).flatten()
times_extended = np.hstack([signal.times, t_stop]) * signal.times.units
time_ids = np.floor(((times - t_start) / dt).rescale(
dimensionless).magnitude).astype('i')
# Compute the slope m of the signal at each time in times
y1 = signal_extended[time_ids]
y2 = signal_extended[time_ids + 1]
m = (y2 - y1) / dt
# Interpolate the signal at each time in times by linear interpolation
out = (y1 + m * (times - times_extended[time_ids])) * signal.units
return out.rescale(signal.units)
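# A worked numerical example of the interpolation computed above; the sample
# values are arbitrary.
def _example_linear_interp():
    t0, t1 = 0.0, 1.0          # consecutive sample times
    s0, s1 = 2.0, 4.0          # signal values at t0 and t1
    t = 0.25                   # query time with t0 < t <= t1
    m = (s1 - s0) / (t1 - t0)  # slope, as computed above
    return s0 + m * (t - t0)   # -> 2.5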
def homogeneous_gamma_process(a, b, t_start=0.0 * ms, t_stop=1000.0 * ms,
as_array=False):
"""
Returns a spike train whose spikes are a realization of a gamma process
with the given parameters, starting at time `t_start` and stopping time
`t_stop` (average rate will be b/a).
All numerical values should be given as Quantities, e.g. 100*Hz.
Parameters
----------
a : int or float
The shape parameter of the gamma distribution.
b : Quantity scalar with dimension 1/time
The rate parameter of the gamma distribution.
t_start : Quantity scalar with dimension time
The beginning of the spike train.
t_stop : Quantity scalar with dimension time
The end of the spike train.
as_array : bool
If True, a NumPy array of sorted spikes is returned,
rather than a SpikeTrain object.
Raises
------
ValueError : If `t_start` and `t_stop` are not of type `pq.Quantity`.
Examples
--------
>>> from quantities import Hz, ms
>>> spikes = homogeneous_gamma_process(2.0, 50*Hz, 0*ms, 1000*ms)
>>> spikes = homogeneous_gamma_process(
5.0, 20*Hz, 5000*ms, 10000*ms, as_array=True)
"""
if not isinstance(t_start, Quantity) or not isinstance(t_stop, Quantity):
raise ValueError("t_start and t_stop must be of type pq.Quantity")
b = b.rescale((1 / t_start).units).simplified
rate = b / a
k, theta = a, (1 / b.magnitude)
return _homogeneous_process(np.random.gamma, (k, theta), rate, t_start, t_stop, as_array)
def _n_poisson(rate, t_stop, t_start=0.0 * ms, n=1):
"""
Generates one or more independent Poisson spike trains.
Parameters
----------
rate : Quantity or Quantity array
Expected firing rate (frequency) of each output SpikeTrain.
Can be one of:
* a single Quantity value: expected firing rate of each output
SpikeTrain
* a Quantity array: rate[i] is the expected firing rate of the i-th
output SpikeTrain
t_stop : Quantity
Single common stop time of each output SpikeTrain. Must be > t_start.
t_start : Quantity (optional)
Single common start time of each output SpikeTrain. Must be < t_stop.
Default: 0 s.
n: int (optional)
If rate is a single Quantity value, n specifies the number of
SpikeTrains to be generated. If rate is an array, n is ignored and the
number of SpikeTrains is equal to len(rate).
Default: 1
Returns
-------
list of neo.SpikeTrain
Each SpikeTrain contains one of the independent Poisson spike trains,
either n SpikeTrains of the same rate, or len(rate) SpikeTrains with
varying rates according to the rate parameter. The time unit of the
SpikeTrains is given by t_stop.
"""
    # Check that the provided rate has a frequency unit (e.g. Hz), or raise an error
try:
for r in rate.reshape(-1, 1):
r.rescale('Hz')
except AttributeError:
raise ValueError('rate argument must have rate unit (1/time)')
    # Check that t_start < t_stop
if not t_start < t_stop:
raise ValueError(
't_start (=%s) must be < t_stop (=%s)' % (t_start, t_stop))
# Set number n of output spike trains (specified or set to len(rate))
if not (type(n) == int and n > 0):
raise ValueError('n (=%s) must be a positive integer' % str(n))
rate_dl = rate.simplified.magnitude.flatten()
# Check rate input parameter
if len(rate_dl) == 1:
if rate_dl < 0:
raise ValueError('rate (=%s) must be non-negative.' % rate)
rates = np.array([rate_dl] * n)
else:
rates = rate_dl.flatten()
if any(rates < 0):
raise ValueError('rate must have non-negative elements.')
sts = []
for r in rates:
sts.append(homogeneous_poisson_process(r * Hz, t_start, t_stop))
return sts
def single_interaction_process(
rate, rate_c, t_stop, n=2, jitter=0 * ms, coincidences='deterministic',
t_start=0 * ms, min_delay=0 * ms, return_coinc=False):
"""
Generates a multidimensional Poisson SIP (single interaction process)
plus independent Poisson processes
A Poisson SIP consists of Poisson time series which are independent
except for simultaneous events in all of them. This routine generates
a SIP plus additional parallel independent Poisson processes.
See [1].
Parameters
-----------
t_stop: quantities.Quantity
Total time of the simulated processes. The events are drawn between
0 and `t_stop`.
rate: quantities.Quantity
Overall mean rate of the time series to be generated (coincidence
rate `rate_c` is subtracted to determine the background rate). Can be:
* a float, representing the overall mean rate of each process. If
so, it must be higher than `rate_c`.
* an iterable of floats (one float per process), each float
representing the overall mean rate of a process. If so, all the
entries must be larger than `rate_c`.
rate_c: quantities.Quantity
Coincidence rate (rate of coincidences for the n-dimensional SIP).
The SIP spike trains will have coincident events with rate `rate_c`
plus independent 'background' events with rate `rate-rate_c`.
n: int, optional
If `rate` is a single Quantity value, `n` specifies the number of
SpikeTrains to be generated. If rate is an array, `n` is ignored and
the number of SpikeTrains is equal to `len(rate)`.
        Default: 2
jitter: quantities.Quantity, optional
Jitter for the coincident events. If `jitter == 0`, the events of all
n correlated processes are exactly coincident. Otherwise, they are
jittered around a common time randomly, up to +/- `jitter`.
coincidences: string, optional
        Whether the total number of injected coincidences must be
        deterministic (i.e. rate_c is the actual rate with which coincidences
        are generated) or stochastic (i.e. rate_c is the mean rate of
        coincidences):
* 'deterministic': deterministic rate
* 'stochastic': stochastic rate
Default: 'deterministic'
t_start: quantities.Quantity, optional
Starting time of the series. If specified, it must be lower than
t_stop
Default: 0 * ms
min_delay: quantities.Quantity, optional
Minimum delay between consecutive coincidence times.
Default: 0 * ms
return_coinc: bool, optional
Whether to return the coincidence times for the SIP process
Default: False
Returns
--------
output: list
Realization of a SIP consisting of n Poisson processes characterized
by synchronous events (with the given jitter)
If `return_coinc` is `True`, the coincidence times are returned as a
second output argument. They also have an associated time unit (same
as `t_stop`).
References
----------
[1] Kuhn, Aertsen, Rotter (2003) Neural Comput 15(1):67-101
    Examples
    --------
    >>> import quantities as pq
    >>> sip, coinc = single_interaction_process(
    ...     rate=20 * pq.Hz, rate_c=4 * pq.Hz, t_stop=1000 * pq.ms,
    ...     n=10, return_coinc=True)
"""
# Check if n is a positive integer
if not (isinstance(n, int) and n > 0):
raise ValueError('n (=%s) must be a positive integer' % str(n))
# Assign time unit to jitter, or check that its existing unit is a time
# unit
jitter = abs(jitter)
# Define the array of rates from input argument rate. Check that its length
# matches with n
if rate.ndim == 0:
if rate < 0 * Hz:
raise ValueError(
'rate (=%s) must be non-negative.' % str(rate))
rates_b = np.array(
[rate.magnitude for _ in range(n)]) * rate.units
else:
rates_b = np.array(rate).flatten() * rate.units
if not all(rates_b >= 0. * Hz):
raise ValueError('*rate* must have non-negative elements')
# Check: rate>=rate_c
if np.any(rates_b < rate_c):
raise ValueError('all elements of *rate* must be >= *rate_c*')
# Check min_delay < 1./rate_c
if not (rate_c == 0 * Hz or min_delay < 1. / rate_c):
raise ValueError(
"'*min_delay* (%s) must be lower than 1/*rate_c* (%s)." %
(str(min_delay), str((1. / rate_c).rescale(min_delay.units))))
    # Generate the n Poisson processes that are the basis for the SIP
# (coincidences still lacking)
embedded_poisson_trains = _n_poisson(
rate=rates_b - rate_c, t_stop=t_stop, t_start=t_start)
# Convert the trains from neo SpikeTrain objects to simpler Quantity
# objects
embedded_poisson_trains = [
emb.view(Quantity) for emb in embedded_poisson_trains]
# Generate the array of times for coincident events in SIP, not closer than
# min_delay. The array is generated as a quantity from the Quantity class
# in the quantities module
if coincidences == 'deterministic':
Nr_coinc = int(((t_stop - t_start) * rate_c).rescale(dimensionless))
while True:
coinc_times = t_start + \
np.sort(np.random.random(Nr_coinc)) * (t_stop - t_start)
if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
break
elif coincidences == 'stochastic':
while True:
coinc_times = homogeneous_poisson_process(
rate=rate_c, t_stop=t_stop, t_start=t_start)
if len(coinc_times) < 2 or min(np.diff(coinc_times)) >= min_delay:
break
# Convert coinc_times from a neo SpikeTrain object to a Quantity object
# pq.Quantity(coinc_times.base)*coinc_times.units
coinc_times = coinc_times.view(Quantity)
# Set the coincidence times to T-jitter if larger. This ensures that
# the last jittered spike time is <T
for i in range(len(coinc_times)):
if coinc_times[i] > t_stop - jitter:
coinc_times[i] = t_stop - jitter
# Replicate coinc_times n times, and jitter each event in each array by
# +/- jitter (within (t_start, t_stop))
embedded_coinc = coinc_times + \
np.random.random(
(len(rates_b), len(coinc_times))) * 2 * jitter - jitter
embedded_coinc = embedded_coinc + \
(t_start - embedded_coinc) * (embedded_coinc < t_start) - \
(t_stop - embedded_coinc) * (embedded_coinc > t_stop)
# Inject coincident events into the n SIP processes generated above, and
# merge with the n independent processes
sip_process = [
np.sort(np.concatenate((
embedded_poisson_trains[m].rescale(t_stop.units),
embedded_coinc[m].rescale(t_stop.units))) * t_stop.units)
for m in range(len(rates_b))]
# Convert back sip_process and coinc_times from Quantity objects to
# neo.SpikeTrain objects
sip_process = [
SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
for t in sip_process]
coinc_times = [
SpikeTrain(t, t_start=t_start, t_stop=t_stop).rescale(t_stop.units)
for t in embedded_coinc]
# Return the processes in the specified output_format
if not return_coinc:
output = sip_process
else:
output = sip_process, coinc_times
return output
def _pool_two_spiketrains(a, b, extremes='inner'):
"""
Pool the spikes of two spike trains a and b into a unique spike train.
Parameters
----------
a, b : neo.SpikeTrains
Spike trains to be pooled
extremes: str, optional
Only spikes of a and b in the specified extremes are considered.
        * 'inner': pool all spikes from max(a.t_start, b.t_start) to
           min(a.t_stop, b.t_stop)
        * 'outer': pool all spikes from min(a.t_start, b.t_start) to
           max(a.t_stop, b.t_stop)
Default: 'inner'
Output
------
neo.SpikeTrain containing all spikes in a and b falling in the
specified extremes
"""
unit = a.units
times_a_dimless = list(a.view(Quantity).magnitude)
times_b_dimless = list(b.rescale(unit).view(Quantity).magnitude)
times = (times_a_dimless + times_b_dimless) * unit
if extremes == 'outer':
t_start = min(a.t_start, b.t_start)
t_stop = max(a.t_stop, b.t_stop)
elif extremes == 'inner':
t_start = max(a.t_start, b.t_start)
t_stop = min(a.t_stop, b.t_stop)
times = times[times > t_start]
times = times[times < t_stop]
else:
raise ValueError(
'extremes (%s) can only be "inner" or "outer"' % extremes)
pooled_train = SpikeTrain(
times=sorted(times.magnitude), units=unit, t_start=t_start,
t_stop=t_stop)
return pooled_train
def _pool_spiketrains(trains, extremes='inner'):
"""
Pool spikes from any number of spike trains into a unique spike train.
Parameters
----------
trains: list
list of spike trains to merge
extremes: str, optional
        Only spikes of the trains in the specified extremes are considered.
        * 'inner': pool all spikes from the maximum t_start to the
           minimum t_stop over all trains
        * 'outer': pool all spikes from the minimum t_start to the
           maximum t_stop over all trains
Default: 'inner'
Output
------
neo.SpikeTrain containing all spikes in trains falling in the
specified extremes
"""
merge_trains = trains[0]
for t in trains[1:]:
merge_trains = _pool_two_spiketrains(
merge_trains, t, extremes=extremes)
t_start, t_stop = merge_trains.t_start, merge_trains.t_stop
merge_trains = sorted(merge_trains)
merge_trains = np.squeeze(merge_trains)
merge_trains = SpikeTrain(
merge_trains, t_stop=t_stop, t_start=t_start, units=trains[0].units)
return merge_trains
def _sample_int_from_pdf(a, n):
"""
Draw n independent samples from the set {0,1,...,L}, where L=len(a)-1,
according to the probability distribution a.
a[j] is the probability to sample j, for each j from 0 to L.
Parameters
-----
a: numpy.array
        Probability vector (i.e. an array summing to 1) that at each entry j carries
the probability to sample j (j=0,1,...,len(a)-1).
n: int
Number of samples generated with the function
Output
-------
        array of n samples taking values between 0 and L=len(a)-1.
"""
A = np.cumsum(a) # cumulative distribution of a
u = np.random.uniform(0, 1, size=n)
U = np.array([u for i in a]).T # copy u (as column vector) len(a) times
return (A < U).sum(axis=1)
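# A worked example of the cumulative-sum sampling used in `_sample_int_from_pdf`.
# With a = [0.2, 0.5, 0.3] the cumulative distribution is A = [0.2, 0.7, 1.0];
# a uniform draw u = 0.6 falls between A[0] and A[1], so index 1 is returned.
def _example_sample_int_from_pdf():
    a = np.array([0.2, 0.5, 0.3])
    A = np.cumsum(a)
    u = 0.6
    return int((A < u).sum())  # -> 1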
def _mother_proc_cpp_stat(A, t_stop, rate, t_start=0 * ms):
"""
Generate the hidden ("mother") Poisson process for a Compound Poisson
Process (CPP).
Parameters
----------
A : numpy.array
Amplitude distribution. A[j] represents the probability of a
synchronous event of size j.
The sum over all entries of a must be equal to one.
t_stop : quantities.Quantity
The stopping time of the mother process
rate : quantities.Quantity
        Homogeneous rate of the n spike trains that will be generated by the
CPP function
t_start : quantities.Quantity, optional
The starting time of the mother process
Default: 0 ms
Output
------
Poisson spike train representing the mother process generating the CPP
"""
N = len(A) - 1
exp_A = np.dot(A, range(N + 1)) # expected value of a
exp_mother = (N * rate) / float(exp_A) # rate of the mother process
return homogeneous_poisson_process(
rate=exp_mother, t_stop=t_stop, t_start=t_start)
def _cpp_hom_stat(A, t_stop, rate, t_start=0 * ms):
"""
Generate a Compound Poisson Process (CPP) with amplitude distribution
    A and homogeneous firing rate r for each spike train.
Parameters
----------
A : numpy.ndarray
Amplitude distribution. A[j] represents the probability of a
synchronous event of size j.
The sum over all entries of A must be equal to one.
t_stop : quantities.Quantity
The end time of the output spike trains
rate : quantities.Quantity
Average rate of each spike train generated
t_start : quantities.Quantity, optional
The start time of the output spike trains
Default: 0 ms
Output
------
List of n neo.SpikeTrains, having average firing rate r and correlated
such to form a CPP with amplitude distribution a
"""
# Generate mother process and associated spike labels
mother = _mother_proc_cpp_stat(
A=A, t_stop=t_stop, rate=rate, t_start=t_start)
labels = _sample_int_from_pdf(A, len(mother))
N = len(A) - 1 # Number of trains in output
try: # Faster but more memory-consuming approach
M = len(mother) # number of spikes in the mother process
spike_matrix = np.zeros((N, M), dtype=bool)
# for each spike, take its label l
for spike_id, l in enumerate(labels):
# choose l random trains
train_ids = random.sample(range(N), l)
# and set the spike matrix for that train
for train_id in train_ids:
                spike_matrix[train_id, spike_id] = True  # mark that train's spike
times = [[] for i in range(N)]
for train_id, row in enumerate(spike_matrix):
times[train_id] = mother[row].view(Quantity)
except MemoryError: # Slower (~2x) but less memory-consuming approach
print('memory case')
times = [[] for i in range(N)]
for t, l in zip(mother, labels):
train_ids = random.sample(range(N), l)
for train_id in train_ids:
times[train_id].append(t)
trains = [SpikeTrain(
times=t, t_start=t_start, t_stop=t_stop) for t in times]
return trains
def _cpp_het_stat(A, t_stop, rate, t_start=0. * ms):
"""
Generate a Compound Poisson Process (CPP) with amplitude distribution
A and heterogeneous firing rates r=r[0], r[1], ..., r[-1].
Parameters
----------
A : array
CPP's amplitude distribution. A[j] represents the probability of
a synchronous event of size j among the generated spike trains.
The sum over all entries of A must be equal to one.
t_stop : Quantity (time)
The end time of the output spike trains
rate : Quantity (1/time)
Average rate of each spike train generated
t_start : quantities.Quantity, optional
The start time of the output spike trains
Default: 0 ms
Output
------
List of neo.SpikeTrains with different firing rates, forming
a CPP with amplitude distribution A
"""
# Computation of Parameters of the two CPPs that will be merged
# (uncorrelated with heterog. rates + correlated with homog. rates)
N = len(rate) # number of output spike trains
A_exp = np.dot(A, range(N + 1)) # expectation of A
r_sum = np.sum(rate) # sum of all output firing rates
r_min = np.min(rate) # minimum of the firing rates
r1 = r_sum - N * r_min # rate of the uncorrelated CPP
r2 = r_sum / float(A_exp) - r1 # rate of the correlated CPP
r_mother = r1 + r2 # rate of the hidden mother process
# Check the analytical constraint for the amplitude distribution
if A[1] < (r1 / r_mother).rescale(dimensionless).magnitude:
raise ValueError('A[1] too small / A[i], i>1 too high')
# Compute the amplitude distrib of the correlated CPP, and generate it
a = [(r_mother * i) / float(r2) for i in A]
a[1] = a[1] - r1 / float(r2)
CPP = _cpp_hom_stat(a, t_stop, r_min, t_start)
# Generate the independent heterogeneous Poisson processes
POISS = [
homogeneous_poisson_process(i - r_min, t_start, t_stop) for i in rate]
# Pool the correlated CPP and the corresponding Poisson processes
out = [_pool_two_spiketrains(CPP[i], POISS[i]) for i in range(N)]
return out
def compound_poisson_process(rate, A, t_stop, shift=None, t_start=0 * ms):
"""
Generate a Compound Poisson Process (CPP; see [1]) with a given amplitude
distribution A and stationary marginal rates r.
The CPP process is a model for parallel, correlated processes with Poisson
spiking statistics at pre-defined firing rates. It is composed of len(A)-1
spike trains with a correlation structure determined by the amplitude
distribution A: A[j] is the probability that a spike occurs synchronously
in any j spike trains.
The CPP is generated by creating a hidden mother Poisson process, and then
copying spikes of the mother process to j of the output spike trains with
probability A[j].
Note that this function decorrelates the firing rate of each SpikeTrain
from the probability for that SpikeTrain to participate in a synchronous
event (which is uniform across SpikeTrains).
Parameters
----------
rate : quantities.Quantity
Average rate of each spike train generated. Can be:
        - a single value, all spike trains will have the same rate
- an array of values (of length len(A)-1), each indicating the
firing rate of one process in output
A : array
CPP's amplitude distribution. A[j] represents the probability of
a synchronous event of size j among the generated spike trains.
The sum over all entries of A must be equal to one.
t_stop : quantities.Quantity
The end time of the output spike trains.
shift : None or quantities.Quantity, optional
If None, the injected synchrony is exact. If shift is a Quantity, all
the spike trains are shifted independently by a random amount in
the interval [-shift, +shift].
Default: None
t_start : quantities.Quantity, optional
The t_start time of the output spike trains.
        Default: 0 ms
Returns
-------
List of neo.SpikeTrains
SpikeTrains with specified firing rates forming the CPP with amplitude
distribution A.
References
----------
[1] Staude, Rotter, Gruen (2010) J Comput Neurosci 29:327-350.
"""
# Check A is a probability distribution (it sums to 1 and is positive)
if abs(sum(A) - 1) > np.finfo('float').eps:
raise ValueError(
'A must be a probability vector, sum(A)= %f !=1' % (sum(A)))
if any([a < 0 for a in A]):
raise ValueError(
            'A must be a probability vector, all of its elements must be >= 0')
# Check that the rate is not an empty Quantity
if rate.ndim == 1 and len(rate.magnitude) == 0:
raise ValueError('Rate is an empty Quantity array')
# Return empty spike trains for specific parameters
elif A[0] == 1 or np.sum(np.abs(rate.magnitude)) == 0:
return [
SpikeTrain([] * t_stop.units, t_stop=t_stop,
t_start=t_start) for i in range(len(A) - 1)]
else:
# Homogeneous rates
if rate.ndim == 0:
cpp = _cpp_hom_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
# Heterogeneous rates
else:
cpp = _cpp_het_stat(A=A, t_stop=t_stop, rate=rate, t_start=t_start)
if shift is None:
return cpp
# Dither the output spiketrains
else:
cpp = [
dither_spike_train(cp, shift=shift, edges=True)[0]
for cp in cpp]
return cpp
# Alias for the compound poisson process
cpp = compound_poisson_process
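# An illustrative usage sketch of compound_poisson_process; the amplitude
# distribution below (90% single-train events, 10% events copied into all four
# trains) and the rate are arbitrary example values.
def _example_compound_poisson_process():
    amplitude_distribution = [0.0, 0.9, 0.0, 0.0, 0.1]  # len(A) - 1 = 4 trains
    return compound_poisson_process(
        rate=10 * Hz, A=amplitude_distribution, t_stop=1000 * ms)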
| 38.172932
| 93
| 0.641274
|
7952505591894de61ba5269d2ed071b733a940c4
| 5,371
|
py
|
Python
|
mypy_django_plugin/main.py
|
Naddiseo/django-stubs
|
cff5ab463c911283a9c43a26a38cb7bd4deebbd5
|
[
"BSD-3-Clause"
] | null | null | null |
mypy_django_plugin/main.py
|
Naddiseo/django-stubs
|
cff5ab463c911283a9c43a26a38cb7bd4deebbd5
|
[
"BSD-3-Clause"
] | null | null | null |
mypy_django_plugin/main.py
|
Naddiseo/django-stubs
|
cff5ab463c911283a9c43a26a38cb7bd4deebbd5
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from typing import Callable, Optional, cast, Dict
from mypy.checker import TypeChecker
from mypy.nodes import TypeInfo
from mypy.options import Options
from mypy.plugin import Plugin, FunctionContext, ClassDefContext, MethodContext
from mypy.types import Type, Instance
from mypy_django_plugin import helpers, monkeypatch
from mypy_django_plugin.plugins.fields import determine_type_of_array_field
from mypy_django_plugin.plugins.migrations import determine_model_cls_from_string_for_migrations
from mypy_django_plugin.plugins.models import process_model_class
from mypy_django_plugin.plugins.related_fields import extract_to_parameter_as_get_ret_type_for_related_field, reparametrize_with
from mypy_django_plugin.plugins.settings import DjangoConfSettingsInitializerHook
def transform_model_class(ctx: ClassDefContext) -> None:
try:
sym = ctx.api.lookup_fully_qualified(helpers.MODEL_CLASS_FULLNAME)
except KeyError:
# models.Model is not loaded, skip metadata model write
pass
else:
if sym is not None and isinstance(sym.node, TypeInfo):
sym.node.metadata['django']['model_bases'][ctx.cls.fullname] = 1
process_model_class(ctx)
def transform_manager_class(ctx: ClassDefContext) -> None:
sym = ctx.api.lookup_fully_qualified_or_none(helpers.MANAGER_CLASS_FULLNAME)
if sym is not None and isinstance(sym.node, TypeInfo):
sym.node.metadata['django']['manager_bases'][ctx.cls.fullname] = 1
def determine_proper_manager_type(ctx: FunctionContext) -> Type:
api = cast(TypeChecker, ctx.api)
ret = ctx.default_return_type
if not api.tscope.classes:
# not in class
return ret
outer_model_info = api.tscope.classes[0]
if not outer_model_info.has_base(helpers.MODEL_CLASS_FULLNAME):
return ret
if not isinstance(ret, Instance):
return ret
for i, base in enumerate(ret.type.bases):
if base.type.fullname() in {helpers.MANAGER_CLASS_FULLNAME,
helpers.RELATED_MANAGER_CLASS_FULLNAME,
helpers.BASE_MANAGER_CLASS_FULLNAME}:
ret.type.bases[i] = reparametrize_with(base, [Instance(outer_model_info, [])])
return ret
return ret
class DjangoPlugin(Plugin):
def __init__(self,
options: Options) -> None:
super().__init__(options)
self.django_settings = os.environ.get('DJANGO_SETTINGS_MODULE')
if self.django_settings:
monkeypatch.load_graph_to_add_settings_file_as_a_source_seed(self.django_settings)
monkeypatch.inject_dependencies(self.django_settings)
else:
monkeypatch.restore_original_load_graph()
monkeypatch.restore_original_dependencies_handling()
def get_current_model_bases(self) -> Dict[str, int]:
model_sym = self.lookup_fully_qualified(helpers.MODEL_CLASS_FULLNAME)
if model_sym is not None and isinstance(model_sym.node, TypeInfo):
if 'django' not in model_sym.node.metadata:
model_sym.node.metadata['django'] = {
'model_bases': {helpers.MODEL_CLASS_FULLNAME: 1}
}
return model_sym.node.metadata['django']['model_bases']
else:
return {}
def get_current_manager_bases(self) -> Dict[str, int]:
manager_sym = self.lookup_fully_qualified(helpers.MANAGER_CLASS_FULLNAME)
if manager_sym is not None and isinstance(manager_sym.node, TypeInfo):
if 'django' not in manager_sym.node.metadata:
manager_sym.node.metadata['django'] = {
'manager_bases': {helpers.MANAGER_CLASS_FULLNAME: 1}
}
return manager_sym.node.metadata['django']['manager_bases']
else:
return {}
def get_function_hook(self, fullname: str
) -> Optional[Callable[[FunctionContext], Type]]:
if fullname in {helpers.FOREIGN_KEY_FULLNAME,
helpers.ONETOONE_FIELD_FULLNAME,
helpers.MANYTOMANY_FIELD_FULLNAME}:
return extract_to_parameter_as_get_ret_type_for_related_field
if fullname == 'django.contrib.postgres.fields.array.ArrayField':
return determine_type_of_array_field
manager_bases = self.get_current_manager_bases()
if fullname in manager_bases:
return determine_proper_manager_type
def get_method_hook(self, fullname: str
) -> Optional[Callable[[MethodContext], Type]]:
if fullname in {'django.apps.registry.Apps.get_model',
'django.db.migrations.state.StateApps.get_model'}:
return determine_model_cls_from_string_for_migrations
return None
def get_base_class_hook(self, fullname: str
) -> Optional[Callable[[ClassDefContext], None]]:
if fullname in self.get_current_model_bases():
return transform_model_class
if fullname == helpers.DUMMY_SETTINGS_BASE_CLASS:
return DjangoConfSettingsInitializerHook(settings_module=self.django_settings)
if fullname in self.get_current_manager_bases():
return transform_manager_class
return None
def plugin(version):
return DjangoPlugin
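# For reference: mypy discovers this plugin through the module-level `plugin()`
# callable above. A typical configuration registers it in mypy.ini, e.g.:
#
#     [mypy]
#     plugins = mypy_django_plugin.main
#
# and, as the constructor above shows, the Django settings module is read from
# the environment, e.g.:
#
#     DJANGO_SETTINGS_MODULE=mysite.settings mypy ./mysite
#
# Here `mysite.settings` and `./mysite` are placeholder names.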
| 41.635659
| 128
| 0.689443
|
7952508f8557f8db3bef292a9c99cc827c410152
| 3,365
|
py
|
Python
|
AutenticacioJuego.py
|
Kelly901/Proyecto2
|
198c04c3324253e0ec7ea74d3edaefbd182e026f
|
[
"Apache-2.0"
] | null | null | null |
AutenticacioJuego.py
|
Kelly901/Proyecto2
|
198c04c3324253e0ec7ea74d3edaefbd182e026f
|
[
"Apache-2.0"
] | null | null | null |
AutenticacioJuego.py
|
Kelly901/Proyecto2
|
198c04c3324253e0ec7ea74d3edaefbd182e026f
|
[
"Apache-2.0"
] | null | null | null |
from Juego import Juego
import json
class AutenticacionJuego:
juego=[]
juego2=[]
def __init__(self):
self.contador=0
def crearJuego(self,nombre,anio,precio,categoria1,categoria2,categoria3,foto,banner,descripcion):
self.juego.append(Juego(self.contador,nombre,anio,precio,categoria1,categoria2,categoria3,foto,banner,descripcion))
self.contador+=1
return True
    # Create game 2: validates that the name is not already taken
def crearJuego2(self,nombre,anio,precio,categoria1,categoria2,categoria3,foto,banner,descripcion):
for ju in self.juego:
if ju.nombre==nombre:
print("El nombre esta repetido")
return False
self.juego.append(Juego(self.contador,nombre,anio,precio,categoria1,categoria2,categoria3,foto,banner,descripcion))
self.contador+=1
return True
def mostrar(self):
print('id: \t nombre \tcategoria1 \t categoria2 \t categoria3 \t descripcion')
for ju in self.juego:
print(str(ju.id )+'\t'+ju.nombre+'\t\t\t\t'+ju.precio+'\t\t'+ju.categoria1+'\t\t\t'+ju.categoria2+'\t\t\t\t'+ju.categoria3+'\t\t\t\t'+ju.descripcion)
def descomponer(self,texto):
texto2=texto.split("\n")
for r in texto2:
texto3=r.split(",")
print(r.split(","))
if texto[0]=="":
return "no se puede"
self.crearJuego(texto3[0],texto3[1],texto3[2],texto3[3],texto3[4],texto3[5],texto3[6],texto3[7],texto3[8])
return texto
def dump(self,id):
for ju in self.juego:
if str(ju.id)==id:
return {
'id':ju.id,
'nombre':ju.nombre,
'anio':ju.anio,
'precio': ju.precio,
'categoria1':ju.categoria1,
'categoria2':ju.categoria2,
'categoria3':ju.categoria3,
'foto' :ju.foto,
'banner' :ju.banner,
'descripcion' :ju.descripcion
}
return False
def dump2(self,nombre):
for ju in self.juego:
if ju.nombre==nombre:
return {
'id':ju.id,
'nombre':ju.nombre,
'anio':ju.anio,
'precio': ju.precio,
'categoria1':ju.categoria1,
'categoria2':ju.categoria2,
'categoria3':ju.categoria3,
'foto' :ju.foto,
'banner' :ju.banner,
'descripcion' :ju.descripcion,
'estado':'1'
}
return False
    # Return game name
def nombreJuego(self,id):
for ju in self.juego:
if str(ju.id)==id:
return ju.nombre
return False
    # Return photo
def fotoJuego(self,id):
for ju in self.juego:
if str(ju.id)==id:
return ju.foto
return False
def bucarJuego(self,nombre):
for j in self.juego:
if j.nombre==nombre:
print("El juego si existe")
return True
print("El juego no existe")
return False
def modificarJuego(self,id,nombre,anio,precio,categoria1,categoria2,categoria3,foto,banner,descripcion):
for us in self.juego:
if self.comparar_nombre(nombre)=="si":
if str(us.id) ==id:
print(1)
us.nombre=nombre
us.anio=anio
us.precio=precio
us.categoria1=categoria1
us.categoria2=categoria2
us.categoria3=categoria3
us.foto=foto
us.banner=banner
us.descripcion=descripcion
print("si se pudo")
return True
return False
def comparar_nombre(self,nombre):
for us in self.juego:
if us.nombre==nombre:
print("no se pudo por el nombre de usuario")
return "no"
print("si se puede")
return "si"
    def eliminar(self,id):
        for indice, ju in enumerate(self.juego):
            if str(ju.id)==str(id):
                self.juego.pop(indice)
                return True
        return False
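# An illustrative usage sketch; the game data below is made up for
# demonstration purposes only.
def _ejemplo_de_uso():
    autenticacion = AutenticacionJuego()
    autenticacion.crearJuego2('Tetris', '1984', '10', 'puzzle', 'clasico',
                              'arcade', 'foto.png', 'banner.png',
                              'Juego de bloques')
    autenticacion.mostrar()
    return autenticacion.bucarJuego('Tetris')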
| 23.865248
| 152
| 0.658247
|
795250dabe27df678f8cdb20201f3cb38ab3fef4
| 5,003
|
py
|
Python
|
tests/modules/file_list_handling/test_remote_file_list_creation.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | 1
|
2021-11-14T06:25:44.000Z
|
2021-11-14T06:25:44.000Z
|
tests/modules/file_list_handling/test_remote_file_list_creation.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | null | null | null |
tests/modules/file_list_handling/test_remote_file_list_creation.py
|
mhaberler/gribmagic
|
f7a8c3f0653e421ca57f4200ec2e0ba0ce4064e0
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from pathlib import Path
from unittest.mock import patch
import pytest
from src.enumerations.weather_models import WeatherModels
from src.exceptions.wrong_weather_model_exception import \
WrongWeatherModelException
from src.modules.config.constants import KEY_VARIABLES, KEY_GRIB_PACKAGE_TYPES, \
KEY_FORECAST_STEPS, \
KEY_DIRECTORY_TEMPLATE, KEY_REMOTE_SERVER, KEY_FILE_TEMPLATE, \
KEY_INITIALIZATION_DATE_FORMAT, KEY_FORECAST_STEPS_STR_LEN
from src.modules.file_list_handling.remote_file_list_creation import \
build_remote_file_lists_for_variable_files, \
build_remote_file_lists_for_package_files, \
build_remote_file_list
@patch(
'src.modules.file_list_handling.remote_file_list_creation.MODEL_CONFIG',
{
WeatherModels.ICON_EU.value:
{
KEY_VARIABLES: ['air_temperature_2m'],
KEY_FORECAST_STEPS: {0: [0, 1]},
KEY_DIRECTORY_TEMPLATE: 'test_remote_dir/{initialization_time}/{variable_name_lower}',
KEY_FILE_TEMPLATE: 'test_remote_file_{level_type}_{initialization_date}{initialization_time}_'
'{forecast_step}_{variable_name_upper}.grib2.bz2',
KEY_REMOTE_SERVER: 'test1',
KEY_INITIALIZATION_DATE_FORMAT: '%Y%m%d',
}}
)
def test_build_remote_model_file_lists():
to_test = build_remote_file_lists_for_variable_files(WeatherModels.ICON_EU,
0,
datetime(2020, 6,
10).date())
assert to_test == [Path(
'test1/test_remote_dir/00/t_2m/test_remote_file_single-level_2020061000_000_T_2M.grib2.bz2'),
Path(
'test1/test_remote_dir/00/t_2m/test_remote_file_single-level_2020061000_001_T_2M.grib2.bz2')]
def test_build_remote_model_file_lists_wrong_weather_model():
with pytest.raises(WrongWeatherModelException) as exc:
_ = build_remote_file_lists_for_variable_files(
WeatherModels.AROME_METEO_FRANCE,
0,
datetime(2020, 6, 10).date())
assert str(
exc.value) == 'Please choose one of [icon_global, icon_eu, cosmo_d2, cosmo_d2_eps, icon_eu_eps]'
@patch(
'src.modules.file_list_handling.remote_file_list_creation.MODEL_CONFIG',
{
WeatherModels.AROME_METEO_FRANCE.value:
{
KEY_VARIABLES: ['air_temperature_2m'],
KEY_FORECAST_STEPS: {0: [0, 1]},
KEY_FILE_TEMPLATE: 'test_remote_file_{initialization_date}{initialization_time}_'
'{forecast_step}_{grib_package_type}.grib2.bz2',
KEY_REMOTE_SERVER: 'test1',
KEY_INITIALIZATION_DATE_FORMAT: '%Y%m%d',
KEY_GRIB_PACKAGE_TYPES: ['Package1'],
KEY_FORECAST_STEPS_STR_LEN: 2,
KEY_DIRECTORY_TEMPLATE: ''
}}
)
def test_build_remote_model_file_lists_for_package():
to_test = build_remote_file_lists_for_package_files(
WeatherModels.AROME_METEO_FRANCE,
0,
datetime(2020, 6, 10).date())
assert to_test == [
Path('test1/test_remote_file_2020061000_00_Package1.grib2.bz2'),
Path('test1/test_remote_file_2020061000_01_Package1.grib2.bz2')]
@patch(
'src.modules.file_list_handling.remote_file_list_creation.MODEL_CONFIG',
{
WeatherModels.AROME_METEO_FRANCE.value:
{
KEY_VARIABLES: ['air_temperature_2m'],
KEY_FORECAST_STEPS: {0: [0, 1]},
KEY_FILE_TEMPLATE: 'test_remote_file_{initialization_date}{initialization_time}_'
'{forecast_step}_{grib_package_type}.grib2.bz2',
KEY_REMOTE_SERVER: 'test1',
KEY_INITIALIZATION_DATE_FORMAT: '%Y%m%d',
KEY_GRIB_PACKAGE_TYPES: ['Package1'],
KEY_FORECAST_STEPS_STR_LEN: 2,
KEY_DIRECTORY_TEMPLATE: ''
}}
)
def test_build_remote_file_list():
to_test = build_remote_file_list(
WeatherModels.AROME_METEO_FRANCE,
0,
datetime(2020, 6, 10).date())
assert to_test == [
Path('test1/test_remote_file_2020061000_00_Package1.grib2.bz2'),
Path('test1/test_remote_file_2020061000_01_Package1.grib2.bz2')]
def test_build_remote_model_file_lists_for_package_wrong_model():
with pytest.raises(WrongWeatherModelException) as excinfo:
_ = build_remote_file_lists_for_package_files(WeatherModels.ICON_EU,
0,
datetime(2020, 6,
10).date())
assert str(excinfo.value) == 'Please choose one of [arome_meteo_france, geos5, gfs, harmonie_knmi]'
| 43.885965
| 120
| 0.63262
|
7952513fcaecee979d79e7eb79fe616b5c6524a9
| 4,387
|
py
|
Python
|
ktapp/helpers/search.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 5
|
2015-04-13T09:44:31.000Z
|
2017-10-19T01:07:58.000Z
|
ktapp/helpers/search.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | 49
|
2015-02-15T07:12:05.000Z
|
2022-03-11T23:11:43.000Z
|
ktapp/helpers/search.py
|
cu2/KT
|
8a0964b77dce150358637faa679d969a07e42f07
|
[
"CC-BY-3.0"
] | null | null | null |
from django.conf import settings
from django.db.models import Q
from django.template.defaultfilters import slugify
from ktapp import models
from ktapp import utils as kt_utils
def get_q_pieces(q): # limit length of query string and number of query pieces
return q[:200].split()[:20]
def find_film_by_link(q):
# search by IMDB link:
if 'imdb.com/title' in q:
try:
imdb_link = q[q.index('imdb.com/title')+15:].split('/')[0]
except Exception:
imdb_link = None
if imdb_link:
try:
film = models.Film.objects.get(imdb_link=imdb_link)
except models.Film.DoesNotExist:
film = None
if film:
return film
# search by port.hu link:
if 'port.hu' in q:
porthu_link = kt_utils.parse_porthu_link(q)
if porthu_link:
try:
film = models.Film.objects.get(porthu_link=porthu_link)
except models.Film.DoesNotExist:
film = None
if film:
return film
# search by wikipedia link:
if 'wikipedia.org' in q:
try:
wikipedia_link = q[q.index('://')+3:]
except Exception:
wikipedia_link = None
if wikipedia_link:
try:
film = models.Film.objects.get(wikipedia_link_en__contains=wikipedia_link)
except (models.Film.DoesNotExist, models.Film.MultipleObjectsReturned):
try:
film = models.Film.objects.get(wikipedia_link_hu__contains=wikipedia_link)
except (models.Film.DoesNotExist, models.Film.MultipleObjectsReturned):
film = None
if film:
return film
return None
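# A worked example of the IMDB-link slicing above; 'imdb.com/title' is 14
# characters long, so the +15 offset also skips the following slash. The URL
# below is an arbitrary example.
def _example_imdb_link_slice():
    q = 'https://www.imdb.com/title/tt0110912/'
    return q[q.index('imdb.com/title') + 15:].split('/')[0]  # -> 'tt0110912'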
def search_safe_slugify(value):
# remove dashes from the beginning
# because __search='+-anything' breaks with ProgrammingError: (1064, "syntax error, unexpected '-'")
return slugify(value).lstrip('-')
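# A worked example of how the boolean-mode search string used by the finders
# below is assembled from the query pieces; the query text is arbitrary.
def _example_search_terms():
    q_pieces = get_q_pieces('Quentin Tarantino')
    # -> '+quentin* +tarantino*'
    return ' '.join(['+%s*' % search_safe_slugify(q_piece)
                     for q_piece in q_pieces if search_safe_slugify(q_piece)])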
def find_artists(q_pieces, limit):
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
return models.Artist.objects.filter(
slug_cache__icontains=' '.join([q_piece for q_piece in q_pieces if q_piece])
).order_by('-number_of_ratings')[:limit]
return models.Artist.objects.filter(
slug_cache__search=' '.join(['+%s*' % search_safe_slugify(q_piece) for q_piece in q_pieces if search_safe_slugify(q_piece)])
).order_by('-number_of_ratings')[:limit]
def find_users(q_pieces, limit):
if settings.DATABASES['default']['ENGINE'] == 'django.db.backends.sqlite3':
return models.KTUser.objects.filter(
slug_cache__icontains=' '.join([q_piece for q_piece in q_pieces if q_piece])
).order_by('username')[:limit]
return models.KTUser.objects.filter(
slug_cache__search=' '.join(['+%s*' % search_safe_slugify(q_piece) for q_piece in q_pieces if search_safe_slugify(q_piece)])
).order_by('username')[:limit]
def find_topics(q_pieces, limit):
topics = models.Topic.objects.select_related('last_comment', 'last_comment__created_by')
for q_piece in q_pieces:
topics = topics.filter(
Q(title__icontains=q_piece)
| Q(slug_cache__icontains=slugify(q_piece))
)
return topics.order_by('-number_of_comments')[:limit]
def find_polls(q_pieces, limit):
polls = models.Poll.objects
for q_piece in q_pieces:
polls = polls.filter(
Q(title__icontains=q_piece)
| Q(slug_cache__icontains=slugify(q_piece))
)
return polls.order_by('-number_of_votes')[:limit]
def find_roles(q_pieces, limit):
roles = models.FilmArtistRelationship.objects.select_related('artist', 'film').filter(role_type=models.FilmArtistRelationship.ROLE_TYPE_ACTOR)
for q_piece in q_pieces:
roles = roles.filter(
Q(role_name__icontains=q_piece)
| Q(slug_cache__icontains=slugify(q_piece))
)
return roles.order_by('-film__number_of_ratings', '-artist__number_of_ratings')[:limit]
def find_sequels(q_pieces, limit):
sequels = models.Sequel.objects
for q_piece in q_pieces:
sequels = sequels.filter(
Q(name__icontains=q_piece)
| Q(slug_cache__icontains=slugify(q_piece))
)
return sequels.order_by('name')[:limit]
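# Hedged usage sketch (not part of the original module): one way the helpers
# above could be combined in a search view; the limits and the return shape are
# made up for illustration.
def example_search(q):  # hypothetical helper, for illustration only
    film = find_film_by_link(q)  # direct hit via an IMDB/port.hu/wikipedia link
    if film:
        return {'film': film}
    q_pieces = get_q_pieces(q)  # capped at 200 characters / 20 pieces
    return {
        'artists': find_artists(q_pieces, limit=10),
        'users': find_users(q_pieces, limit=10),
        'topics': find_topics(q_pieces, limit=10),
        'polls': find_polls(q_pieces, limit=10),
    }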
| 36.865546
| 146
| 0.644632
|
79525285bf6a67a9880597766525a93f9f200be5
| 8,821
|
py
|
Python
|
src/collision_manager.py
|
Tomaszu97/game-engine
|
dcf34e0741edecdb2902871ef7488ea873844e65
|
[
"MIT"
] | 1
|
2022-02-09T07:41:30.000Z
|
2022-02-09T07:41:30.000Z
|
src/collision_manager.py
|
Tomaszu97/game-engine
|
dcf34e0741edecdb2902871ef7488ea873844e65
|
[
"MIT"
] | null | null | null |
src/collision_manager.py
|
Tomaszu97/game-engine
|
dcf34e0741edecdb2902871ef7488ea873844e65
|
[
"MIT"
] | null | null | null |
from pygame import Rect
from .shared import *
from .game_object import *
class CollisionManager():
    #TODO first load the whole level, then handle this stuff, otherwise a race condition sometimes happens
def __init__(self):
# collision handlers
# elastic collision
def BNC(object, other_object, elastic=True):
sign = lambda x: 1 if x>=0 else -1
            # skip if the other object's hitbox width or height is 0
if other_object.hitbox_size.x == 0 or other_object.hitbox_size.y == 0:
return
# move them away and collide
if Rect(object.hitbox_position, object.hitbox_size).colliderect(Rect(other_object.hitbox_position, other_object.hitbox_size)):
relative_position = object.position + (object.size/2) - other_object.position - (other_object.size/2)
total_speed = object.movement_speed + other_object.movement_speed
if object.mass == 0 or other_object.mass == 0:
mass_ratio = 1
other_mass_ratio = 1
else:
mass_ratio = object.mass / (object.mass+other_object.mass)
other_mass_ratio = other_object.mass / (object.mass+other_object.mass)
# relpos from other to me
if relative_position.x < 0:
# self approach from the left
x_intersection = -round(object.hitbox_position.x+object.hitbox_size.x-other_object.hitbox_position.x)
else:
# self approach from the right
x_intersection = round(other_object.hitbox_position.x+other_object.hitbox_size.x-object.hitbox_position.x)
if relative_position.y < 0:
# self approach from above
y_intersection = -round(object.hitbox_position.y+object.hitbox_size.y-other_object.hitbox_position.y)
else:
# self approach from below
y_intersection = round(other_object.hitbox_position.y+other_object.hitbox_size.y-object.hitbox_position.y)
#TODO optimize it
if abs(x_intersection) > abs(y_intersection):
if object.mass != 0:
object.move(0, y_intersection*other_mass_ratio)
if elastic:
object.movement_speed.y = sign(y_intersection)*abs(other_mass_ratio*total_speed.y)
else:
object.movement_speed.y = 0
if other_object.mass != 0:
other_object.move(0, -y_intersection*mass_ratio)
if elastic:
other_object.movement_speed.y = -sign(y_intersection)*abs(mass_ratio*total_speed.y)
else:
other_object.movement_speed.y = 0
else:
if object.mass != 0:
object.move(x_intersection*other_mass_ratio, 0)
if elastic:
object.movement_speed.x = sign(x_intersection)*abs(other_mass_ratio*total_speed.x)
else:
object.movement_speed.x = 0
if other_object.mass != 0:
other_object.move(-x_intersection*mass_ratio, 0)
if elastic:
other_object.movement_speed.x = -sign(x_intersection)*abs(mass_ratio*total_speed.x)
else:
other_object.movement_speed.x = 0
# inelastic collision
def HIT(object, other_object):
return BNC(object, other_object, elastic=False)
# take damage - first object takes damage
def TDM(object, other_object):
try:
object.hp -= other_object.damage
if object.hp < 0:
object.kill()
except AttributeError:
return
# make damage - second object takes damage
def MDM(object, other_object):
return TDM(other_object, object)
# kill yourself
def KYS(object, other_object):
object.kill()
# kill him
def KHM(object, other_object):
return KYS(other_object, object)
# teamwork - stop processing collision if both objects are in the same team
def TMW(object, other_object):
# stop processing collision if both are in the same team
if object.team == other_object.team:
return False
# call trapdoor handler if trapdoor is not triggered
def TRP(object, other_object):
# optimize it - maybe take advantage of collision order (higher first)
if object.type == TRAPDOOR:
trapdoor = object
else:
trapdoor = other_object
if not trapdoor.triggered:
trapdoor.triggered = True
trapdoor.handler()
# bullet kys - kill a bullet
def BKS(object, other_object):
# optimize it
if object.type == BULLET:
bullet = object
else:
bullet = other_object
bullet.kill()
# matrix defining collision behavior
self.collision_matrix = [
# NULL PLAYER ALLY ENEMY SPAWNER BULLET CONTAINER DECORATION LABEL WALL TRAPDOOR DIALOG TEXTINPUT
[ [HIT], ],# NULL
[ [HIT], None, ],# PLAYER
[ None, None, None, ],# ALLY
[ None, [HIT,MDM], [HIT,MDM], None, ],# ENEMY
[ None, None, None, None, None, ],# SPAWNER
[ None, [TMW,MDM,BKS], [TMW,MDM,BKS], [TMW,MDM,BKS], None, None, ],# BULLET
[ None, [HIT], [HIT], None, None, None, None, ],# CONTAINER
[ None, None, None, None, None, None, None, None, ],# DECORATION
[ None, None, None, None, None, None, None, None, None, ],# LABEL
[ [HIT], [HIT], [HIT], [HIT], None, [BNC], [HIT], None, None, None, ],# WALL
[ None, [TRP], None, None, None, None, None, None, None, None, None, ],# TRAPDOOR
[ None, None, None, None, None, None, None, None, None, None, None, None, ],# DIALOG
[ None, None, None, None, None, None, None, None, None, None, None, None, None, ],# TEXTINPUT
]
def get_on_collide(self, object, other_object):
#TODO - handle invincibility
# if self.is_invincible:
# self.process_collision[str(object.type.name)][:-1]
srt = sorted([object.type, other_object.type], reverse=True)
return self.collision_matrix[srt[0]][srt[1]]
def handle_collision(self, object, other_object):
functions = self.get_on_collide(object, other_object)
if functions:
if Rect(object.hitbox_position, object.hitbox_size).colliderect(Rect(other_object.hitbox_position, other_object.hitbox_size)):
for function in functions:
srt = sorted([object, other_object], key=lambda x: x.type, reverse=True) # pass object with higher type number as first parameter
if function(srt[0], srt[1]) is False: break # break when one of the handlers returns False
return True
return False
#TODO optimize it
def handle_all_collisions(self, to_collide):
for object in to_collide:
for other_object in to_collide:
if object is not other_object:
self.handle_collision(object, other_object)
| 53.460606
| 155
| 0.488833
|
7952528a99e67b752388a5669b6b0b5216d34e2c
| 3,827
|
py
|
Python
|
tests/components/sensor/test_rflink.py
|
bxnyintuc/home-assistant
|
70fff26383ed02cd8acf4c0f336e8d9ab9e0d598
|
[
"Apache-2.0"
] | null | null | null |
tests/components/sensor/test_rflink.py
|
bxnyintuc/home-assistant
|
70fff26383ed02cd8acf4c0f336e8d9ab9e0d598
|
[
"Apache-2.0"
] | null | null | null |
tests/components/sensor/test_rflink.py
|
bxnyintuc/home-assistant
|
70fff26383ed02cd8acf4c0f336e8d9ab9e0d598
|
[
"Apache-2.0"
] | null | null | null |
"""Test for RFlink sensor components.
Test setup of rflink sensor component/platform. Verify manual and
automatic sensor creation.
"""
from homeassistant.components.rflink import (
CONF_RECONNECT_INTERVAL)
from homeassistant.const import STATE_UNKNOWN
from ..test_rflink import mock_rflink
DOMAIN = 'sensor'
CONFIG = {
'rflink': {
'port': '/dev/ttyABC0',
'ignore_devices': ['ignore_wildcard_*', 'ignore_sensor'],
},
DOMAIN: {
'platform': 'rflink',
'devices': {
'test': {
'name': 'test',
'sensor_type': 'temperature',
},
},
},
}
async def test_default_setup(hass, monkeypatch):
"""Test all basic functionality of the rflink sensor component."""
# setup mocking rflink module
event_callback, create, _, _ = await mock_rflink(
hass, CONFIG, DOMAIN, monkeypatch)
# make sure arguments are passed
assert create.call_args_list[0][1]['ignore']
# test default state of sensor loaded from config
config_sensor = hass.states.get('sensor.test')
assert config_sensor
assert config_sensor.state == 'unknown'
assert config_sensor.attributes['unit_of_measurement'] == '°C'
# test event for config sensor
event_callback({
'id': 'test',
'sensor': 'temperature',
'value': 1,
'unit': '°C',
})
await hass.async_block_till_done()
assert hass.states.get('sensor.test').state == '1'
# test event for new unconfigured sensor
event_callback({
'id': 'test2',
'sensor': 'temperature',
'value': 0,
'unit': '°C',
})
await hass.async_block_till_done()
# test state of new sensor
new_sensor = hass.states.get('sensor.test2')
assert new_sensor
assert new_sensor.state == '0'
assert new_sensor.attributes['unit_of_measurement'] == '°C'
assert new_sensor.attributes['icon'] == 'mdi:thermometer'
async def test_disable_automatic_add(hass, monkeypatch):
"""If disabled new devices should not be automatically added."""
config = {
'rflink': {
'port': '/dev/ttyABC0',
},
DOMAIN: {
'platform': 'rflink',
'automatic_add': False,
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(
hass, config, DOMAIN, monkeypatch)
# test event for new unconfigured sensor
event_callback({
'id': 'test2',
'sensor': 'temperature',
'value': 0,
'unit': '°C',
})
await hass.async_block_till_done()
# make sure new device is not added
assert not hass.states.get('sensor.test2')
async def test_entity_availability(hass, monkeypatch):
"""If Rflink device is disconnected, entities should become unavailable."""
    # Make sure Rflink mock does not 'recover' too quickly from the
# disconnect or else the unavailability cannot be measured
config = CONFIG
failures = [True, True]
config[CONF_RECONNECT_INTERVAL] = 60
# Create platform and entities
_, _, _, disconnect_callback = await mock_rflink(
hass, config, DOMAIN, monkeypatch, failures=failures)
# Entities are available by default
assert hass.states.get('sensor.test').state == STATE_UNKNOWN
# Mock a disconnect of the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entity should be unavailable
assert hass.states.get('sensor.test').state == 'unavailable'
# Reconnect the Rflink device
disconnect_callback()
# Wait for dispatch events to propagate
await hass.async_block_till_done()
# Entities should be available again
assert hass.states.get('sensor.test').state == STATE_UNKNOWN
| 28.139706
| 79
| 0.642279
|
795253857d177009dd688f2eadfc2447131b928d
| 3,323
|
py
|
Python
|
asposeslidescloud/models/i_shape_export_options.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | null | null | null |
asposeslidescloud/models/i_shape_export_options.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | null | null | null |
asposeslidescloud/models/i_shape_export_options.py
|
rizwanniazigroupdocs/aspose-slides-cloud-python
|
f692a7082387350f80f0b389c1914e33b800a76f
|
[
"MIT"
] | 1
|
2020-12-25T16:28:49.000Z
|
2020-12-25T16:28:49.000Z
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose">
# Copyright (c) 2018 Aspose.Slides for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class IShapeExportOptions(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
type_determiners = {
}
def __init__(self): # noqa: E501
"""IShapeExportOptions - a model defined in Swagger""" # noqa: E501
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, IShapeExportOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 33.908163
| 85
| 0.585615
|
795253cc36d0064e285be6f46b6d3bdeab66069c
| 2,603
|
py
|
Python
|
esperancanordeste/context_processors.py
|
klebercode/esperancanordeste
|
5ceb28f9b04619db21e83c2417e453b7c84647a6
|
[
"MIT"
] | null | null | null |
esperancanordeste/context_processors.py
|
klebercode/esperancanordeste
|
5ceb28f9b04619db21e83c2417e453b7c84647a6
|
[
"MIT"
] | null | null | null |
esperancanordeste/context_processors.py
|
klebercode/esperancanordeste
|
5ceb28f9b04619db21e83c2417e453b7c84647a6
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from django.shortcuts import get_object_or_404
from esperancanordeste.core.models import Enterprise, SocialLogo
from esperancanordeste.catalog.models import Category
from esperancanordeste.newsletter.models import Subscribe
from esperancanordeste.newsletter.forms import SubscribeForm
def enterprise_proc(request):
""" View Function """
context = {}
try:
enterprise = get_object_or_404(Enterprise, pk=1)
except:
enterprise = ''
social_list = SocialLogo.objects.all()
category_list = Category.objects.all()
# newsletter
if request.method == 'POST' and 'subscribe' in request.POST:
newsletter_form = SubscribeForm(request.POST, prefix='Subscribe')
if newsletter_form.is_valid():
obj = Subscribe.objects.filter(
email=newsletter_form.cleaned_data['email'])
if not obj:
newsletter_form.save()
context['subscribe_success'] = True
else:
context['subscribe_exist'] = True
else:
newsletter_form = SubscribeForm(prefix='Subscribe')
context['enterprise'] = enterprise
context['social_list'] = social_list
context['cat_list'] = category_list
context['newsletter_form'] = newsletter_form
return context
class EnterpriseExtraContext(object):
""" Class Based View """
try:
enterprise = get_object_or_404(Enterprise, pk=1)
except:
enterprise = ''
social_list = SocialLogo.objects.all()
category_list = Category.objects.all()
extra_context = {
'enterprise': enterprise,
'social_list': social_list,
'cat_list': category_list,
}
def process_request(self, request):
extra_context = {}
# newsletter
if request.method == 'POST' and 'subscribe' in request.POST:
newsletter_form = SubscribeForm(request.POST, prefix='Subscribe')
if newsletter_form.is_valid():
obj = Subscribe.objects.filter(
email=newsletter_form.cleaned_data['email'])
if not obj:
newsletter_form.save()
extra_context['subscribe_success'] = True
else:
extra_context['subscribe_exist'] = True
else:
newsletter_form = SubscribeForm(prefix='Subscribe')
def get_context_data(self, **kwargs):
context = super(EnterpriseExtraContext,
self).get_context_data(**kwargs)
context.update(self.extra_context)
return context
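# Hedged configuration sketch (not part of the original module): enterprise_proc
# only runs if it is registered as a template context processor. The dotted path
# below assumes this module lives at esperancanordeste/context_processors.py, as
# the repository path suggests; the surrounding settings entries are made up.
#
#   # settings.py (excerpt)
#   TEMPLATES = [{
#       'BACKEND': 'django.template.backends.django.DjangoTemplates',
#       'APP_DIRS': True,
#       'OPTIONS': {
#           'context_processors': [
#               'django.template.context_processors.request',
#               'esperancanordeste.context_processors.enterprise_proc',
#           ],
#       },
#   }]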
| 31.743902
| 77
| 0.632731
|
795255ce6c7514658d17d893e1e7fb1bfd0b7a60
| 5,872
|
py
|
Python
|
push_notifications/south_migrations/0002_auto__add_field_apnsdevice_date_created__add_field_gcmdevice_date_created.py
|
peterhinson/django-push-notifications
|
bf4e781aa4e67ea287027aa0d93c0faa0953c591
|
[
"MIT"
] | 1
|
2015-03-04T04:30:19.000Z
|
2015-03-04T04:30:19.000Z
|
push_notifications/south_migrations/0002_auto__add_field_apnsdevice_date_created__add_field_gcmdevice_date_created.py
|
peterhinson/django-push-notifications
|
bf4e781aa4e67ea287027aa0d93c0faa0953c591
|
[
"MIT"
] | null | null | null |
push_notifications/south_migrations/0002_auto__add_field_apnsdevice_date_created__add_field_gcmdevice_date_created.py
|
peterhinson/django-push-notifications
|
bf4e781aa4e67ea287027aa0d93c0faa0953c591
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
AUTH_USER_MODEL = getattr(settings, "AUTH_USER_MODEL", "auth.User")
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field "APNSDevice.date_created"
db.add_column(
"push_notifications_apnsdevice", "date_created",
self.gf("django.db.models.fields.DateTimeField")(auto_now_add=True, null=True, blank=True),
keep_default=False
)
# Adding field "GCMDevice.date_created"
db.add_column(
"push_notifications_gcmdevice", "date_created",
self.gf("django.db.models.fields.DateTimeField")(auto_now_add=True, null=True, blank=True),
keep_default=False
)
def backwards(self, orm):
# Deleting field "APNSDevice.date_created"
db.delete_column("push_notifications_apnsdevice", "date_created")
# Deleting field "GCMDevice.date_created"
db.delete_column("push_notifications_gcmdevice", "date_created")
models = {
u"auth.group": {
"Meta": {"object_name": "Group"},
"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"unique": "True", "max_length": "80"}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {
'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')",
'unique_together': "((u'content_type', u'codename'),)",
"object_name": "Permission"
},
"codename": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
"content_type": ("django.db.models.fields.related.ForeignKey", [], {"to": u"orm['contenttypes.ContentType']"}),
"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "50"})
},
"%s.%s" % (User._meta.app_label, User._meta.module_name): {
"Meta": {"object_name": User.__name__, 'db_table': "'%s'" % User._meta.db_table},
"date_joined": ("django.db.models.fields.DateTimeField", [], {"default": "datetime.datetime.now"}),
"email": ("django.db.models.fields.EmailField", [], {"max_length": "75", "blank": "True"}),
"first_name": ("django.db.models.fields.CharField", [], {"max_length": "30", "blank": "True"}),
"groups": ("django.db.models.fields.related.ManyToManyField", [],
{"to": u"orm['auth.Group']", "symmetrical": "False", "blank": "True"}),
u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"is_active": ("django.db.models.fields.BooleanField", [], {"default": "True"}),
"is_staff": ("django.db.models.fields.BooleanField", [], {"default": "False"}),
"is_superuser": ("django.db.models.fields.BooleanField", [], {"default": "False"}),
"last_login": ("django.db.models.fields.DateTimeField", [], {"default": "datetime.datetime.now"}),
"last_name": ("django.db.models.fields.CharField", [], {"max_length": "30", "blank": "True"}),
"password": ("django.db.models.fields.CharField", [], {"max_length": "128"}),
"user_permissions": ("django.db.models.fields.related.ManyToManyField", [],
{"to": u"orm['auth.Permission']", "symmetrical": "False", "blank": "True"}),
"username": ("django.db.models.fields.CharField", [], {"unique": "True", "max_length": "30"})
},
u"contenttypes.contenttype": {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
"app_label": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"model": ("django.db.models.fields.CharField", [], {"max_length": "100"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "100"})
},
u'push_notifications.apnsdevice': {
"Meta": {"object_name": "APNSDevice"},
"active": ("django.db.models.fields.BooleanField", [], {"default": "True"}),
"date_created": ("django.db.models.fields.DateTimeField", [], {"auto_now_add": "True", "null": "True", "blank": "True"}),
"device_id": ("uuidfield.fields.UUIDField", [], {"max_length": "32", "null": "True", "blank": "True"}),
"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "255", "null": "True", "blank": "True"}),
"registration_id": ("django.db.models.fields.CharField", [], {"unique": "True", "max_length": "64"}),
"user": ("django.db.models.fields.related.ForeignKey", [],
{"to": u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name), "null": "True", "blank": "True"})
},
u"push_notifications.gcmdevice": {
"Meta": {"object_name": "GCMDevice"},
"active": ("django.db.models.fields.BooleanField", [], {"default": "True"}),
"date_created": ("django.db.models.fields.DateTimeField", [], {"auto_now_add": "True", "null": "True", "blank": "True"}),
"device_id": ("push_notifications.fields.HexIntegerField", [], {"null": "True", "blank": "True"}),
"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
"name": ("django.db.models.fields.CharField", [], {"max_length": "255", "null": "True", "blank": "True"}),
"registration_id": ("django.db.models.fields.TextField", [], {}),
"user": ("django.db.models.fields.related.ForeignKey", [],
{"to": u"orm['%s.%s']" % (User._meta.app_label, User._meta.object_name), "null": "True", "blank": "True"})
}
}
complete_apps = ['push_notifications']
| 53.87156
| 152
| 0.647139
|
795255ce9558cdcebad46a90da7b6ff0c9173971
| 11,176
|
py
|
Python
|
lib/fast_rcnn/config.py
|
Turoad/py-RFCN-priv
|
f2837b0aa2b74941bf5a62304798b74547d486f4
|
[
"MIT"
] | 1
|
2021-05-14T15:12:22.000Z
|
2021-05-14T15:12:22.000Z
|
lib/fast_rcnn/config.py
|
Turoad/py-RFCN-priv
|
f2837b0aa2b74941bf5a62304798b74547d486f4
|
[
"MIT"
] | null | null | null |
lib/fast_rcnn/config.py
|
Turoad/py-RFCN-priv
|
f2837b0aa2b74941bf5a62304798b74547d486f4
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Fast R-CNN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
import getpass
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.DATA_ROOT = ' ' # Root dir for dataset
__C.TRAIN.LABELMAP_PATH = ' ' # Labelmap for classes to ID
__C.TRAIN.SOURCE = ' ' # Train list for images and annotation XML or category label
__C.TRAIN.MIN_AREA = 16 # Min area of bbox
__C.TRAIN.MAX_RATIO = 32 # Max long/short ratio of bbox
__C.TRAIN.LABEL_SHUFFLING = False # Label shuffle bootstrapping
__C.TRAIN.SHUFFLE = True # Shuffle train images
__C.TRAIN.USE_FLIPPED = True # Use horizontally-flipped images during training?
__C.TRAIN.COLOR_FACTOR = (0.95, 1.05) # color aug
__C.TRAIN.CONTRAST_FACTOR = (0.95, 1.05) # contrast aug
__C.TRAIN.BRIGTHNESS_FACTOR = (0.95, 1.05) # brightness aug
__C.TRAIN.GAUSSIAN_BLUR = False # 1/4 prop for (3, 3) gaussian blur
__C.TRAIN.CROP = False # im crop implement as SSD
__C.TRAIN.CROP_MAX_ATTEMPTS = 200
__C.TRAIN.CROP_AREA = (0.5, 1)
__C.TRAIN.CROP_RATIO = (0.5, 2)
__C.TRAIN.MIN_COVERED = 0.5
# Scales to use during training (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1000
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 40000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
__C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.RPN_NORMALIZE_TARGETS = False
__C.TRAIN.RPN_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.RPN_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'selective_search'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = False
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor is satisfied by both the positive and negative conditions, set it to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
__C.TRAIN.ONLY_INSIDE_ANCHORS = True
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 8
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Parameters for "Online Hard-example Mining Algorithm"
__C.TRAIN.USE_OHEM = False
# For diversity and de-duplication
__C.TRAIN.OHEM_USE_NMS = True
__C.TRAIN.OHEM_NMS_THRESH = 0.7
# whether use class aware box or not
__C.TRAIN.AGNOSTIC = False
#
# Testing options
#
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Flag for soft-NMS method. 0 performs standard NMS, 1 performs soft-NMS with linear weighting and
# 2 performs soft-NMS with Gaussian weighting
__C.TEST.SOFT_NMS = 1
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'selective_search'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 8
# whether use class aware box or not
__C.TEST.AGNOSTIC = False
# Apply bounding box voting
__C.TEST.BBOX_VOTE = False
# Apply box scoring heuristics
__C.TEST.BBOX_VOTE_N_WEIGHTED_SCORE = 1
__C.TEST.BBOX_VOTE_WEIGHT_EMPTY = 0.5
__C.TEST.ITER_BBOX_REG = False
__C.TEST.ADD_OLD_BBOX = False
__C.TEST.ITER_THRESH = 0.0
__C.TEST.BBOX_REG_BATCH_SIZE = 300
#
# MISC
#
__C.DATABASE_ROOT = osp.abspath(osp.join('/home', getpass.getuser(), 'Database')) # for example: /home/priv/Database
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1./16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
__C.PIXEL_STDS = np.array([[[1.0, 1.0, 1.0]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Model directory
__C.MODELS_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'models', 'pascal_voc'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net=None):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is not None:
outdir = osp.join(outdir, net.name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.iteritems():
# a must specify keys that are in b
if not b.has_key(k):
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print('Error under config key: {}'.format(k))
raise
else:
b[k] = v
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
with open(filename, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert d.has_key(subkey)
d = d[subkey]
subkey = key_list[-1]
assert d.has_key(subkey)
try:
value = literal_eval(v)
except:
# handle the case when v is a string literal
value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
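# Hedged usage sketch (appended for illustration, not part of the original
# file): overriding defaults in the two ways the module docstring describes.
# The YAML path is hypothetical; cfg_from_list() runs as-is.
if __name__ == '__main__':
    # cfg_from_file('experiments/cfgs/example.yml')  # hypothetical override file
    cfg_from_list(['TRAIN.IMS_PER_BATCH', '1', 'TEST.NMS', '0.4'])
    print(cfg.TRAIN.IMS_PER_BATCH, cfg.TEST.NMS)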
| 32.967552
| 118
| 0.698908
|
79525657ca25cd7a1a45c008198d2521372793ef
| 18,836
|
py
|
Python
|
data/level/level124.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | null | null | null |
data/level/level124.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | 6
|
2020-07-04T02:53:08.000Z
|
2022-03-11T23:53:14.000Z
|
data/level/level124.py
|
levelupai/match3-level-similarity
|
cc9b28b8741b41bea1273c8bc9b4d265d79a1dca
|
[
"Apache-2.0"
] | 3
|
2019-12-31T11:42:59.000Z
|
2021-03-28T20:06:13.000Z
|
data = {
'level_index': 124,
'move_count': 32,
'board_info': {
(0, 0): {
'base': (14, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 1): {
'base': (4, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 2): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 3): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 4): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 5): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 8): {
'base': (50, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 10): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 11): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 12): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 13): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 14): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 15): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 16): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(0, 17): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 0): {
'base': (66, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 1): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 2): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 3): {
'base': (66, 2),
'next': (0, 1),
'prev': (0, -1)
},
(1, 4): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 5): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 6): {
'base': (66, 2),
'next': (0, 1),
'prev': (0, -1)
},
(1, 8): {
'base': (50, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 9): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 10): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 11): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 12): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 13): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 14): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 15): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 16): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(1, 17): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 5): {
'base': (1, 1),
'cover': (63, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 6): {
'base': (2, 1),
'cover': (63, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 8): {
'base': (4, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 9): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 10): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 11): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 12): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 14): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 15): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 16): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(2, 17): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 0): {
'base': (5, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 1): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 2): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 3): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 4): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 5): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 6): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 8): {
'base': (5, 1),
'cover': (64, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 9): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 10): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 11): {
'base': (50, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(3, 15): {
'base': (50, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(3, 16): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(3, 17): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 0): {
'base': (4, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 1): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 2): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 3): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 4): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 5): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 6): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 8): {
'base': (50, 1),
'cover': (64, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 10): {
'base': (4, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 16): {
'base': (5, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(4, 17): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 0): {
'base': (1, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 1): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 2): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 3): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 4): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 5): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 6): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 12): {
'base': (1, 1),
'cover': (64, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(5, 13): {
'base': (50, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(5, 14): {
'base': (50, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(6, 0): {
'base': (1, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 1): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 2): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 3): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 4): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 5): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 6): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 8): {
'base': (50, 1),
'cover': (64, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 10): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 16): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(6, 17): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 0): {
'base': (2, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 1): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 2): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 3): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 4): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 5): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 6): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 8): {
'base': (5, 1),
'cover': (64, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 9): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 10): {
'base': (4, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 11): {
'base': (50, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(7, 15): {
'base': (4, 1),
'cover': (64, 2),
'next': (0, 1),
'prev': (0, -1)
},
(7, 16): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(7, 17): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 5): {
'base': (5, 1),
'cover': (63, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 6): {
'base': (5, 1),
'cover': (63, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 8): {
'base': (1, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 9): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 10): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 11): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 12): {
'base': (5, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 14): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 15): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 16): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(8, 17): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 0): {
'base': (66, 2),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 1): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 2): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 3): {
'base': (66, 2),
'next': (0, 1),
'prev': (0, -1)
},
(9, 4): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 5): {
'base': (66, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 6): {
'base': (66, 2),
'next': (0, 1),
'prev': (0, -1)
},
(9, 8): {
'base': (50, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 9): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 10): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 11): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 12): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 13): {
'base': (6, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 14): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 15): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 16): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(9, 17): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 0): {
'base': (14, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 1): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 2): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 3): {
'base': (2, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 4): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 5): {
'base': (50, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 8): {
'base': (50, 1),
'cover': (64, 1),
'fall_point': (0, -1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 10): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 11): {
'base': (1, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 12): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 13): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 14): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 15): {
'base': (2, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 16): {
'base': (5, 1),
'next': (0, 1),
'prev': (0, -1)
},
(10, 17): {
'base': (14, 1),
'cover': (64, 1),
'next': (0, 1),
'prev': (0, -1)
}
},
'trans_info': {
(0, 0): {
14: 17,
50: 10
},
(0, 9): {
50: 10
}
}
}
| 23.283066
| 34
| 0.196114
|
7952568943402b63936a3af28376de4aaa6410c4
| 700
|
py
|
Python
|
nbr/schemas/session.py
|
zhivykh/nbrunner
|
b5b77aec3b9c71d594ca116f2b5eefeb08dd0475
|
[
"MIT"
] | null | null | null |
nbr/schemas/session.py
|
zhivykh/nbrunner
|
b5b77aec3b9c71d594ca116f2b5eefeb08dd0475
|
[
"MIT"
] | null | null | null |
nbr/schemas/session.py
|
zhivykh/nbrunner
|
b5b77aec3b9c71d594ca116f2b5eefeb08dd0475
|
[
"MIT"
] | null | null | null |
from typing import Optional
from pydantic import BaseModel
class Kernel(BaseModel):
"""Kernel model."""
id: str
name: str
last_activity: str
execution_state: str
connections: int
class Notebook(BaseModel):
"""Notebook model."""
path: Optional[str] = None
name: str
class Session(BaseModel):
"""Session model."""
id: str
path: str
name: str
type: str
kernel: Kernel
notebook: Notebook
class KernelName(BaseModel):
"""Kernel name scheme."""
name: str = "python3"
class CreateSession(BaseModel):
"""Session scheme."""
kernel: KernelName = KernelName()
name: str
path: str
type: str = "notebook"
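# Hedged usage sketch (not part of the original module): building the payload
# for creating a notebook session; the name/path values are made up.
if __name__ == "__main__":
    request = CreateSession(name="Untitled.ipynb", path="Untitled.ipynb")
    # kernel and type fall back to their defaults ("python3" / "notebook")
    print(request)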
| 14.893617
| 37
| 0.627143
|
79525894106470b417346e762a67939d672214ea
| 23,559
|
py
|
Python
|
uspy/features/lsr.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
uspy/features/lsr.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
uspy/features/lsr.py
|
jwarndt/uspy
|
ab5bb73f9243a1d7978c83ccb63e7189fc18cd8a
|
[
"MIT"
] | null | null | null |
import math
import os
import cv2
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
import skimage
from skimage.color import rgb2gray
from skimage._shared._warnings import expected_warnings
from scipy.stats import entropy
import numpy as np
from nmapy.utilities.io import *
class LSR_data_object:
def __init__(self, orientation_image, region_support, labeled_regions, label_id, orientation_threshold):
self.orientation_image = orientation_image
self.region_support = region_support
self.labeled_regions = labeled_regions
self.label_id = label_id
self.orientation_threshold = orientation_threshold
def __calc_mag_ang(im):
dx = cv2.Sobel(np.float32(im), cv2.CV_32F, 1, 0, ksize=7)
dy = cv2.Sobel(np.float32(im), cv2.CV_32F, 0, 1, ksize=7)
mag, ang = cv2.cartToPolar(dx, dy, angleInDegrees=1)
return mag, ang, dx, dy
def write_lsr_shapefile(lsf_arr, output, geotran):
ulx = geotran[0]
uly = geotran[3]
cell_width = geotran[1]
cell_height = geotran[5]
vector_driver = ogr.GetDriverByName("ESRI Shapefile")
vector_ds = vector_driver.CreateDataSource(output)
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
layer = vector_ds.CreateLayer(output[:-4], srs, ogr.wkbLineString)
line_length = ogr.FieldDefn("len", ogr.OFTReal)
layer.CreateField(line_length)
line_ori = ogr.FieldDefn("ori", ogr.OFTReal)
layer.CreateField(line_ori)
line_con = ogr.FieldDefn("con", ogr.OFTReal)
layer.CreateField(line_con)
for n in lsf_arr:
out_feature = ogr.Feature(layer.GetLayerDefn())
out_feature.SetField("len", n[0])
out_feature.SetField("ori", np.rad2deg(n[3])+180)
out_feature.SetField("con", n[4])
dx = n[0]/(np.sqrt(4*(1+np.tan(n[3])**2)));
dy = np.tan(n[3])*dx;
x1 = ulx + (n[1]+dx) * cell_width
y1 = uly + (n[2]+dy) * cell_height
x2 = ulx + (n[1]-dx) * cell_width
y2 = uly + (n[2]-dy) * cell_height
line = ogr.Geometry(ogr.wkbLineString)
line.AddPoint(x1, y1)
line.AddPoint(x2, y2)
#wkt_geom = line.ExportToWkt()
out_feature.SetGeometry(line)
layer.CreateFeature(out_feature)
out_feature = None
vector_ds = None
def lsr_feature(image_name, output, block, scale, mag_threshold=20, lsr_threshold=20, distance_threshold=8, orientation_threshold=22.5):
assert(type(block) != None and type(scale) != None)
ds = gdal.Open(image_name)
image = ds.ReadAsArray()
geotran = ds.GetGeoTransform()
ulx = geotran[0]
uly = geotran[3]
in_cell_width = geotran[1]
in_cell_height = geotran[5]
ds = None
# block and scale parameters are in meters
# convert meters to image space (number of pixels)
# the conversion is very crude at the moment, should really
# be using projected data
if "wv2" in image_name:
cell_width = 0.46
if "wv3" in image_name:
cell_width = 0.31
# in number of pixels relative to the input data GSD
block = int(block / cell_width)
scale = int(scale / cell_width)
out_srs = osr.SpatialReference()
out_srs.ImportFromEPSG(4326)
out_srs_wkt = out_srs.ExportToWkt()
out_cell_width = block * in_cell_width
out_cell_height = block * in_cell_height
image = np.moveaxis(image, 0, -1) # expects an image in rows, columns, channels
with expected_warnings(['precision']):
image = skimage.img_as_ubyte(rgb2gray(image))
lsr_im = line_support_regions(image)
    out_image = []  # collect one row of feature vectors per block (missing in the original)
    for i in range(0, lsr_im.shape[0], block):
outrow = []
for j in range(0, lsr_im.shape[1], block):
center_i = int(i+block/2.)
center_j = int(j+block/2.)
if center_i-int(scale/2.) < 0:
top = 0
else:
top = center_i-int(scale/2.)
if center_i+int(scale/2.) > lsr_im.shape[0]:
bot = lsr_im.shape[0]
else:
bot = center_i+int(scale/2.)
if center_j-int(scale/2.) < 0:
left = 0
else:
left = center_j-int(scale/2.)
if center_j+int(scale/2.) > lsr_im.shape[1]:
right = lsr_im.shape[1]
else:
right = center_j+int(scale/2.)
scale_arr = lsr_im[top:bot+1,left:right+1]
feat_vec = __lsr_hist_feature(scale_arr)
outrow.append(feat_vec)
out_image.append(outrow)
out_image = np.moveaxis(out_image, -1, 0)
if output:
out_geotran = (ulx, out_cell_width, 0, uly, 0, out_cell_height)
write_geotiff(output, out_image, out_geotran, out_srs_wkt)
return np.array(out_image)
def lsr_feat_vec(image_name, scales):
ds = gdal.Open(image_name)
image = ds.ReadAsArray()
geotran = ds.GetGeoTransform()
ulx = geotran[0]
uly = geotran[3]
in_cell_width = geotran[1]
in_cell_height = geotran[5]
ds = None
image = np.moveaxis(image, 0, -1) # expects an image in rows, columns, channels
with expected_warnings(['precision']):
image = skimage.img_as_ubyte(rgb2gray(image))
lsr_im = line_support_regions(image)
# center pixel location
center_i = int(image.shape[0] / 2.)
center_j = int(image.shape[1] / 2.)
if "wv2" in image_name:
cell_width = 0.46
if "wv3" in image_name:
cell_width = 0.31
out = []
for s in scales:
# convert meters to pixel counts
n_pixels = int(s / cell_width) # number of pixels for the scale
if center_i-int(n_pixels/2) < 0:
top = 0
else:
top = center_i-int(n_pixels/2)
if center_i+int(n_pixels/2) > lsr_im.shape[0]:
bot = image.shape[0]
else:
bot = center_i+int(n_pixels/2)
if center_j-int(n_pixels/2) < 0:
left = 0
else:
left = center_j-int(n_pixels/2)
if center_j+int(n_pixels/2) > lsr_im.shape[1]:
right = image.shape[1]
else:
right = center_j+int(n_pixels/2)
feat_vec = __lsr_hist_feature(lsr_im[top:bot+1,left:right+1])
out.append(feat_vec)
return np.array(out).flatten()
def __lsr_hist_feature(lsr_im, orders=[1,2], peak_nums=2):
"""
1. number of lines
2. line length mean
3. line length variance
4. line orientation variance
5. line contrast mean
6. line orientation entropy
7. line length entropy
8. line contrast entropy
"""
feat_vec = []
orientations = lsr_im[1].flatten()
orientations = orientations[np.where(orientations != -1)]
lengths = lsr_im[0].flatten()
lengths = lengths[np.where(lengths != -1)]
contrasts = lsr_im[2].flatten()
contrasts = contrasts[np.where(contrasts != -1)]
feat_vec.append(len(lengths))
feat_vec.append(np.mean(lengths))
feat_vec.append(np.var(lengths))
feat_vec.append(np.var(orientations))
feat_vec.append(np.mean(contrasts))
or_bins = np.linspace(90, 270, 51)
len_bins = np.linspace(0, 200, 51)
con_bins = np.linspace(0, 100, 51)
or_hist = np.histogram(orientations, or_bins, density=True)
len_hist = np.histogram(lengths, len_bins, density=True)
con_hist = np.histogram(contrasts, con_bins, density=True)
or_ent = entropy(or_hist[0])
len_ent = entropy(len_hist[0])
con_ent = entropy(con_hist[0])
feat_vec.append(or_ent)
feat_vec.append(len_ent)
feat_vec.append(con_ent)
return np.array(feat_vec)
def line_support_regions(array, mag_threshold=20, lsr_threshold=20, distance_threshold=8, orientation_threshold=22.5):
"""
input is a gray scale image
"""
# calculate gradient orientation and magnitude
mag, ang, dx, dy = __calc_mag_ang(array)
mag *= 0.001
# tmp(edmim<magThreshold)=-1;
temp = np.where(mag < mag_threshold, -1, ang) # set pixels to -1 if magnitude is below the mag_threshold
data_ob = __label_regions(temp, distance_threshold, orientation_threshold)
lsr_m = data_ob.labeled_regions
line_idx = np.unique(lsr_m)
# lsfarr = zeros(max(lsrM(:)),5);
    lsf_arr = np.zeros(shape=(np.max(lsr_m), 9))
    # the 3-band output raster (length, orientation, contrast) filled in the loop
    # below was never initialised in the original; -1 marks cells with no line
    lsf_raster = np.ones(shape=(3, array.shape[0], array.shape[1])) * -1
count = 0
l = 1
for l in range(1, np.max(line_idx)):
# idx=find(lsrM==l);
idx = np.argwhere(lsr_m.ravel() == l) # returns an array of indices
# eim = zeros(size(im));
eim = np.zeros(shape=temp.shape)
# eim(idx) = 1;
eim = np.where(lsr_m == l, 1, eim)
# if (sum(eim(:)) <= lsrThreshold)
if np.sum(eim) <= lsr_threshold: # ignore small line support region
continue
# ix_wi = dx(idx)
# iy_wi = dy(idx)
Ix_wi = dx.ravel()[idx] # extract elements in dx at index locations where lsr_m == l
Iy_wi = dy.ravel()[idx]
grd_wi = mag.ravel()[idx]
# find major orientation
ST = [[np.sum(Ix_wi**2), np.sum(Ix_wi*Iy_wi)],
[np.sum(Ix_wi*Iy_wi), np.sum(Iy_wi**2)]]
# V, D = eig(ST)
        # matlab returns a diagonal matrix D of eigenvalues and a matrix V whose columns are the corresponding right eigenvectors, so that A*V = V*D.
D, V = np.linalg.eig(ST) # python's return of D is a 1D array,
D = np.diag(D) # make D on the diagonal to conform to matlab's procedure
# if D(1,1)<D(2,2)
# lorn=atan(V(2,1)/V(1,1));
# else
# lorn=atan(V(2,2)/V(1,2));
# end
if D[0][0] < D[1][1]:
# lorn=atan(V(2,1)/V(1,1));
lorn = np.arctan(V[1][0]/V[0][0])
else:
# lorn=atan(V(2,2)/V(1,2));
lorn = np.arctan(V[1][1]/V[0][1])
# vote for r
# [Ytmp,Xtmp]=ind2sub(size(im),idx);
Ytmp, Xtmp = np.unravel_index(idx, temp.shape)
Ytmp+=1 # indices need += 1 for some indexing weirdness...
Xtmp+=1
# Raccm=round(Xtmp.*cos(lorn-pi/2)+Ytmp.*sin(lorn-pi/2));
Raccm=np.round(Xtmp*math.cos(lorn-(math.pi/2))+Ytmp*math.sin(lorn-(math.pi/2)))
rng=np.arange(Raccm.min(),Raccm.max()+1)
accm=np.zeros(shape=(len(rng)))
for k in range(len(idx)):
rc = np.round(Xtmp[k]*math.cos(lorn-math.pi/2)+Ytmp[k]*math.sin(lorn-math.pi/2))
accm[np.where(rng==rc)] = accm[np.where(rng==rc)] + grd_wi[k]
mxid = np.argmax(accm)
Xmx=max(Xtmp[np.where(Raccm==rng[mxid])])
Xmn=min(Xtmp[np.where(Raccm==rng[mxid])])
Ymx=max(Ytmp[np.where(Raccm==rng[mxid])])
Ymn=min(Ytmp[np.where(Raccm==rng[mxid])])
lmx = ((Xmx+Xmn)/2) - 1
lmy = ((Ymx+Ymn)/2) - 1
llen = math.sqrt((Xmx-Xmn)**2+(Ymx-Ymn)**2)
lsf_arr[count][0] = llen
lsf_arr[count][1] = lmx
lsf_arr[count][2] = lmy
lsf_arr[count][3] = lorn
lcon=np.mean(grd_wi[(np.where(Raccm==rng[mxid]))])
lsf_arr[count][4] = lcon
lsf_raster[0][int(lmy)][int(lmx)] = llen
lsf_raster[1][int(lmy)][int(lmx)] = np.rad2deg(lorn)+180
lsf_raster[2][int(lmy)][int(lmx)] = lcon
count+=1
lsf_arr = lsf_arr[0:count,:]
return lsf_raster
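# Usage sketch (illustrative; not part of the original module). line_support_regions takes a
# single-band array and returns the 3-band raster described above; the helper name and the way
# the bands are unpacked here are assumptions for demonstration only.
def _example_line_support_regions(gray_array):
    """Run line_support_regions on a 2-D grayscale array and pull out the detected lines."""
    lsf_raster = line_support_regions(gray_array, mag_threshold=20, lsr_threshold=20)
    lengths = lsf_raster[0][lsf_raster[0] != -1]       # line lengths in pixels
    orientations = lsf_raster[1][lsf_raster[1] != -1]  # orientations in degrees
    contrasts = lsf_raster[2][lsf_raster[2] != -1]     # mean gradient contrast per line
    return lengths, orientations, contrasts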
def calc_line_support_regions(image_name, mag_threshold=20, lsr_threshold=20, distance_threshold=8, orientation_threshold=22.5, output_lsr_shapefile=False):
"""
Parameters:
------------
image_name: str
the image filename
mag_threshold: int or float
pixels with magnitude above mag_threshold are considered for line support regions
lsr_threshold: int or float
threshold for the smallest line support region
distance_threshold: int or float
the size of the kernel used for counting the number of pixels contributing to
a line support region
orientation_threshold: int or float
the number of degrees (+ or -) that is allowed when determining if pixels are in the same
line support region
    output_lsr_shapefile: bool
        if True, also write the detected line support regions to a shapefile next to the input image
    Returns:
    --------
    lsf_raster: ndarray (3 bands, n rows, m cols)
        lsf_raster[0] is line length values (in pixels)
        lsf_raster[1] is line orientation values (in degrees)
        lsf_raster[2] is line contrast values
    """
ds = gdal.Open(image_name)
image = ds.ReadAsArray()
geotran = ds.GetGeoTransform()
ulx = geotran[0]
uly = geotran[3]
in_cell_width = geotran[1]
in_cell_height = geotran[5]
ds = None
out_srs = osr.SpatialReference()
out_srs.ImportFromEPSG(4326)
out_srs_wkt = out_srs.ExportToWkt()
lsf_raster = np.ones(shape=(image.shape))*-1 # output lsf_raster is 3 band.
image = np.moveaxis(image, 0, -1) # expects an image in rows, columns, channels
with expected_warnings(['precision']):
image = skimage.img_as_ubyte(rgb2gray(image))
# calculate gradient orientation and magnitude
mag, ang, dx, dy = __calc_mag_ang(image)
mag *= 0.001
# tmp(edmim<magThreshold)=-1;
temp = np.where(mag < mag_threshold, -1, ang) # set pixels to -1 if magnitude is below the mag_threshold
data_ob = __label_regions(temp, distance_threshold, orientation_threshold)
lsr_m = data_ob.labeled_regions
line_idx = np.unique(lsr_m)
# lsfarr = zeros(max(lsrM(:)),5);
lsf_arr = np.zeros(shape=(np.max(lsr_m), 5))
    count = 0
    for l in range(1, np.max(line_idx) + 1):  # region labels run from 1 to the highest label, inclusive
# idx=find(lsrM==l);
idx = np.argwhere(lsr_m.ravel() == l) # returns an array of indices
# eim = zeros(size(im));
eim = np.zeros(shape=temp.shape)
# eim(idx) = 1;
eim = np.where(lsr_m == l, 1, eim)
# if (sum(eim(:)) <= lsrThreshold)
if np.sum(eim) <= lsr_threshold: # ignore small line support region
continue
# ix_wi = dx(idx)
# iy_wi = dy(idx)
Ix_wi = dx.ravel()[idx] # extract elements in dx at index locations where lsr_m == l
Iy_wi = dy.ravel()[idx]
grd_wi = mag.ravel()[idx]
# find major orientation
ST = [[np.sum(Ix_wi**2), np.sum(Ix_wi*Iy_wi)],
[np.sum(Ix_wi*Iy_wi), np.sum(Iy_wi**2)]]
# V, D = eig(ST)
        # MATLAB returns a diagonal matrix D of eigenvalues and a matrix V whose columns are the
        # corresponding right eigenvectors, so that A*V = V*D.
        D, V = np.linalg.eig(ST)  # numpy returns the eigenvalues as a 1D array
        D = np.diag(D)  # put D on the diagonal to match MATLAB's convention
# if D(1,1)<D(2,2)
# lorn=atan(V(2,1)/V(1,1));
# else
# lorn=atan(V(2,2)/V(1,2));
# end
if D[0][0] < D[1][1]:
# lorn=atan(V(2,1)/V(1,1));
lorn = np.arctan(V[1][0]/V[0][0])
else:
# lorn=atan(V(2,2)/V(1,2));
lorn = np.arctan(V[1][1]/V[0][1])
# vote for r
# [Ytmp,Xtmp]=ind2sub(size(im),idx);
Ytmp, Xtmp = np.unravel_index(idx, temp.shape)
        Ytmp += 1  # shift to 1-based indices to match the original MATLAB ind2sub convention
        Xtmp += 1
# Raccm=round(Xtmp.*cos(lorn-pi/2)+Ytmp.*sin(lorn-pi/2));
Raccm=np.round(Xtmp*math.cos(lorn-(math.pi/2))+Ytmp*math.sin(lorn-(math.pi/2)))
rng=np.arange(Raccm.min(),Raccm.max()+1)
accm=np.zeros(shape=(len(rng)))
for k in range(len(idx)):
rc = np.round(Xtmp[k]*math.cos(lorn-math.pi/2)+Ytmp[k]*math.sin(lorn-math.pi/2))
accm[np.where(rng==rc)] = accm[np.where(rng==rc)] + grd_wi[k]
mxid = np.argmax(accm)
Xmx=max(Xtmp[np.where(Raccm==rng[mxid])])
Xmn=min(Xtmp[np.where(Raccm==rng[mxid])])
Ymx=max(Ytmp[np.where(Raccm==rng[mxid])])
Ymn=min(Ytmp[np.where(Raccm==rng[mxid])])
lmx = ((Xmx+Xmn)/2) - 1
lmy = ((Ymx+Ymn)/2) - 1
llen = math.sqrt((Xmx-Xmn)**2+(Ymx-Ymn)**2)
lsf_arr[count][0] = llen
lsf_arr[count][1] = lmx
lsf_arr[count][2] = lmy
lsf_arr[count][3] = lorn
lcon=np.mean(grd_wi[(np.where(Raccm==rng[mxid]))])
lsf_arr[count][4] = lcon
lsf_raster[0][int(lmy)][int(lmx)] = llen
lsf_raster[1][int(lmy)][int(lmx)] = np.rad2deg(lorn)+180
lsf_raster[2][int(lmy)][int(lmx)] = lcon
count+=1
lsf_arr = lsf_arr[0:count,:]
#lsf_arr[:,1] = ulx + lsf_arr[:,1] * in_cell_width
#lsf_arr[:,2] = uly + lsf_arr[:,2] * in_cell_height
if output_lsr_shapefile:
write_lsr_shapefile(lsf_arr, os.path.join(os.path.dirname(image_name), os.path.basename(image_name)[:-4])+"_lsr_MT"+str(mag_threshold)+"_LT"+str(lsr_threshold)+"_DT"+str(distance_threshold)+"_OT"+str(orientation_threshold)+".shp", geotran)
return lsf_raster
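# Usage sketch (illustrative; not part of the original module). calc_line_support_regions reads
# the image from disk itself, so only a filename is needed; the filename below is a placeholder.
def _example_calc_line_support_regions(image_name="example_scene.tif"):
    """Compute the 3-band line-support raster (length, orientation, contrast) for an image on disk."""
    lsf_raster = calc_line_support_regions(
        image_name,
        mag_threshold=20,
        lsr_threshold=20,
        distance_threshold=8,
        orientation_threshold=22.5,
        output_lsr_shapefile=False,  # set True to also write the regions to a shapefile
    )
    return lsf_raster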
def __label_regions(orientation_image, distance_threshold, orientation_threshold):
labeled_regions = np.zeros(shape=orientation_image.shape, dtype=int) # labeled image
region_support = np.zeros(shape=orientation_image.shape, dtype=int) # counts of pixels supporting the region
out_l = []
out_row_idx = []
out_col_idx = []
ws = distance_threshold*2
i = 0
while i < orientation_image.shape[0]:
j = 0
while j < orientation_image.shape[1]:
if orientation_image[i][j] >= 0:
center_i = i
center_j = j
# now get the row and col indices for the kernel
# (top, bot, left, right). The kernel is centered on orientation_image[i][j]
if center_i - ws > 0: # avoid indexing out of bounds of the top of the array
top = center_i - ws
else:
top = 0
if center_i + ws < orientation_image.shape[0] - 1: # avoid indexing out of bounds of the bottom of the array
bot = center_i + ws
else:
bot = orientation_image.shape[0] - 1
if center_j - ws > 0: # avoid indexing out of bounds to the left of the array
left = center_j - ws
else:
left = 0
if center_j + ws < orientation_image.shape[1] - 1: # avoid indexing out of bounds to the right of the array
right = center_j + ws
else:
right = orientation_image.shape[1] - 1
pixel_count = 0
ii = top
while ii <= bot:
jj = left
while jj <= right:
dist = math.sqrt((center_i-ii)*(center_i-ii)+(center_j-jj)*(center_j-jj))
if dist <= distance_threshold and orientation_image[ii][jj] >= 0 and (ii != center_i or jj != center_j):
abs_orientation_diff = abs(orientation_image[center_i][center_j] - orientation_image[ii][jj])
                            # orientations wrap around at 360 degrees, so also consider the complement of the difference
c2 = 360 - abs_orientation_diff
                            # the true angular difference is the smaller of the two; a small value means similar orientations
if abs_orientation_diff < c2:
c = abs_orientation_diff
else:
c = c2
if c < orientation_threshold:
pixel_count += 1
jj+=1
ii+=1
region_support[i][j] = pixel_count
out_l.append(pixel_count)
out_row_idx.append(i)
out_col_idx.append(j)
j+=1
i+=1
# sort the support regions based on the number of contributing pixels
out_l, out_row_idx, out_col_idx = (list(t) for t in zip(*sorted(zip(out_l, out_row_idx, out_col_idx))))
# begin expanding regions with support. start with the line with the most support first and count down from there
label_id = 0
data_object = LSR_data_object(orientation_image, region_support, labeled_regions, 0, orientation_threshold)
for k in range(len(out_l)-1, -1, -1):
if data_object.labeled_regions[out_row_idx[k]][out_col_idx[k]] == 0: # if values at this location have not been written,
center_i = out_row_idx[k]
center_j = out_col_idx[k]
# if out_m[tgti + tgtj * rows] == 0:
if data_object.region_support[center_i][center_j] == 0: # there were no pixels with similar orientation connected to eachother
continue
# tgt = image[tgti + tgtj * rows]
orientation = data_object.orientation_image[center_i][center_j] # orientation at this location
# out_m[tgti + tgtj * rows] = 0
data_object.region_support[center_i][center_j] = 0
data_object.label_id+=1
top = center_i
bot = center_i
left = center_j
right = center_j
# now expand out from center_i, center_j and label the support region
__expand(data_object, center_i, center_j, center_i, center_j)
return data_object
def __expand(data_object, origin_i, origin_j, candidate_i, candidate_j):
"""
label regions with pixels of similar orientation
"""
abs_orientation_diff = abs(data_object.orientation_image[candidate_i][candidate_j] - data_object.orientation_image[origin_i][origin_j])
    # orientations wrap around at 360 degrees, so also consider the complement of the difference
    c2 = 360 - abs_orientation_diff
    # the true angular difference is the smaller of the two; a small value means similar orientations
if abs_orientation_diff < c2:
c = abs_orientation_diff
else:
c = c2
# if c > ot or image[i + j * rows] < 0 or seg_res[i + j * rows] > 0:
if c > data_object.orientation_threshold or data_object.orientation_image[candidate_i][candidate_j] < 0 or data_object.labeled_regions[candidate_i][candidate_j] > 0:
return
# seg_res[i + j * rows] = label_n
data_object.labeled_regions[candidate_i][candidate_j] = data_object.label_id
# out_m[i + j * rows] = 0
data_object.region_support[candidate_i][candidate_j] = 0
# continue expanding until the bounds of the array are reached. Worst case scenario
# is that a line support region spans the entire image
if candidate_i + 1 < data_object.orientation_image.shape[0]:
__expand(data_object, candidate_i, candidate_j, candidate_i + 1, candidate_j)
if candidate_j + 1 < data_object.orientation_image.shape[1]:
__expand(data_object, candidate_i, candidate_j, candidate_i, candidate_j + 1)
if candidate_i - 1 >= 0:
__expand(data_object, candidate_i, candidate_j, candidate_i - 1, candidate_j)
if candidate_j - 1 >= 0:
__expand(data_object, candidate_i, candidate_j, candidate_i, candidate_j - 1)
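# Note (added sketch, not in the original): __expand recurses once per pixel of a region, so a
# line support region covering a large image can exceed Python's default recursion limit of
# roughly 1000 frames. A minimal, assumption-laden workaround is to raise the limit before
# calling __label_regions; an iterative flood fill with an explicit stack would be more robust.
def _ensure_recursion_limit(max_region_pixels):
    """Illustrative helper: make sure the recursion limit can hold a region of the given size."""
    import sys
    needed = int(max_region_pixels) + 100  # head-room for the surrounding call stack
    if sys.getrecursionlimit() < needed:
        sys.setrecursionlimit(needed)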
| 41.043554
| 248
| 0.585636
|
79525ab2de58efae22644e9364fd217576d7c5eb
| 8,670
|
py
|
Python
|
package_control/cmd.py
|
Shekharrajak/my_sublime_packages
|
4627b7faa47b0b03578c339fb5081be9d92867a6
|
[
"Unlicense",
"MIT"
] | 1
|
2018-06-23T08:07:39.000Z
|
2018-06-23T08:07:39.000Z
|
package_control/cmd.py
|
Shekharrajak/my_sublime_packages
|
4627b7faa47b0b03578c339fb5081be9d92867a6
|
[
"Unlicense",
"MIT"
] | null | null | null |
package_control/cmd.py
|
Shekharrajak/my_sublime_packages
|
4627b7faa47b0b03578c339fb5081be9d92867a6
|
[
"Unlicense",
"MIT"
] | null | null | null |
import os
import subprocess
import re
import sys
if os.name == 'nt':
from ctypes import windll, create_unicode_buffer
try:
# Allow using this file on the website where the sublime
# module is unavailable
import sublime
except (ImportError):
sublime = None
from .console_write import console_write
from .unicode import unicode_from_os
from .show_error import show_error
from . import text
try:
# Python 2
str_cls = unicode
except (NameError):
# Python 3
str_cls = str
def create_cmd(args, basename_binary=False):
"""
Takes an array of strings to be passed to subprocess.Popen and creates
a string that can be pasted into a terminal
:param args:
The array containing the binary name/path and all arguments
:param basename_binary:
        If only the basename of the binary should be used instead of the full path
:return:
The command string
"""
if basename_binary:
args[0] = os.path.basename(args[0])
if os.name == 'nt':
return subprocess.list2cmdline(args)
else:
escaped_args = []
for arg in args:
if re.search('^[a-zA-Z0-9/_^\\-\\.:=]+$', arg) == None:
arg = u"'" + arg.replace(u"'", u"'\\''") + u"'"
escaped_args.append(arg)
return u' '.join(escaped_args)
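# Usage sketch (illustrative; not part of Package Control): create_cmd joins the argument list
# into a shell-pasteable string, quoting anything outside the character whitelist above. The
# argument values are made up for the example.
def _example_create_cmd():
    args = ['/usr/bin/git', 'clone', 'https://example.com/repo name.git']
    full = create_cmd(args)  # e.g. /usr/bin/git clone 'https://example.com/repo name.git'
    short = create_cmd(list(args), basename_binary=True)  # binary shown as just "git"
    return full, short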
class Cli(object):
"""
Base class for running command line apps
:param binary_locations:
The full filesystem path to the executable for the version control
system. May be set to None to allow the code to try and find it. May
also be a list of locations to attempt. This allows settings to be
shared across operating systems.
"""
# Prevent duplicate lookups
binary_paths = {}
cli_name = None
def __init__(self, binary_locations, debug):
self.binary_locations = binary_locations
self.debug = debug
def execute(self, args, cwd, input=None, encoding='utf-8', meaningful_output=False, ignore_errors=None):
"""
Creates a subprocess with the executable/args
:param args:
A list of the executable path and all arguments to it
:param cwd:
The directory in which to run the executable
:param input:
The input text to send to the program
:param meaningful_output:
If the output from the command is possibly meaningful and should
be displayed if in debug mode
:param ignore_errors:
A regex of errors to ignore
:return: A string of the executable output
"""
startupinfo = None
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
# Make sure the cwd is ascii
try:
cwd.encode('mbcs')
except UnicodeEncodeError:
buf = create_unicode_buffer(512)
if windll.kernel32.GetShortPathNameW(cwd, buf, len(buf)):
cwd = buf.value
if self.debug:
console_write(
u'''
Executing %s [%s]
''',
(create_cmd(args), cwd)
)
try:
if sys.platform == 'win32' and sys.version_info < (3,):
cwd = cwd.encode('mbcs')
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
startupinfo=startupinfo, cwd=cwd, env=os.environ)
if input and isinstance(input, str_cls):
input = input.encode(encoding)
stuck = True
if sublime:
def kill_proc():
if not stuck:
return
# This doesn't actually work!
proc.kill()
                binary_name = os.path.basename(args[0])
                is_vcs = False
                if re.search('git', binary_name):
                    is_vcs = True
                elif re.search('hg', binary_name):
                    is_vcs = True
message = u'The process %s seems to have gotten stuck.' % binary_name
if is_vcs:
message += text.format(
u'''
This is likely due to a password or passphrase
prompt. Please ensure %s works without a prompt, or
change the "ignore_vcs_packages" Package Control
setting to true.
Sublime Text will need to be restarted once these
changes are made.
''',
binary_name
)
show_error(message)
sublime.set_timeout(kill_proc, 60000)
output, _ = proc.communicate(input)
stuck = False
output = output.decode(encoding)
output = output.replace('\r\n', '\n').rstrip(' \n\r')
if proc.returncode != 0:
if not ignore_errors or re.search(ignore_errors, output) is None:
show_error(
u'''
Error executing: %s
%s
VCS-based packages can be ignored with the
"ignore_vcs_packages" setting.
''',
(create_cmd(args), output)
)
return False
if meaningful_output and self.debug and len(output) > 0:
console_write(output, indent=' ', prefix=False)
return output
except (OSError) as e:
show_error(
u'''
Error executing: %s
%s
Try checking your "%s_binary" setting?
''',
(create_cmd(args), unicode_from_os(e), self.cli_name)
)
return False
def find_binary(self, name):
"""
Locates the executable by looking in the PATH and well-known directories
:param name:
The string filename of the executable
:return:
The filesystem path to the executable, or None if not found
"""
# Use the cached path
if self.cli_name in Cli.binary_paths:
return Cli.binary_paths[self.cli_name]
check_binaries = []
# Use the settings first
if self.binary_locations:
if not isinstance(self.binary_locations, list):
self.binary_locations = [self.binary_locations]
check_binaries.extend(self.binary_locations)
# Next check the PATH
for dir_ in os.environ['PATH'].split(os.pathsep):
check_binaries.append(os.path.join(dir_, name))
# Finally look in common locations that may not be in the PATH
if os.name == 'nt':
dirs = ['C:\\Program Files\\Git\\bin',
'C:\\Program Files (x86)\\Git\\bin',
'C:\\Program Files\\TortoiseGit\\bin',
'C:\\Program Files\\Mercurial',
'C:\\Program Files (x86)\\Mercurial',
'C:\\Program Files (x86)\\TortoiseHg',
'C:\\Program Files\\TortoiseHg',
'C:\\cygwin\\bin']
else:
# ST seems to launch with a minimal set of environmental variables
# on OS X, so we add some common paths for it
dirs = ['/usr/local/git/bin', '/usr/local/bin']
for dir_ in dirs:
check_binaries.append(os.path.join(dir_, name))
if self.debug:
console_write(
u'''
Looking for %s at: "%s"
''',
(self.cli_name, '", "'.join(check_binaries))
)
for path in check_binaries:
if os.path.exists(path) and not os.path.isdir(path) and os.access(path, os.X_OK):
if self.debug:
console_write(
u'''
Found %s at "%s"
''',
(self.cli_name, path)
)
Cli.binary_paths[self.cli_name] = path
return path
if self.debug:
console_write(
u'''
Could not find %s on your machine
''',
self.cli_name
)
return None
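# Usage sketch (hypothetical subclass, not part of Package Control): Cli is meant to be
# subclassed per tool, with cli_name driving both binary-path caching and the
# "<cli_name>_binary" setting referenced in the error message above.
class _ExampleGitCli(Cli):
    cli_name = 'git'

    def version(self, cwd='.'):
        """Return the `git --version` output, or False if the binary is missing or fails."""
        binary = self.find_binary('git.exe' if os.name == 'nt' else 'git')
        if not binary:
            return False
        return self.execute([binary, '--version'], cwd)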
| 31.18705
| 108
| 0.51511
|
79525aec2be7db4c5a2bcc9affd18d707018f860
| 5,278
|
py
|
Python
|
Models/classTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 123
|
2019-05-14T19:50:42.000Z
|
2022-03-21T11:32:30.000Z
|
Models/classTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 30
|
2019-05-23T18:48:29.000Z
|
2021-06-26T01:17:13.000Z
|
Models/classTemplateTF/model.py
|
UTS-AnimalLogicAcademy/nuke-ML-server
|
3bec5e9efc1f3101e7506401eb57e7b8c955f84c
|
[
"Apache-2.0"
] | 34
|
2019-05-14T17:43:26.000Z
|
2021-11-10T23:53:02.000Z
|
from __future__ import print_function
import sys
import os
import time
import scipy.misc
import numpy as np
import cv2
import tensorflow as tf
tf.compat.v1.disable_eager_execution() # For TF 2.x compatibility
from ..baseModel import BaseModel
from ..common.util import print_, get_saved_model_list, linear_to_srgb
import message_pb2
class Model(BaseModel):
"""Load your trained model and do inference in Nuke"""
def __init__(self):
super(Model, self).__init__()
self.name = 'Classification Template'
dir_path = os.path.dirname(os.path.realpath(__file__))
self.checkpoints_dir = os.path.join(dir_path, 'checkpoints')
self.batch_size = 1
# Initialise checkpoint name to the most recent trained model
ckpt_names = get_saved_model_list(self.checkpoints_dir)
if not ckpt_names: # empty list
self.checkpoint_name = ''
else:
self.checkpoint_name = ckpt_names[-1]
self.prev_ckpt_name = self.checkpoint_name
# Button to get classification label
self.get_label = False
# Define options
self.options = ('checkpoint_name',)
self.buttons = ('get_label',)
# Define inputs/outputs
self.inputs = {'input': 3}
self.outputs = {'output': 3}
def load_model(self):
# Check if empty or invalid checkpoint name
if self.checkpoint_name=='':
ckpt_names = get_saved_model_list(self.checkpoints_dir)
if not ckpt_names:
raise ValueError("No checkpoints found in {}".format(self.checkpoints_dir))
else:
raise ValueError("Empty checkpoint name, try an available checkpoint in {} (ex: {})"
.format(self.checkpoints_dir, ckpt_names[-1]))
print_("Loading trained model checkpoint...\n", 'm')
# Load from given checkpoint file name
model = tf.keras.models.load_model(os.path.join(self.checkpoints_dir, self.checkpoint_name))
model._make_predict_function()
print_("...Checkpoint {} loaded\n".format(self.checkpoint_name), 'm')
return model
def inference(self, image_list):
"""Do an inference on the model with a set of inputs.
# Arguments:
image_list: The input image list
Return the result of the inference.
"""
image = image_list[0]
image = linear_to_srgb(image).copy()
image = (image * 255).astype(np.uint8)
if not hasattr(self, 'model'):
# Initialise tensorflow graph
tf.compat.v1.reset_default_graph()
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth=True
self.sess = tf.compat.v1.Session(config=config)
# Necessary to switch / load_weights on different h5 file
tf.compat.v1.keras.backend.set_session(self.sess)
# Load most recent trained model
self.model = self.load_model()
self.graph = tf.compat.v1.get_default_graph()
self.prev_ckpt_name = self.checkpoint_name
self.class_labels = (self.checkpoint_name.split('.')[0]).split('_')
else:
tf.compat.v1.keras.backend.set_session(self.sess)
# If checkpoint name has changed, load new checkpoint
if self.prev_ckpt_name != self.checkpoint_name or self.checkpoint_name == '':
self.model = self.load_model()
self.graph = tf.compat.v1.get_default_graph()
self.class_labels = (self.checkpoint_name.split('.')[0]).split('_')
# If checkpoint correctly loaded, update previous checkpoint name
self.prev_ckpt_name = self.checkpoint_name
image = cv2.resize(image, dsize=(224, 224), interpolation=cv2.INTER_NEAREST)
# Predict on new data
image_batch = np.expand_dims(image, 0)
# Preprocess a numpy array encoding a batch of images (RGB values within [0, 255])
image_batch = tf.keras.applications.mobilenet.preprocess_input(image_batch)
start = time.time()
with self.graph.as_default():
y_prob = self.model.predict(image_batch)
y_class = y_prob.argmax(axis=-1)[0]
duration = time.time() - start
# Print results on server side
print('Inference duration: {:4.3f}s'.format(duration))
class_scores = str(["{0:0.4f}".format(i) for i in y_prob[0]]).replace("'", "")
print("Class scores: {} --> Label: {}".format(class_scores, self.class_labels[y_class]))
# If get_label button is pressed in Nuke
if self.get_label:
# Send back which class was detected
script_msg = message_pb2.FieldValuePairAttrib()
script_msg.name = "PythonScript"
# Create a Python script message to run in Nuke
nuke_msg = "Class scores: {}\\nLabel: {}".format(class_scores, self.class_labels[y_class])
python_script = "nuke.message('{}')\n".format(nuke_msg)
script_msg_val = script_msg.values.add()
script_msg_str = script_msg_val.string_attributes.add()
script_msg_str.values.extend([python_script])
return [image_list[0], script_msg]
return [image_list[0]]
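# Usage sketch (illustrative only; not part of the plug-in). The ML server instantiates Model and
# calls inference() with a list of float images in linear colour space. Running this for real
# requires a trained .h5 checkpoint in the checkpoints/ directory; the dummy input is an assumption.
def _example_inference():
    model = Model()
    dummy = np.random.rand(256, 256, 3).astype(np.float32)  # linear RGB, values in [0, 1]
    # returns [image] normally, or [image, script_msg] when the get_label button is pressed
    return model.inference([dummy])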
| 41.234375
| 102
| 0.636036
|
79525cf11f35655760597ff820c2b99354cf838c
| 4,921
|
py
|
Python
|
clusterman/cli/manage.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 281
|
2019-11-15T03:12:43.000Z
|
2022-01-07T06:36:58.000Z
|
clusterman/cli/manage.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 38
|
2019-11-18T20:15:47.000Z
|
2022-03-28T11:28:45.000Z
|
clusterman/cli/manage.py
|
akshaysharma096/clusterman
|
27f4bd217fe201a4c0b9bf460c5a9e155ee88041
|
[
"Apache-2.0"
] | 21
|
2019-11-16T07:49:40.000Z
|
2022-02-09T18:13:48.000Z
|
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from getpass import getuser
from socket import gethostname
import arrow
import staticconf
from clusterman.args import add_cluster_arg
from clusterman.args import add_cluster_config_directory_arg
from clusterman.args import add_pool_arg
from clusterman.args import add_scheduler_arg
from clusterman.args import subparser
from clusterman.autoscaler.pool_manager import PoolManager
from clusterman.cli.util import timeout_wrapper
from clusterman.config import POOL_NAMESPACE
from clusterman.util import ask_for_confirmation
from clusterman.util import get_autoscaler_scribe_stream
from clusterman.util import log_to_scribe
LOG_TEMPLATE = f"{arrow.now()} {gethostname()} {__name__}"
def get_target_capacity_value(target_capacity: str, pool: str, scheduler: str) -> int:
target_capacity = target_capacity.lower()
pool_namespace = POOL_NAMESPACE.format(pool=pool, scheduler=scheduler)
if target_capacity == "min":
return staticconf.read_int("scaling_limits.min_capacity", namespace=pool_namespace)
elif target_capacity == "max":
return staticconf.read_int("scaling_limits.max_capacity", namespace=pool_namespace)
else:
return int(target_capacity)
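# Usage sketch (illustrative): "min"/"max" are resolved against the pool's scaling_limits in
# staticconf, while anything else is parsed as a plain integer. The pool and scheduler names
# below are placeholders, and the "min" lookup assumes the pool namespace is already configured.
def _example_get_target_capacity_value():
    explicit = get_target_capacity_value("25", "my-pool", "mesos")  # -> 25, no config lookup
    minimum = get_target_capacity_value("min", "my-pool", "mesos")  # -> scaling_limits.min_capacity
    return explicit, minimum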
def change_target_capacity(manager: PoolManager, target_capacity: str, dry_run: bool) -> str:
old_target = manager.target_capacity
requested_target = get_target_capacity_value(target_capacity, manager.pool, manager.scheduler)
if not dry_run and not ask_for_confirmation(
f"Modifying target capacity for {manager.cluster}, {manager.pool}.{manager.scheduler} "
f"from {old_target} to {requested_target}. Proceed? "
):
print("Aborting operation.")
return ""
new_target = manager.modify_target_capacity(requested_target, dry_run)
return (
f"Target capacity for {manager.pool}.{manager.scheduler} on {manager.cluster} manually changed "
f"from {old_target} to {new_target} by {getuser()}"
)
def mark_stale(manager: PoolManager, dry_run: bool) -> str:
if not dry_run and not ask_for_confirmation(
f"Marking all resource groups in {manager.cluster}, {manager.pool}.{manager.scheduler} stale. Proceed? "
):
print("Aborting operation.")
return ""
manager.mark_stale(dry_run)
return (
f"All resource groups in {manager.pool}.{manager.scheduler} on {manager.cluster} manually "
f"marked as stale by {getuser()}"
)
@timeout_wrapper
def main(args: argparse.Namespace) -> None:
if args.target_capacity and args.mark_stale:
raise ValueError("Cannot specify --target-capacity and --mark-stale simultaneously")
manager = PoolManager(args.cluster, args.pool, args.scheduler)
log_messages = []
if args.target_capacity:
log_message = change_target_capacity(manager, args.target_capacity, args.dry_run)
log_messages.append(log_message)
elif args.mark_stale:
log_message = mark_stale(manager, args.dry_run)
log_messages.append(log_message)
for log_message in log_messages:
if not log_message:
continue
print(log_message)
if not args.dry_run:
scribe_stream = get_autoscaler_scribe_stream(args.cluster, args.pool, args.scheduler)
log_to_scribe(scribe_stream, f"{LOG_TEMPLATE} {log_message}")
@subparser("manage", "check the status of a cluster", main)
def add_manager_parser(subparser, required_named_args, optional_named_args): # pragma: no cover
add_cluster_arg(required_named_args, required=True)
add_pool_arg(required_named_args)
add_scheduler_arg(required_named_args)
optional_named_args.add_argument(
"--target-capacity",
metavar="X",
help="New target capacity for the cluster (valid options: min, max, positive integer)",
)
optional_named_args.add_argument(
"--mark-stale",
action="store_true",
help=(
'Mark the resource groups of a cluster as "stale" (ASGs only); these resource groups '
"will no longer contribute to the pool's target capacity."
),
)
optional_named_args.add_argument(
"--dry-run", action="store_true", help="Just print what would happen, don't actually add or remove instances",
)
add_cluster_config_directory_arg(optional_named_args)
| 39.368
| 118
| 0.731355
|
79525e47465a3120276741fcfebcf63773fdc65e
| 19,381
|
py
|
Python
|
research/object_detection/protos/box_predictor_pb2.py
|
nobillowseagit/models
|
cbac8468537edbe679ddadad365da47a5ac966d8
|
[
"Apache-2.0"
] | 16
|
2018-12-17T15:30:16.000Z
|
2021-08-20T03:07:07.000Z
|
research/object_detection/protos/box_predictor_pb2.py
|
nobillowseagit/models
|
cbac8468537edbe679ddadad365da47a5ac966d8
|
[
"Apache-2.0"
] | null | null | null |
research/object_detection/protos/box_predictor_pb2.py
|
nobillowseagit/models
|
cbac8468537edbe679ddadad365da47a5ac966d8
|
[
"Apache-2.0"
] | 13
|
2019-05-13T11:01:43.000Z
|
2022-03-28T07:39:53.000Z
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: object_detection/protos/box_predictor.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from object_detection.protos import hyperparams_pb2 as object__detection_dot_protos_dot_hyperparams__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='object_detection/protos/box_predictor.proto',
package='object_detection.protos',
syntax='proto2',
serialized_pb=_b('\n+object_detection/protos/box_predictor.proto\x12\x17object_detection.protos\x1a)object_detection/protos/hyperparams.proto\"\x9b\x02\n\x0c\x42oxPredictor\x12Y\n\x1b\x63onvolutional_box_predictor\x18\x01 \x01(\x0b\x32\x32.object_detection.protos.ConvolutionalBoxPredictorH\x00\x12P\n\x17mask_rcnn_box_predictor\x18\x02 \x01(\x0b\x32-.object_detection.protos.MaskRCNNBoxPredictorH\x00\x12G\n\x12rfcn_box_predictor\x18\x03 \x01(\x0b\x32).object_detection.protos.RfcnBoxPredictorH\x00\x42\x15\n\x13\x62ox_predictor_oneof\"\xf2\x02\n\x19\x43onvolutionalBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x14\n\tmin_depth\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\tmax_depth\x18\x03 \x01(\x05:\x01\x30\x12&\n\x1bnum_layers_before_predictor\x18\x04 \x01(\x05:\x01\x30\x12\x19\n\x0buse_dropout\x18\x05 \x01(\x08:\x04true\x12%\n\x18\x64ropout_keep_probability\x18\x06 \x01(\x02:\x03\x30.8\x12\x16\n\x0bkernel_size\x18\x07 \x01(\x05:\x01\x31\x12\x18\n\rbox_code_size\x18\x08 \x01(\x05:\x01\x34\x12&\n\x17\x61pply_sigmoid_to_scores\x18\t \x01(\x08:\x05\x66\x61lse\x12%\n\x1a\x63lass_prediction_bias_init\x18\n \x01(\x02:\x01\x30\"\xe3\x02\n\x14MaskRCNNBoxPredictor\x12<\n\x0e\x66\x63_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\x1a\n\x0buse_dropout\x18\x02 \x01(\x08:\x05\x66\x61lse\x12%\n\x18\x64ropout_keep_probability\x18\x03 \x01(\x02:\x03\x30.5\x12\x18\n\rbox_code_size\x18\x04 \x01(\x05:\x01\x34\x12>\n\x10\x63onv_hyperparams\x18\x05 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12%\n\x16predict_instance_masks\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\'\n\x1amask_prediction_conv_depth\x18\x07 \x01(\x05:\x03\x32\x35\x36\x12 \n\x11predict_keypoints\x18\x08 \x01(\x08:\x05\x66\x61lse\"\xf9\x01\n\x10RfcnBoxPredictor\x12>\n\x10\x63onv_hyperparams\x18\x01 \x01(\x0b\x32$.object_detection.protos.Hyperparams\x12\"\n\x17num_spatial_bins_height\x18\x02 \x01(\x05:\x01\x33\x12!\n\x16num_spatial_bins_width\x18\x03 \x01(\x05:\x01\x33\x12\x13\n\x05\x64\x65pth\x18\x04 \x01(\x05:\x04\x31\x30\x32\x34\x12\x18\n\rbox_code_size\x18\x05 \x01(\x05:\x01\x34\x12\x17\n\x0b\x63rop_height\x18\x06 \x01(\x05:\x02\x31\x32\x12\x16\n\ncrop_width\x18\x07 \x01(\x05:\x02\x31\x32')
,
dependencies=[object__detection_dot_protos_dot_hyperparams__pb2.DESCRIPTOR,])
_BOXPREDICTOR = _descriptor.Descriptor(
name='BoxPredictor',
full_name='object_detection.protos.BoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='convolutional_box_predictor', full_name='object_detection.protos.BoxPredictor.convolutional_box_predictor', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mask_rcnn_box_predictor', full_name='object_detection.protos.BoxPredictor.mask_rcnn_box_predictor', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rfcn_box_predictor', full_name='object_detection.protos.BoxPredictor.rfcn_box_predictor', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='box_predictor_oneof', full_name='object_detection.protos.BoxPredictor.box_predictor_oneof',
index=0, containing_type=None, fields=[]),
],
serialized_start=116,
serialized_end=399,
)
_CONVOLUTIONALBOXPREDICTOR = _descriptor.Descriptor(
name='ConvolutionalBoxPredictor',
full_name='object_detection.protos.ConvolutionalBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.ConvolutionalBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='min_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.min_depth', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_depth', full_name='object_detection.protos.ConvolutionalBoxPredictor.max_depth', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_layers_before_predictor', full_name='object_detection.protos.ConvolutionalBoxPredictor.num_layers_before_predictor', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.ConvolutionalBoxPredictor.use_dropout', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.ConvolutionalBoxPredictor.dropout_keep_probability', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.8),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='kernel_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.kernel_size', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.ConvolutionalBoxPredictor.box_code_size', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='apply_sigmoid_to_scores', full_name='object_detection.protos.ConvolutionalBoxPredictor.apply_sigmoid_to_scores', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_prediction_bias_init', full_name='object_detection.protos.ConvolutionalBoxPredictor.class_prediction_bias_init', index=9,
number=10, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=402,
serialized_end=772,
)
_MASKRCNNBOXPREDICTOR = _descriptor.Descriptor(
name='MaskRCNNBoxPredictor',
full_name='object_detection.protos.MaskRCNNBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='fc_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.fc_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='use_dropout', full_name='object_detection.protos.MaskRCNNBoxPredictor.use_dropout', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='dropout_keep_probability', full_name='object_detection.protos.MaskRCNNBoxPredictor.dropout_keep_probability', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=True, default_value=float(0.5),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.MaskRCNNBoxPredictor.box_code_size', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.MaskRCNNBoxPredictor.conv_hyperparams', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='predict_instance_masks', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_instance_masks', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mask_prediction_conv_depth', full_name='object_detection.protos.MaskRCNNBoxPredictor.mask_prediction_conv_depth', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=256,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='predict_keypoints', full_name='object_detection.protos.MaskRCNNBoxPredictor.predict_keypoints', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=775,
serialized_end=1130,
)
_RFCNBOXPREDICTOR = _descriptor.Descriptor(
name='RfcnBoxPredictor',
full_name='object_detection.protos.RfcnBoxPredictor',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='conv_hyperparams', full_name='object_detection.protos.RfcnBoxPredictor.conv_hyperparams', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_spatial_bins_height', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_height', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='num_spatial_bins_width', full_name='object_detection.protos.RfcnBoxPredictor.num_spatial_bins_width', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=3,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='depth', full_name='object_detection.protos.RfcnBoxPredictor.depth', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=1024,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='box_code_size', full_name='object_detection.protos.RfcnBoxPredictor.box_code_size', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=4,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_height', full_name='object_detection.protos.RfcnBoxPredictor.crop_height', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='crop_width', full_name='object_detection.protos.RfcnBoxPredictor.crop_width', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=True, default_value=12,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=1133,
serialized_end=1382,
)
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].message_type = _CONVOLUTIONALBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].message_type = _MASKRCNNBOXPREDICTOR
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].message_type = _RFCNBOXPREDICTOR
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'])
_BOXPREDICTOR.fields_by_name['convolutional_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'])
_BOXPREDICTOR.fields_by_name['mask_rcnn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_BOXPREDICTOR.oneofs_by_name['box_predictor_oneof'].fields.append(
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'])
_BOXPREDICTOR.fields_by_name['rfcn_box_predictor'].containing_oneof = _BOXPREDICTOR.oneofs_by_name['box_predictor_oneof']
_CONVOLUTIONALBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['fc_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_MASKRCNNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
_RFCNBOXPREDICTOR.fields_by_name['conv_hyperparams'].message_type = object__detection_dot_protos_dot_hyperparams__pb2._HYPERPARAMS
DESCRIPTOR.message_types_by_name['BoxPredictor'] = _BOXPREDICTOR
DESCRIPTOR.message_types_by_name['ConvolutionalBoxPredictor'] = _CONVOLUTIONALBOXPREDICTOR
DESCRIPTOR.message_types_by_name['MaskRCNNBoxPredictor'] = _MASKRCNNBOXPREDICTOR
DESCRIPTOR.message_types_by_name['RfcnBoxPredictor'] = _RFCNBOXPREDICTOR
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BoxPredictor = _reflection.GeneratedProtocolMessageType('BoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _BOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.BoxPredictor)
))
_sym_db.RegisterMessage(BoxPredictor)
ConvolutionalBoxPredictor = _reflection.GeneratedProtocolMessageType('ConvolutionalBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _CONVOLUTIONALBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.ConvolutionalBoxPredictor)
))
_sym_db.RegisterMessage(ConvolutionalBoxPredictor)
MaskRCNNBoxPredictor = _reflection.GeneratedProtocolMessageType('MaskRCNNBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _MASKRCNNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.MaskRCNNBoxPredictor)
))
_sym_db.RegisterMessage(MaskRCNNBoxPredictor)
RfcnBoxPredictor = _reflection.GeneratedProtocolMessageType('RfcnBoxPredictor', (_message.Message,), dict(
DESCRIPTOR = _RFCNBOXPREDICTOR,
__module__ = 'object_detection.protos.box_predictor_pb2'
# @@protoc_insertion_point(class_scope:object_detection.protos.RfcnBoxPredictor)
))
_sym_db.RegisterMessage(RfcnBoxPredictor)
# @@protoc_insertion_point(module_scope)
| 51.545213
| 2,256
| 0.774573
|
79525eb8bf148e6484aed8e264c929a8f0ca9145
| 129
|
py
|
Python
|
unobase/commenting/signals.py
|
unomena/unobase
|
175e768afa1608f9f34d1e5a053763ad27db0f7e
|
[
"BSD-3-Clause"
] | null | null | null |
unobase/commenting/signals.py
|
unomena/unobase
|
175e768afa1608f9f34d1e5a053763ad27db0f7e
|
[
"BSD-3-Clause"
] | null | null | null |
unobase/commenting/signals.py
|
unomena/unobase
|
175e768afa1608f9f34d1e5a053763ad27db0f7e
|
[
"BSD-3-Clause"
] | null | null | null |
__author__ = 'michael'
from django.dispatch import Signal
user_commented = Signal(providing_args=['user', 'request', 'comment'])
| 32.25
| 70
| 0.767442
|
79525fb76b6d260cd6a5ed91ddad96dfa1d316c5
| 4,087
|
py
|
Python
|
utils/metrics.py
|
leonardozcm/Point-Completion-Fig-AutoGenerator
|
109f5a414f51469fac82d0d23cde69efb9cf97e0
|
[
"Apache-2.0"
] | 31
|
2021-08-22T15:01:58.000Z
|
2022-03-19T12:26:21.000Z
|
utils/metrics.py
|
leonardozcm/ASFM-Net-Pytorch
|
2584d2d098c760e559d3f632b72b9ad9881c59d5
|
[
"MIT"
] | 10
|
2021-09-06T09:07:38.000Z
|
2022-02-12T08:12:54.000Z
|
utils/metrics.py
|
leonardozcm/SnowflakeNet
|
93e7151610765e7e2b41ace2d03c8750f0b6c80c
|
[
"MIT"
] | 5
|
2021-08-30T00:53:17.000Z
|
2022-03-20T11:57:25.000Z
|
# -*- coding: utf-8 -*-
# @Author: Haozhe Xie
# @Date: 2019-08-08 14:31:30
# @Last Modified by: Haozhe Xie
# @Last Modified time: 2020-05-25 09:13:32
# @Email: cshzxie@gmail.com
import logging
import open3d
import torch
from Chamfer3D.dist_chamfer_3D import chamfer_3DDist
class Metrics(object):
ITEMS = [{
'name': 'ChamferDistance',
'enabled': True,
'eval_func': 'cls._get_chamfer_distance',
'eval_object': chamfer_3DDist(),
# 'eval_object': ChamferDistance(ignore_zeros=True),
'is_greater_better': False,
'init_value': 32767
}]
@classmethod
def get(cls, pred, gt):
_items = cls.items()
_values = [0] * len(_items)
for i, item in enumerate(_items):
eval_func = eval(item['eval_func'])
_values[i] = eval_func(pred, gt)
return _values
@classmethod
def items(cls):
return [i for i in cls.ITEMS if i['enabled']]
@classmethod
def names(cls):
_items = cls.items()
return [i['name'] for i in _items]
@classmethod
def _get_f_score(cls, pred, gt, th=0.01):
"""References: https://github.com/lmb-freiburg/what3d/blob/master/util.py"""
pred = cls._get_open3d_ptcloud(pred)
gt = cls._get_open3d_ptcloud(gt)
dist1 = pred.compute_point_cloud_distance(gt)
dist2 = gt.compute_point_cloud_distance(pred)
recall = float(sum(d < th for d in dist2)) / float(len(dist2))
precision = float(sum(d < th for d in dist1)) / float(len(dist1))
return 2 * recall * precision / (recall + precision) if recall + precision else 0
@classmethod
def _get_open3d_ptcloud(cls, tensor):
tensor = tensor.squeeze().cpu().numpy()
ptcloud = open3d.geometry.PointCloud()
ptcloud.points = open3d.utility.Vector3dVector(tensor)
return ptcloud
@classmethod
def _get_chamfer_distance(cls, pred, gt):
# chamfer_distance = cls.ITEMS[1]['eval_object']
chamfer_distance = cls.ITEMS[0]['eval_object']
d1, d2, _, _ = chamfer_distance(pred, gt)
cd = torch.mean(d1) + torch.mean(d2)
return cd.item() * 1000
# return chamfer_distance(pred, gt).item() * 1000
@classmethod
def _get_emd_distance(cls, pred, gt):
emd_distance = cls.ITEMS[0]['eval_object']
return torch.mean(emd_distance(pred, gt)).item()
def __init__(self, metric_name, values):
self._items = Metrics.items()
self._values = [item['init_value'] for item in self._items]
self.metric_name = metric_name
if type(values).__name__ == 'list':
self._values = values
elif type(values).__name__ == 'dict':
metric_indexes = {}
for idx, item in enumerate(self._items):
item_name = item['name']
metric_indexes[item_name] = idx
for k, v in values.items():
if k not in metric_indexes:
                    logging.warning('Ignoring metric [Name=%s] because it is disabled.' % k)
continue
self._values[metric_indexes[k]] = v
else:
raise Exception('Unsupported value type: %s' % type(values))
def state_dict(self):
_dict = dict()
for i in range(len(self._items)):
item = self._items[i]['name']
value = self._values[i]
_dict[item] = value
return _dict
def __repr__(self):
return str(self.state_dict())
def better_than(self, other):
if other is None:
return True
_index = -1
for i, _item in enumerate(self._items):
if _item['name'] == self.metric_name:
_index = i
break
if _index == -1:
raise Exception('Invalid metric name to compare.')
        _metric = self._items[_index]
_value = self._values[_index]
other_value = other._values[_index]
return _value > other_value if _metric['is_greater_better'] else _value < other_value
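# Usage sketch (illustrative; values and shapes are assumptions): Metrics.get() evaluates every
# enabled item (only Chamfer distance here) on (B, N, 3) prediction/ground-truth point clouds,
# which requires the compiled Chamfer3D extension and CUDA tensors. better_than() then compares
# two snapshots on a named metric.
def _example_metrics_usage(pred, gt):
    values = Metrics.get(pred, gt)                # e.g. [chamfer_distance_x1000]
    current = Metrics('ChamferDistance', values)
    best = Metrics('ChamferDistance', [32767])    # the init_value, i.e. "worst possible"
    return current.better_than(best), current.state_dict()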
| 32.181102
| 93
| 0.590898
|
7952609e9f72568f6207d4ca79537c3b28a117d4
| 203
|
py
|
Python
|
classy_start/paths.py
|
mfonism/django-classy-start
|
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
|
[
"MIT"
] | 1
|
2022-01-20T18:48:46.000Z
|
2022-01-20T18:48:46.000Z
|
classy_start/paths.py
|
mfonism/django-classy-start
|
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
|
[
"MIT"
] | 8
|
2020-10-05T15:27:07.000Z
|
2021-02-16T17:17:54.000Z
|
classy_start/paths.py
|
mfonism/django-classy-start
|
70b73e1a836c2ae4c3bae6f53846b07a30a81ac0
|
[
"MIT"
] | 5
|
2020-10-05T18:11:44.000Z
|
2022-01-21T18:33:13.000Z
|
import pathlib
TEMPLATES_DIR = pathlib.Path(__file__).resolve(strict=True).parent / 'conf'
APP_TEMPLATES_DIR = TEMPLATES_DIR / 'app_template'
PROJECT_TEMPLATES_DIR = TEMPLATES_DIR / 'project_template'
| 29
| 75
| 0.807882
|
795260caef24ee8f231bab564ac699dadd75fb00
| 1,375
|
py
|
Python
|
cogdl/data/dataloader.py
|
Somefive/cogdl
|
1c5ab88aafc27529495d0d22f781055619e27cb2
|
[
"MIT"
] | 1
|
2021-03-17T07:23:51.000Z
|
2021-03-17T07:23:51.000Z
|
cogdl/data/dataloader.py
|
yingyukexiansheng/cogdl
|
cf594cdb3a97f45333d08c937205d1a691828a33
|
[
"MIT"
] | null | null | null |
cogdl/data/dataloader.py
|
yingyukexiansheng/cogdl
|
cf594cdb3a97f45333d08c937205d1a691828a33
|
[
"MIT"
] | null | null | null |
import torch.utils.data
from torch.utils.data.dataloader import default_collate
from cogdl.data import Batch, Data
class DataLoader(torch.utils.data.DataLoader):
r"""Data loader which merges data objects from a
:class:`cogdl.data.dataset` to a mini-batch.
Args:
dataset (Dataset): The dataset from which to load the data.
batch_size (int, optional): How may samples per batch to load.
(default: :obj:`1`)
shuffle (bool, optional): If set to :obj:`True`, the data will be
reshuffled at every epoch (default: :obj:`True`)
"""
def __init__(self, dataset, batch_size=1, shuffle=True, **kwargs):
super(DataLoader, self).__init__(
# dataset, batch_size, shuffle, collate_fn=lambda data_list: Batch.from_data_list(data_list), **kwargs
dataset,
batch_size,
shuffle,
collate_fn=self.collate_fn,
**kwargs,
)
@staticmethod
def collate_fn(batch):
item = batch[0]
if isinstance(item, Data):
return Batch.from_data_list(batch)
elif isinstance(item, torch.Tensor):
return default_collate(batch)
elif isinstance(item, float):
return torch.tensor(batch, dtype=torch.float)
raise TypeError("DataLoader found invalid type: {}".format(type(item)))
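# Usage sketch (illustrative; the dataset argument is a placeholder): the custom collate_fn
# batches cogdl Data objects into a Batch, while tensors and floats fall back to default collation.
def _example_dataloader(dataset):
    loader = DataLoader(dataset, batch_size=32, shuffle=True)
    for batch in loader:
        # batch is a cogdl Batch when the dataset yields Data objects
        return batch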
| 34.375
| 114
| 0.633455
|
795260edc9b41a9d3c0c76ccf0622e352e51fd4a
| 132
|
py
|
Python
|
myproj/api/routes/hello_v1/__init__.py
|
jbarguil/python-rest-server
|
d050d095555c9bad3e2e94b4e0c51075cc6ad3bc
|
[
"MIT"
] | null | null | null |
myproj/api/routes/hello_v1/__init__.py
|
jbarguil/python-rest-server
|
d050d095555c9bad3e2e94b4e0c51075cc6ad3bc
|
[
"MIT"
] | null | null | null |
myproj/api/routes/hello_v1/__init__.py
|
jbarguil/python-rest-server
|
d050d095555c9bad3e2e94b4e0c51075cc6ad3bc
|
[
"MIT"
] | null | null | null |
"""v1 of the Hello API
"""
from .api import api, blueprint
from .hello import namespace as ns_hello
api.add_namespace(ns_hello)
| 13.2
| 40
| 0.742424
|
7952611de7e54899944a86432e54961eb181d179
| 1,055
|
py
|
Python
|
src/groups/migrations/0001_initial.py
|
OmarYehia/django-social_network
|
83fc6fd288fe23684409c43bf51c7fca01297481
|
[
"MIT"
] | null | null | null |
src/groups/migrations/0001_initial.py
|
OmarYehia/django-social_network
|
83fc6fd288fe23684409c43bf51c7fca01297481
|
[
"MIT"
] | null | null | null |
src/groups/migrations/0001_initial.py
|
OmarYehia/django-social_network
|
83fc6fd288fe23684409c43bf51c7fca01297481
|
[
"MIT"
] | 1
|
2021-05-13T15:08:12.000Z
|
2021-05-13T15:08:12.000Z
|
# Generated by Django 3.2 on 2021-05-03 23:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('profiles', '0007_alter_relationship_managers'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('overview', models.TextField(blank=True, default='No overview available', max_length=500, null=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='profiles.profile')),
('user', models.ManyToManyField(blank=True, to='profiles.Profile')),
],
),
]
| 36.37931
| 131
| 0.617062
|
7952619ae3844ac0b36c3566ebb9a2737af62eb2
| 7,587
|
py
|
Python
|
gsae/utils/load_splits.py
|
dbhaskar92/GSAE
|
3340dfc38f95b29ff04d890d93deedf8dcae9324
|
[
"Apache-2.0"
] | 3
|
2021-05-18T21:28:54.000Z
|
2022-01-29T23:59:18.000Z
|
gsae/utils/load_splits.py
|
dbhaskar92/GSAE
|
3340dfc38f95b29ff04d890d93deedf8dcae9324
|
[
"Apache-2.0"
] | null | null | null |
gsae/utils/load_splits.py
|
dbhaskar92/GSAE
|
3340dfc38f95b29ff04d890d93deedf8dcae9324
|
[
"Apache-2.0"
] | 3
|
2021-07-07T17:48:28.000Z
|
2022-01-29T23:59:21.000Z
|
import numpy as np
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from gsae.utils import eval_metrics
# DEFINE THESE GLOBAL VARIABLES WITH YOUR OWN PATHS TO DATA
########################################
SEQ3_DATA_DIR = ''
SEQ4_DATA_DIR = ''
HIVTAR_DATA_DIR = ''
TEBOWN_DATA_DIR = ''
########################################
def load_seq3(batch_size=100,gnn=False, subsize=None, lognorm=False):
train_coeffs = np.load(SEQ3_DATA_DIR+"train_normcoeffs_0523.npy")
train_adjs = np.load(SEQ3_DATA_DIR+"train_adjs_0523.npy")
train_energies = np.load(SEQ3_DATA_DIR+"train_energies_0523.npy")
test_coeffs = np.load(SEQ3_DATA_DIR+"test_normcoeffs_0523.npy")
test_adjs = np.load(SEQ3_DATA_DIR+"test_adjs_0523.npy")
test_energies = np.load(SEQ3_DATA_DIR+"test_energies_0523.npy")
    if lognorm:
        # shift both splits by the same training-derived offset so values are >= 1 before the log
        shift = np.abs(train_coeffs.min()) + 1
        train_coeffs += shift
        test_coeffs += shift
        # log
        train_coeffs = np.log(train_coeffs)
        test_coeffs = np.log(test_coeffs)
if gnn:
train_diracs = torch.eye(train_adjs.shape[-1]).unsqueeze(0).repeat(train_adjs.shape[0],1,1)
train_tup = (torch.Tensor(train_diracs),
torch.Tensor(train_adjs),
torch.Tensor(train_energies))
else:
train_tup = (torch.Tensor(train_coeffs),
torch.Tensor(train_energies))
if gnn:
test_diracs = torch.eye(test_adjs.shape[-1]).unsqueeze(0).repeat(test_adjs.shape[0],1,1)
test_tup = (torch.Tensor(test_diracs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
else:
test_tup = (torch.Tensor(test_coeffs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
#################
# SUBSET DATA
#################
if subsize != None:
train_tup, _ = eval_metrics.compute_subsample(train_tup, subsize)
test_tup, _ = eval_metrics.compute_subsample(test_tup, subsize)
train_dataset = torch.utils.data.TensorDataset(*train_tup)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True)
return train_loader, train_tup, test_tup
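# Usage sketch (illustrative; assumes SEQ3_DATA_DIR points at the .npy files named above):
# load_seq3 returns a shuffled training DataLoader plus the raw train/test tensor tuples.
def _example_load_seq3():
    train_loader, train_tup, test_tup = load_seq3(batch_size=100, gnn=False)
    coeffs, energies = train_tup  # (normalised coefficients, per-structure energies)
    for batch_coeffs, batch_energies in train_loader:
        return batch_coeffs.shape, batch_energies.shape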
def load_seq4(batch_size=100, gnn=False, subsize=None):
train_coeffs = np.load(SEQ4_DATA_DIR+"train_normcoeffs_0523.npy")
train_adjs = np.load(SEQ4_DATA_DIR+"train_adjs_0523.npy")
train_energies = np.load(SEQ4_DATA_DIR+"train_energies_0523.npy")
test_coeffs = np.load(SEQ4_DATA_DIR+"test_normcoeffs_0523.npy")
test_adjs = np.load(SEQ4_DATA_DIR+"test_adjs_0523.npy")
test_energies = np.load(SEQ4_DATA_DIR+"test_energies_0523.npy")
if gnn:
train_diracs = torch.eye(train_adjs.shape[-1]).unsqueeze(0).repeat(train_adjs.shape[0],1,1)
train_tup = (torch.Tensor(train_diracs),
torch.Tensor(train_adjs),
torch.Tensor(train_energies))
else:
train_tup = (torch.Tensor(train_coeffs),
torch.Tensor(train_energies))
if gnn:
test_diracs = torch.eye(test_adjs.shape[-1]).unsqueeze(0).repeat(test_adjs.shape[0],1,1)
test_tup = (torch.Tensor(test_diracs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
else:
test_tup = (torch.Tensor(test_coeffs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
#################
# SUBSET DATA
#################
    if subsize is not None:
train_tup, _ = eval_metrics.compute_subsample(train_tup, subsize)
test_tup, _ = eval_metrics.compute_subsample(test_tup, subsize)
train_dataset = torch.utils.data.TensorDataset(*train_tup)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True)
return train_loader, train_tup, test_tup
def load_hivtar(batch_size=100,gnn=False,subsize=None):
train_coeffs = np.load(HIVTAR_DATA_DIR+"train_normcoeffs_0523.npy")
train_adjs = np.load(HIVTAR_DATA_DIR+"train_adjs_0523.npy")
train_energies = np.load(HIVTAR_DATA_DIR+"train_energies_0523.npy")
test_coeffs = np.load(HIVTAR_DATA_DIR+"test_normcoeffs_0523.npy")
test_adjs = np.load(HIVTAR_DATA_DIR+"test_adjs_0523.npy")
test_energies = np.load(HIVTAR_DATA_DIR+"test_energies_0523.npy")
if gnn:
train_diracs = torch.eye(train_adjs.shape[-1]).unsqueeze(0).repeat(train_adjs.shape[0],1,1)
train_tup = (torch.Tensor(train_diracs),
torch.Tensor(train_adjs),
torch.Tensor(train_energies))
else:
train_tup = (torch.Tensor(train_coeffs),
torch.Tensor(train_energies))
if gnn:
test_diracs = torch.eye(test_adjs.shape[-1]).unsqueeze(0).repeat(test_adjs.shape[0],1,1)
test_tup = (torch.Tensor(test_diracs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
else:
test_tup = (torch.Tensor(test_coeffs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
#################
# SUBSET DATA
#################
    if subsize is not None:
train_tup, _ = eval_metrics.compute_subsample(train_tup, subsize)
test_tup, _ = eval_metrics.compute_subsample(test_tup, subsize)
train_dataset = torch.utils.data.TensorDataset(*train_tup)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True)
return train_loader, train_tup, test_tup
def load_tebown(batch_size=100,gnn=False, subsize=None):
train_coeffs = np.load(TEBOWN_DATA_DIR+"train_normcoeffs_0523.npy")
train_adjs = np.load(TEBOWN_DATA_DIR+"train_adjs_0523.npy")
train_energies = np.load(TEBOWN_DATA_DIR+"train_energies_0523.npy")
test_coeffs = np.load(TEBOWN_DATA_DIR+"test_normcoeffs_0523.npy")
test_adjs = np.load(TEBOWN_DATA_DIR+"test_adjs_0523.npy")
test_energies = np.load(TEBOWN_DATA_DIR+"test_energies_0523.npy")
if gnn:
train_diracs = torch.eye(train_adjs.shape[-1]).unsqueeze(0).repeat(train_adjs.shape[0],1,1)
train_tup = (torch.Tensor(train_diracs),
torch.Tensor(train_adjs),
torch.Tensor(train_energies))
else:
train_tup = (torch.Tensor(train_coeffs),
torch.Tensor(train_energies))
if gnn:
test_diracs = torch.eye(test_adjs.shape[-1]).unsqueeze(0).repeat(test_adjs.shape[0],1,1)
test_tup = (torch.Tensor(test_diracs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
else:
test_tup = (torch.Tensor(test_coeffs),
torch.Tensor(test_adjs),
torch.Tensor(test_energies))
#################
# SUBSET DATA
#################
    if subsize is not None:
train_tup, _ = eval_metrics.compute_subsample(train_tup, subsize)
test_tup, _ = eval_metrics.compute_subsample(test_tup, subsize)
train_dataset = torch.utils.data.TensorDataset(*train_tup)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
shuffle=True)
return train_loader, train_tup, test_tup
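# Hedged usage sketch (added for illustration; not part of the original module).
# Once SEQ3_DATA_DIR points at the expected .npy files, a quick smoke test of
# the loader could look like this.  With gnn=False each mini-batch yields
# (coeffs, energies); with gnn=True the tuples are (diracs, adjs, energies).
if __name__ == "__main__":
    demo_loader, demo_train_tup, demo_test_tup = load_seq3(batch_size=128, gnn=False)
    for batch_coeffs, batch_energies in demo_loader:
        print(batch_coeffs.shape, batch_energies.shape)
        break  # one mini-batch is enough to confirm the shapes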
| 33.131004
| 99
| 0.626203
|
795261c4a0db7e1ccd4e62ff4c7a884211b985e0
| 3,567
|
py
|
Python
|
accelerator/test_methods/a_test_rechain.py
|
sebras/berkeman-accelerator
|
be9d1b25ffdb66411db465bdbf3932ab937c5ace
|
[
"Apache-2.0"
] | null | null | null |
accelerator/test_methods/a_test_rechain.py
|
sebras/berkeman-accelerator
|
be9d1b25ffdb66411db465bdbf3932ab937c5ace
|
[
"Apache-2.0"
] | null | null | null |
accelerator/test_methods/a_test_rechain.py
|
sebras/berkeman-accelerator
|
be9d1b25ffdb66411db465bdbf3932ab937c5ace
|
[
"Apache-2.0"
] | null | null | null |
############################################################################
# #
# Copyright (c) 2019 Carl Drougge #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
############################################################################
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
description = r'''
Test re-using datasets (from test_selfchain) in new
chains, and verify that the old chain still works.
Also tests that the dataset cache is updated correctly
on re-chaining.
'''
from accelerator.dataset import Dataset, DatasetWriter
jobids = ('selfchain',)
def synthesis(job):
manual_chain = [Dataset(jobids.selfchain, name) for name in "abcdefgh"]
manual_abf = [manual_chain[0], manual_chain[1], manual_chain[5]]
# build a local abf chain
prev = None
for ix, ds in enumerate(manual_abf):
name = "abf%d" % (ix,)
prev = ds.link_to_here(name, override_previous=prev)
manual_abf_data = list(Dataset.iterate_list(None, None, manual_abf))
local_abf_data = list(Dataset(job, "abf2").iterate_chain(None, None))
assert manual_abf_data == local_abf_data
# disconnect h, verify there is no chain
manual_chain[-1].link_to_here("alone", override_previous=None)
assert len(Dataset(job, "alone").chain()) == 1
# check that the original chain is unhurt
assert manual_chain == manual_chain[-1].chain()
# So far so good, now make a chain long enough to have a cache.
prev = None
ix = 0
going = True
while going:
if prev and "cache" in prev._data:
going = False
name = "longchain%d" % (ix,)
dw = DatasetWriter(name=name, previous=prev)
dw.add("ix", "number")
dw.get_split_write()(ix)
prev = dw.finish()
ix += 1
# we now have a chain that goes one past the first cache point
full_chain = Dataset(prev).chain()
assert "cache" in full_chain[-2]._data # just to check the above logic is correct
assert "cache" not in full_chain[-1]._data # just to be sure..
full_chain[-2].link_to_here("nocache", override_previous=None)
full_chain[-1].link_to_here("withcache", override_previous=full_chain[-3])
assert "cache" not in Dataset(job, "nocache")._data
assert "cache" in Dataset(job, "withcache")._data
# And make sure they both get the right data too.
assert list(Dataset(prev).iterate_chain(None, "ix")) == list(range(ix))
assert list(Dataset(job, "nocache").iterate_chain(None, "ix")) == [ix - 2]
assert list(Dataset(job, "withcache").iterate_chain(None, "ix")) == list(range(ix - 2)) + [ix - 1]
| 46.934211
| 99
| 0.583403
|
795263e8dea3e395ad6bb8df0351e520a0a4f702
| 219
|
py
|
Python
|
dsat/test/__init__.py
|
jul/dsat
|
f1d4263eb29530f931273f0dd2e74b93b8bacca3
|
[
"BSD-2-Clause"
] | 2
|
2015-10-25T04:07:40.000Z
|
2017-06-22T20:18:10.000Z
|
dsat/test/__init__.py
|
jul/dsat
|
f1d4263eb29530f931273f0dd2e74b93b8bacca3
|
[
"BSD-2-Clause"
] | 5
|
2015-01-20T14:44:28.000Z
|
2015-01-20T14:50:12.000Z
|
dsat/test/__init__.py
|
jul/dsat
|
f1d4263eb29530f931273f0dd2e74b93b8bacca3
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import
import unittest
from dsat.test.test_state import TestFilterDict
from dsat.test.test_st_send_parse import TestSendParse
if __name__ == '__main__':
unittest.main(verbosity=4)
| 21.9
| 54
| 0.821918
|
7952644ebf47766b7f6eae6f77084e12872117a4
| 3,702
|
py
|
Python
|
RAISoft/instruments/TTi.py
|
daveraees/EMA_Test_Lab
|
a3073c5ec205d6ee327a993b38e92698c12cb0a6
|
[
"MIT"
] | null | null | null |
RAISoft/instruments/TTi.py
|
daveraees/EMA_Test_Lab
|
a3073c5ec205d6ee327a993b38e92698c12cb0a6
|
[
"MIT"
] | null | null | null |
RAISoft/instruments/TTi.py
|
daveraees/EMA_Test_Lab
|
a3073c5ec205d6ee327a993b38e92698c12cb0a6
|
[
"MIT"
] | null | null | null |
#from GPIBdevices import GPIBDevice
#from SERIALdevices import SERIALDevice
from DummyMeter import Bedna
# import bus
from numpy import array
from time import sleep
class PStsx3510P(Bedna):
"""
# test PowerSource
from threading import Thread
from time import sleep
import random
PowerSource = PStsx3510P('PowerSourceTTi')
def Source():
while True:
PowerSource.set_volts(random.random())
PowerSource.Measure()
#PowerSource.OUTqueue.join()
print 'source:', PowerSource.Readings
sleep(0.5)
return
def Valve():
while True:
#PT100.Measure()
sleep(0.5)
PowerSource.Set(random.random())
PowerSource.Measure()
#PowerSource.OUTqueue.join()
print 'valve:', PowerSource.Readings
return
source = Thread(target=Source)
valve = Thread(target=Valve)
source.setDaemon(True)
valve.setDaemon(True)
source.start()
valve.start()
valve.join(50)
source.join(54)
"""
sleeptime = 0.1
def __init__(self, BUS):
Bedna.__init__(self, BUS)
self.Readings = {'U(V)':array([]), 'I(A)':array([])}
self.dev.initSequence = ['I 1.0']
self.init()
self.limitA = self.get_amperes_limit()
self.out_of_bounds = 0
return
def _assert_OPC(self):
self.Bus.write_string('*OPC')
return
def Measure(self):
volts = self._get_volts()
amperes = self._get_amperes()
self.Readings['U(V)'] = array([volts])
self.Readings['I(A)'] = array([amperes])
#wats = self._get_wats()
#self.Readings = array([wats])
measurement_error = 0
return measurement_error
def _get_volts(self):
self.Bus.write_string(('VO?'))
sleep(self.sleeptime)
voltage = self.Bus.read_string()
voltage = voltage.strip('V')
return float(voltage)
def set_volts(self, voltage):
cmdString = ('V %f' % voltage)
self.Bus.write_string( cmdString)
return
def _get_amperes(self):
self.Bus.write_string(('IO?'))
sleep(self.sleeptime)
amperes = self.Bus.read_string()
amperes = amperes.strip('A')
return float(amperes)
def get_amperes_limit(self):
cmdString = ('I?')
self.Bus.write_string(cmdString)
sleep(self.sleeptime)
amperes = self.Bus.read_string()
print amperes
amperes = amperes.strip('I')
return float(amperes)
def set_amperes_limit(self, amperes):
cmdString = ('I %f' % amperes)
self.Bus.write_string(cmdString)
return
def _get_wats(self):
self.Bus.write_string(('POWER?'))
sleep(self.sleeptime)
wats = self.Bus.read_string()
wats = wats.strip('W')
return float(wats)
def set_output_on(self):
self.Bus.write_string(('OP 1' ))
return
def set_output_off(self):
self.Bus.write_string(('OP 0' ))
return
def Set(self, value):
out_of_bounds = 0
if value < 0:
out_of_bounds = 1
self.set_volts(0.0)
elif value > 4.0: # limit the PELTIER voltage to 4 volts
out_of_bounds = 2
self.set_volts(35)
else:
self.Bus.write_string(('V %f' % value))
#self._assert_OPC()
self.out_of_bounds = out_of_bounds
return
| 30.097561
| 71
| 0.548622
|
7952648bd92bca9903e524922674905dc24b5a47
| 2,601
|
py
|
Python
|
cumulusci/tasks/salesforce/BaseRetrieveMetadata.py
|
justindonnaruma/CumulusCI
|
cc097c1f6f102a104f83ad9a9684af9d6bc0af31
|
[
"BSD-3-Clause"
] | null | null | null |
cumulusci/tasks/salesforce/BaseRetrieveMetadata.py
|
justindonnaruma/CumulusCI
|
cc097c1f6f102a104f83ad9a9684af9d6bc0af31
|
[
"BSD-3-Clause"
] | 2
|
2021-03-25T23:56:47.000Z
|
2021-03-31T19:52:05.000Z
|
cumulusci/tasks/salesforce/BaseRetrieveMetadata.py
|
justindonnaruma/CumulusCI
|
cc097c1f6f102a104f83ad9a9684af9d6bc0af31
|
[
"BSD-3-Clause"
] | null | null | null |
from cumulusci.core.utils import process_bool_arg
from cumulusci.tasks.salesforce import BaseSalesforceMetadataApiTask
from cumulusci.utils import zip_inject_namespace
from cumulusci.utils import zip_strip_namespace
from cumulusci.utils import zip_tokenize_namespace
class BaseRetrieveMetadata(BaseSalesforceMetadataApiTask):
task_options = {
'path': {
'description': 'The path to write the retrieved metadata',
'required': True,
},
'unmanaged': {
'description': "If True, changes namespace_inject to replace tokens with a blank string",
},
'namespace_inject': {
'description': "If set, the namespace tokens in files and filenames are replaced with the namespace's prefix",
},
'namespace_strip': {
'description': "If set, all namespace prefixes for the namespace specified are stripped from files and filenames",
},
'namespace_tokenize': {
'description': "If set, all namespace prefixes for the namespace specified are replaced with tokens for use with namespace_inject",
},
'namespaced_org': {
'description': "If True, the tokens %%%NAMESPACED_ORG%%% and ___NAMESPACED_ORG___ will get replaced with the namespace. The default is false causing those tokens to get stripped and replaced with an empty string. Set this if deploying to a namespaced scratch org or packaging org.",
},
}
def _run_task(self):
api = self._get_api()
src_zip = api()
self._extract_zip(src_zip)
self.logger.info('Extracted retrieved metadata into {}'.format(self.options['path']))
def _process_namespace(self, src_zip):
if self.options.get('namespace_tokenize'):
src_zip = zip_tokenize_namespace(src_zip, self.options['namespace_tokenize'], logger=self.logger)
if self.options.get('namespace_inject'):
kwargs = {}
kwargs['unmanaged'] = process_bool_arg(self.options.get('unmanaged', True))
kwargs['namespaced_org'] = process_bool_arg(self.options.get('namespaced_org', False))
kwargs['logger'] = self.logger
src_zip = zip_inject_namespace(src_zip, self.options['namespace_inject'], **kwargs)
if self.options.get('namespace_strip'):
src_zip = zip_strip_namespace(src_zip, self.options['namespace_strip'], logger=self.logger)
return src_zip
def _extract_zip(self, src_zip):
src_zip = self._process_namespace(src_zip)
src_zip.extractall(self.options['path'])
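# Hedged sketch (illustration only; the names below are assumptions, not real
# CumulusCI classes): a concrete retrieve task built on this base class mainly
# has to supply the metadata API object that _run_task() invokes.
#   class RetrieveExample(BaseRetrieveMetadata):
#       api_class = SomeRetrieveApi  # hypothetical API wrapper
#       def _get_api(self):
#           # the callable's return value is a zipfile of metadata, which
#           # _extract_zip() then namespace-processes and unpacks to 'path'
#           return self.api_class(self)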
| 49.075472
| 296
| 0.678201
|
795264c0eb2e7292c1dd16f454213f7cbda121ea
| 1,664
|
py
|
Python
|
origins/migrations/0081_auto_20220106_0124.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | null | null | null |
origins/migrations/0081_auto_20220106_0124.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | null | null | null |
origins/migrations/0081_auto_20220106_0124.py
|
dennereed/paleocore
|
d6da6c39cde96050ee4b9e7213ec1200530cbeee
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.5 on 2022-01-06 01:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('origins', '0080_auto_20211209_1423'),
]
operations = [
migrations.AddField(
model_name='nomen',
name='generic_name',
field=models.CharField(blank=True, help_text='The genus portion of the scientific name, e.g. Homo.', max_length=255, null=True),
),
migrations.AddField(
model_name='nomen',
name='specific_epithet',
field=models.CharField(blank=True, help_text='The trivial (species) portion of the scientific name, e.g. sapiens.', max_length=255, null=True),
),
migrations.AddField(
model_name='nomen',
name='taxon_rank_group',
field=models.CharField(blank=True, choices=[('species-group', 'species-group'), ('genus-group', 'genus-group'), ('family-group', 'family-group')], max_length=255, null=True),
),
migrations.AddField(
model_name='nomen',
name='type_genus',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='nomen',
name='type_species',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AlterField(
model_name='nomen',
name='authorship',
field=models.CharField(blank=True, help_text='The authorship of the naming publication, date included, e.g. King, 1864', max_length=255, null=True),
),
]
| 37.818182
| 186
| 0.602163
|
79526558c7b20033331dcc6b639869c681bc3b2e
| 16,733
|
py
|
Python
|
app/engine/skill_system.py
|
zerorock1312/lt-maker-master
|
82f733683f9dba763a5de8567c41fd7cbcfb0173
|
[
"MIT"
] | null | null | null |
app/engine/skill_system.py
|
zerorock1312/lt-maker-master
|
82f733683f9dba763a5de8567c41fd7cbcfb0173
|
[
"MIT"
] | null | null | null |
app/engine/skill_system.py
|
zerorock1312/lt-maker-master
|
82f733683f9dba763a5de8567c41fd7cbcfb0173
|
[
"MIT"
] | null | null | null |
class Defaults():
@staticmethod
def can_select(unit) -> bool:
return unit.team == 'player'
@staticmethod
def check_ally(unit1, unit2) -> bool:
if unit1 is unit2:
return True
elif unit1.team == 'player' or unit1.team == 'other':
return unit2.team == 'player' or unit2.team == 'other'
else:
return unit2.team == unit1.team
return False
@staticmethod
def check_enemy(unit1, unit2) -> bool:
if unit1.team == 'player' or unit1.team == 'other':
return not (unit2.team == 'player' or unit2.team == 'other')
else:
return not unit2.team == unit1.team
return True
@staticmethod
def can_trade(unit1, unit2) -> bool:
return unit2.position and unit1.team == unit2.team and check_ally(unit1, unit2)
@staticmethod
def exp_multiplier(unit1, unit2) -> float:
return 1.0
@staticmethod
def enemy_exp_multiplier(unit1, unit2) -> float:
return 1.0
@staticmethod
def steal_icon(unit1, unit2) -> bool:
return False
@staticmethod
def has_canto(unit1, unit2) -> bool:
return False
@staticmethod
def empower_heal(unit1, unit2) -> int:
return 0
@staticmethod
def limit_maximum_range(unit, item) -> int:
return 1000
@staticmethod
def modify_maximum_range(unit, item) -> int:
return 0
@staticmethod
def movement_type(unit):
return None
@staticmethod
def sight_range(unit):
return 0
@staticmethod
def empower_splash(unit):
return 0
@staticmethod
def modify_buy_price(unit, item) -> float:
return 1.0
@staticmethod
def modify_sell_price(unit, item) -> float:
return 1.0
@staticmethod
def damage_formula(unit) -> str:
return 'DAMAGE'
@staticmethod
def resist_formula(unit) -> str:
return 'DEFENSE'
@staticmethod
def accuracy_formula(unit) -> str:
return 'HIT'
@staticmethod
def avoid_formula(unit) -> str:
return 'AVOID'
@staticmethod
def crit_accuracy_formula(unit) -> str:
return 'CRIT_HIT'
@staticmethod
def crit_avoid_formula(unit) -> str:
return 'CRIT_AVOID'
@staticmethod
def attack_speed_formula(unit) -> str:
return 'ATTACK_SPEED'
@staticmethod
def defense_speed_formula(unit) -> str:
return 'DEFENSE_SPEED'
# Takes in unit, returns False if not present
# All default hooks are exclusive
formula = ('damage_formula', 'resist_formula', 'accuracy_formula', 'avoid_formula',
'crit_accuracy_formula', 'crit_avoid_formula', 'attack_speed_formula', 'defense_speed_formula')
default_behaviours = (
'pass_through', 'vantage', 'ignore_terrain', 'crit_anyway',
'ignore_region_status', 'no_double', 'def_double', 'alternate_splash',
'ignore_rescue_penalty', 'ignore_forced_movement', 'distant_counter')
# Takes in unit, returns default value
exclusive_behaviours = ('can_select', 'movement_type', 'sight_range', 'empower_splash')
exclusive_behaviours += formula
# Takes in unit and item, returns default value
item_behaviours = ('modify_buy_price', 'modify_sell_price', 'limit_maximum_range', 'modify_maximum_range')
# Takes in unit and target, returns default value
targeted_behaviours = ('check_ally', 'check_enemy', 'can_trade', 'exp_multiplier', 'enemy_exp_multiplier', 'steal_icon', 'has_canto', 'empower_heal')
# Takes in unit, item returns bonus
modify_hooks = (
'modify_damage', 'modify_resist', 'modify_accuracy', 'modify_avoid',
'modify_crit_accuracy', 'modify_crit_avoid', 'modify_attack_speed',
'modify_defense_speed')
# Takes in unit, item, target, mode, returns bonus
dynamic_hooks = ('dynamic_damage', 'dynamic_resist', 'dynamic_accuracy', 'dynamic_avoid',
'dynamic_crit_accuracy', 'dynamic_crit_avoid', 'dynamic_attack_speed', 'dynamic_defense_speed',
'dynamic_multiattacks')
# Takes in unit, item, target, mode returns bonus
multiply_hooks = ('damage_multiplier', 'resist_multiplier')
# Takes in unit
simple_event_hooks = ('on_death',)
# Takes in playback, unit, item, target
combat_event_hooks = ('start_combat', 'cleanup_combat', 'end_combat', 'pre_combat', 'post_combat', 'test_on', 'test_off')
# Takes in actions, playback, unit, item, target, mode
subcombat_event_hooks = ('after_hit', 'after_take_hit')
# Takes in unit, item
item_event_hooks = ('on_add_item', 'on_remove_item', 'on_equip_item', 'on_unequip_item')
def condition(skill, unit) -> bool:
for component in skill.components:
if component.defines('condition'):
if not component.condition(unit):
return False
return True
for behaviour in default_behaviours:
func = """def %s(unit):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
return component.%s(unit)
return False""" \
% (behaviour, behaviour, behaviour)
exec(func)
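# For reference (illustration only): after the exec() above, every name in
# default_behaviours exists as a module-level function.  'vantage', for
# example, expands to the equivalent of:
#   def vantage(unit):
#       for skill in unit.skills:
#           for component in skill.components:
#               if component.defines('vantage'):
#                   if component.ignore_conditional or condition(skill, unit):
#                       return component.vantage(unit)
#       return False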
for behaviour in exclusive_behaviours:
func = """def %s(unit):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
return component.%s(unit)
return Defaults.%s(unit)""" \
% (behaviour, behaviour, behaviour, behaviour)
exec(func)
for behaviour in targeted_behaviours:
func = """def %s(unit1, unit2):
for skill in unit1.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit1):
return component.%s(unit1, unit2)
return Defaults.%s(unit1, unit2)""" \
% (behaviour, behaviour, behaviour, behaviour)
exec(func)
for behaviour in item_behaviours:
func = """def %s(unit, item):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
return component.%s(unit, item)
return Defaults.%s(unit, item)""" \
% (behaviour, behaviour, behaviour, behaviour)
exec(func)
for hook in modify_hooks:
func = """def %s(unit, item):
val = 0
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
val += component.%s(unit, item)
return val""" \
% (hook, hook, hook)
exec(func)
for hook in dynamic_hooks:
func = """def %s(unit, item, target, mode):
val = 0
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
val += component.%s(unit, item, target, mode)
return val""" \
% (hook, hook, hook)
exec(func)
for hook in multiply_hooks:
func = """def %s(unit, item, target, mode):
val = 1
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
val *= component.%s(unit, item, target, mode)
return val""" \
% (hook, hook, hook)
exec(func)
for hook in simple_event_hooks:
func = """def %s(unit):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
component.%s(unit)""" \
% (hook, hook, hook)
exec(func)
for hook in combat_event_hooks:
func = """def %s(playback, unit, item, target, mode):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
if component.ignore_conditional or condition(skill, unit):
component.%s(playback, unit, item, target, mode)""" \
% (hook, hook, hook)
exec(func)
for hook in subcombat_event_hooks:
func = """def %s(actions, playback, unit, item, target, mode):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
component.%s(actions, playback, unit, item, target, mode)""" \
% (hook, hook, hook)
exec(func)
for hook in item_event_hooks:
func = """def %s(unit, item):
for skill in unit.skills:
for component in skill.components:
if component.defines('%s'):
component.%s(unit, item)""" \
% (hook, hook, hook)
exec(func)
def available(unit, item) -> bool:
"""
If any hook reports false, then it is false
"""
for skill in unit.skills:
for component in skill.components:
if component.defines('available'):
if component.ignore_conditional or condition(skill, unit):
if not component.available(unit, item):
return False
return True
def stat_change(unit, stat) -> int:
bonus = 0
for skill in unit.skills:
for component in skill.components:
if component.defines('stat_change'):
if component.ignore_conditional or condition(skill, unit):
d = component.stat_change(unit)
bonus += d.get(stat, 0)
return bonus
def growth_change(unit, stat) -> int:
bonus = 0
for skill in unit.skills:
for component in skill.components:
if component.defines('growth_change'):
if component.ignore_conditional or condition(skill, unit):
d = component.growth_change(unit)
bonus += d.get(stat, 0)
return bonus
def mana(playback, unit, item, target) -> int:
mana = 0
for skill in unit.skills:
for component in skill.components:
if component.defines('mana'):
if component.ignore_conditional or condition(skill, unit):
d = component.mana(playback, unit, item, target)
mana += d
return mana
def can_unlock(unit, region) -> bool:
for skill in unit.skills:
for component in skill.components:
if component.defines('can_unlock'):
if component.ignore_conditional or condition(skill, unit):
if component.can_unlock(unit, region):
return True
return False
def on_upkeep(actions, playback, unit) -> tuple: # actions, playback
for skill in unit.skills:
for component in skill.components:
if component.defines('on_upkeep'):
if component.ignore_conditional or condition(skill, unit):
component.on_upkeep(actions, playback, unit)
return actions, playback
def on_endstep(actions, playback, unit) -> tuple: # actions, playback
for skill in unit.skills:
for component in skill.components:
if component.defines('on_endstep'):
if component.ignore_conditional or condition(skill, unit):
component.on_endstep(actions, playback, unit)
return actions, playback
def on_end_chapter(unit, skill):
for component in skill.components:
if component.defines('on_end_chapter'):
if component.ignore_conditional or condition(skill, unit):
component.on_end_chapter(unit, skill)
def init(skill):
"""
Initializes any data on the parent skill if necessary
"""
for component in skill.components:
if component.defines('init'):
component.init(skill)
def on_add(unit, skill):
for component in skill.components:
if component.defines('on_add'):
component.on_add(unit, skill)
for other_skill in unit.skills:
for component in other_skill.components:
if component.defines('on_gain_skill'):
component.on_gain_skill(unit, skill)
def on_remove(unit, skill):
for component in skill.components:
if component.defines('on_remove'):
component.on_remove(unit, skill)
def re_add(unit, skill):
for component in skill.components:
if component.defines('re_add'):
component.re_add(unit, skill)
def get_text(skill) -> str:
for component in skill.components:
if component.defines('text'):
return component.text()
return None
def get_cooldown(skill) -> float:
for component in skill.components:
if component.defines('cooldown'):
return component.cooldown()
return None
def trigger_charge(unit, skill):
for component in skill.components:
if component.defines('trigger_charge'):
component.trigger_charge(unit, skill)
return None
def get_extra_abilities(unit):
abilities = {}
for skill in unit.skills:
for component in skill.components:
if component.defines('extra_ability'):
if component.ignore_conditional or condition(skill, unit):
new_item = component.extra_ability(unit)
ability_name = new_item.name
abilities[ability_name] = new_item
return abilities
def get_combat_arts(unit):
from app.engine import item_funcs, target_system
combat_arts = {}
for skill in unit.skills:
if not condition(skill, unit):
continue
combat_art = None
combat_art_weapons = [item for item in item_funcs.get_all_items(unit) if item_funcs.available(unit, item)]
combat_art_set_max_range = None
combat_art_modify_max_range = None
for component in skill.components:
if component.defines('combat_art'):
combat_art = component.combat_art(unit)
if component.defines('combat_art_weapon_filter'):
combat_art_weapons = component.combat_art_weapon_filter(unit)
if component.defines('combat_art_set_max_range'):
combat_art_set_max_range = component.combat_art_set_max_range(unit)
if component.defines('combat_art_modify_max_range'):
combat_art_modify_max_range = component.combat_art_modify_max_range(unit)
if combat_art and combat_art_weapons:
good_weapons = []
# Check which of the good weapons meet the range requirements
for weapon in combat_art_weapons:
# Just for testing range
if combat_art_set_max_range:
weapon._force_max_range = max(0, combat_art_set_max_range)
elif combat_art_modify_max_range:
max_range = max(item_funcs.get_range(unit, weapon))
weapon._force_max_range = max(0, max_range + combat_art_modify_max_range)
targets = target_system.get_valid_targets(unit, weapon)
weapon._force_max_range = None
if targets:
good_weapons.append(weapon)
if good_weapons:
combat_arts[skill.name] = (skill, good_weapons)
return combat_arts
def activate_combat_art(unit, skill):
for component in skill.components:
if component.defines('on_activation'):
component.on_activation(unit)
def deactivate_all_combat_arts(unit):
for skill in unit.skills:
for component in skill.components:
if component.defines('on_deactivation'):
component.on_deactivation(unit)
| 37.350446
| 149
| 0.595769
|
795265591cb23f5862ab396366f61bce30369b4a
| 710
|
py
|
Python
|
NTA_detect_domain_flux.py
|
SYANiDE-/ViolentPython
|
1e56ef7f006272b170bba0a642860a892c016baa
|
[
"BSD-2-Clause"
] | 2
|
2019-02-21T08:38:32.000Z
|
2019-02-22T03:15:02.000Z
|
NTA_detect_domain_flux.py
|
SYANiDE-/ViolentPython
|
1e56ef7f006272b170bba0a642860a892c016baa
|
[
"BSD-2-Clause"
] | null | null | null |
NTA_detect_domain_flux.py
|
SYANiDE-/ViolentPython
|
1e56ef7f006272b170bba0a642860a892c016baa
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python2
from scapy.all import *
import sys
def QRTest(pk):
if pk.haslayer(DNSRR) and pk.getlayer(UDP).sport == 53:
rcode = pk.getlayer(DNS).rcode
qname = pk.getlayer(DNSQR).qname
if rcode == 3:
print("[!] Name lookup failed: %s" % qname)
return True
else:
return False
def main():
    if len(sys.argv) != 2:
        print("USAGE: %s %s" % (sys.argv[0], "[pcap file]"))
        sys.exit()
    else:
        UR = 0
        pk = rdpcap(sys.argv[1])
        for p in pk:
            if QRTest(p):
                UR = UR + 1
        print("[!] %d total unanswered DNS requests" % UR)
if __name__=="__main__":
main()
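# Hedged usage note (illustration only; the capture file name is a placeholder):
# the script expects exactly one argument, a pcap to scan for NXDOMAIN
# (rcode 3) responses, e.g.
#   python2 NTA_detect_domain_flux.py suspicious_traffic.pcap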
| 21.515152
| 60
| 0.504225
|
795267097d2f8a808fe305716e45f2dd983268d2
| 6,106
|
py
|
Python
|
canopy/openapi/models/new_study_data_source.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | null | null | null |
canopy/openapi/models/new_study_data_source.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | 1
|
2022-01-31T10:18:08.000Z
|
2022-01-31T10:18:08.000Z
|
canopy/openapi/models/new_study_data_source.py
|
CanopySimulations/canopy-python
|
9ec37e674e65d6fbef0402ac0c612c163d55631e
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Canopy.Api
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from canopy.openapi.configuration import Configuration
class NewStudyDataSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'config_type': 'str',
'user_id': 'str',
'config_id': 'str',
'name': 'str',
'is_edited': 'bool'
}
attribute_map = {
'config_type': 'configType',
'user_id': 'userId',
'config_id': 'configId',
'name': 'name',
'is_edited': 'isEdited'
}
def __init__(self, config_type=None, user_id=None, config_id=None, name=None, is_edited=None, local_vars_configuration=None): # noqa: E501
"""NewStudyDataSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._config_type = None
self._user_id = None
self._config_id = None
self._name = None
self._is_edited = None
self.discriminator = None
if config_type is not None:
self.config_type = config_type
if user_id is not None:
self.user_id = user_id
if config_id is not None:
self.config_id = config_id
if name is not None:
self.name = name
if is_edited is not None:
self.is_edited = is_edited
@property
def config_type(self):
"""Gets the config_type of this NewStudyDataSource. # noqa: E501
:return: The config_type of this NewStudyDataSource. # noqa: E501
:rtype: str
"""
return self._config_type
@config_type.setter
def config_type(self, config_type):
"""Sets the config_type of this NewStudyDataSource.
:param config_type: The config_type of this NewStudyDataSource. # noqa: E501
:type: str
"""
self._config_type = config_type
@property
def user_id(self):
"""Gets the user_id of this NewStudyDataSource. # noqa: E501
:return: The user_id of this NewStudyDataSource. # noqa: E501
:rtype: str
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this NewStudyDataSource.
:param user_id: The user_id of this NewStudyDataSource. # noqa: E501
:type: str
"""
self._user_id = user_id
@property
def config_id(self):
"""Gets the config_id of this NewStudyDataSource. # noqa: E501
:return: The config_id of this NewStudyDataSource. # noqa: E501
:rtype: str
"""
return self._config_id
@config_id.setter
def config_id(self, config_id):
"""Sets the config_id of this NewStudyDataSource.
:param config_id: The config_id of this NewStudyDataSource. # noqa: E501
:type: str
"""
self._config_id = config_id
@property
def name(self):
"""Gets the name of this NewStudyDataSource. # noqa: E501
:return: The name of this NewStudyDataSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this NewStudyDataSource.
:param name: The name of this NewStudyDataSource. # noqa: E501
:type: str
"""
self._name = name
@property
def is_edited(self):
"""Gets the is_edited of this NewStudyDataSource. # noqa: E501
:return: The is_edited of this NewStudyDataSource. # noqa: E501
:rtype: bool
"""
return self._is_edited
@is_edited.setter
def is_edited(self, is_edited):
"""Sets the is_edited of this NewStudyDataSource.
:param is_edited: The is_edited of this NewStudyDataSource. # noqa: E501
:type: bool
"""
self._is_edited = is_edited
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NewStudyDataSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, NewStudyDataSource):
return True
return self.to_dict() != other.to_dict()
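# Hedged usage sketch (not part of the generated client; all field values are
# made-up placeholders): building the model and serialising it to a dict.
if __name__ == "__main__":
    example = NewStudyDataSource(
        config_type="car",        # placeholder
        user_id="user-123",       # placeholder
        config_id="config-456",   # placeholder
        name="baseline",          # placeholder
        is_edited=False,
    )
    # to_dict() keys use the python attribute names, not the json keys
    print(example.to_dict())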
| 27.137778
| 143
| 0.585653
|
7952671517cfbf1dc7e9bc8daf3e7bd62e1d2b1d
| 4,919
|
py
|
Python
|
read_data_cifar100.py
|
ImLaoBJie/yolo3_sort_deepsort
|
ae34fab9a2a7ae122a38bc44d145f049e2808b52
|
[
"MIT"
] | 13
|
2020-07-16T16:59:49.000Z
|
2022-02-06T14:12:32.000Z
|
read_data_cifar100.py
|
ronyclear/yolo3sort
|
6a6028d42f240aa0b79656f36f85f96b9a90ab00
|
[
"MIT"
] | 2
|
2020-10-13T02:03:55.000Z
|
2021-04-24T14:59:04.000Z
|
read_data_cifar100.py
|
ronyclear/yolo3sort
|
6a6028d42f240aa0b79656f36f85f96b9a90ab00
|
[
"MIT"
] | 4
|
2019-12-28T05:59:32.000Z
|
2020-04-12T14:13:35.000Z
|
import pickle
import numpy as np
import os
import matplotlib.pyplot as plt
PATH = 'cifar-100-python/'
REMOVE = list(range(0, 100))
REMAIN = list(np.concatenate([[11, 35, 46, 98], [8, 13, 48, 58], [81, 85]]))
for i in REMAIN:
REMOVE.remove(i)
def filter(image, label):
# filter
remove_index = []
for index, element in enumerate(label):
if int(element) in REMOVE:
remove_index.append(index)
label = np.delete(label, remove_index)
image = np.delete(image, remove_index, 0)
if not REMAIN == []:
value = 0
for index in REMAIN:
label[label == np.int32(index)] = np.int32(value)
value = value + 1
return image, label
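# Illustration (not in the original file): with REMAIN as defined above, the
# surviving CIFAR-100 fine labels are re-indexed in list order, i.e.
#   11 -> 0, 35 -> 1, 46 -> 2, 98 -> 3, 8 -> 4,
#   13 -> 5, 48 -> 6, 58 -> 7, 81 -> 8, 85 -> 9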
def load_CIFAR_batch(filename, N, data_filter: bool):
# 单个batch
# load single batch of cifar
with open(filename, 'rb') as f:
datadict = pickle.load(f, encoding='latin1') # dict类型
image = datadict['data'] # X, ndarray, 像素值
label = datadict['fine_labels'] # Y, list, 标签, 分类
# check the id of fine_labels relevant to the coarse_labels
# label = np.array(label)
# coarse = np.array(datadict['coarse_labels'])
# print(np.unique(label[np.array(np.where(coarse == 19))[0]]))
# reshape, 一维数组转为矩阵10000行3列。每个entries是32x32
# transpose,转置
# astype,复制,同时指定类型
image = image.reshape(N, 3, 32, 32).transpose(0, 2, 3, 1).astype('float')
label = np.array(label)
if data_filter:
image, label = filter(image, label)
return image, label
def load_CIFAR100(path, data_filter: bool):
# 所有batch
# load all of cifar
images = [] # list
labels = []
# 训练集
f = os.path.join(path, 'train')
image, label = load_CIFAR_batch(f, 50000, data_filter)
images.append(image)
labels.append(label)
images = np.concatenate(images) # [ndarray, ndarray] 合并为一个ndarray
labels = np.concatenate(labels)
# 测试集
img_val, lab_val = load_CIFAR_batch(os.path.join(path, 'test'), 10000, data_filter)
return images, labels, img_val, lab_val
# 警告:使用该函数可能会导致内存溢出,可以适当修改减少扩充量
# WARNING:Using this function may cause out of memory and OS breakdown
def creat_more_data(images):
# 通过旋转、翻转扩充数据 expand dataset through rotation and mirroring
images_rot90 = []
images_rot180 = []
images_rot270 = []
img_lr = []
img_ud = []
for index in range(0, images.shape[0]):
band_1 = images[index, :, :, 0]
band_2 = images[index, :, :, 1]
band_3 = images[index, :, :, 2]
# 旋转90, rotating 90 degrees
band_1_rot90 = np.rot90(band_1)
band_2_rot90 = np.rot90(band_2)
band_3_rot90 = np.rot90(band_3)
images_rot90.append(np.dstack((band_1_rot90, band_2_rot90, band_3_rot90)))
# 180
band_1_rot180 = np.rot90(band_1_rot90)
band_2_rot180 = np.rot90(band_2_rot90)
band_3_rot180 = np.rot90(band_3_rot90)
images_rot180.append(np.dstack((band_1_rot180, band_2_rot180, band_3_rot180)))
# 270
band_1_rot270 = np.rot90(band_1_rot180)
band_2_rot270 = np.rot90(band_2_rot180)
band_3_rot270 = np.rot90(band_3_rot180)
images_rot270.append(np.dstack((band_1_rot270, band_2_rot270, band_3_rot270)))
# 左右翻转 flip horizontally
lr1 = np.flip(band_1, 0)
lr2 = np.flip(band_2, 0)
lr3 = np.flip(band_3, 0)
img_lr.append(np.dstack((lr1, lr2, lr3)))
# 上下反转 flip vertical
ud1 = np.flip(band_1, 1)
ud2 = np.flip(band_2, 1)
ud3 = np.flip(band_3, 1)
img_ud.append(np.dstack((ud1, ud2, ud3)))
rot90 = np.array(images_rot90)
rot180 = np.array(images_rot180)
rot270 = np.array(images_rot270)
lr = np.array(img_lr)
ud = np.array(img_ud)
images = np.concatenate((rot90, rot180, rot270, lr, ud))
return images
def shuffle(images, labels):
permutation = np.random.permutation(images.shape[0])
shuffled_dataset = images[permutation, :, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
def data(path, more_data: bool, shuffle_data: bool, data_filter: bool):
images, labels, img_val, lab_val = load_CIFAR100(path, data_filter)
if more_data:
# 扩充数据 expand dataset
images = creat_more_data(np.array(images))
# 扩充标签 expend labels
labels = np.concatenate((labels, labels, labels, labels, labels, labels))
if shuffle_data:
images, labels = shuffle(images, labels)
img_val, lab_val = shuffle(img_val, lab_val)
return images, labels, img_val, lab_val
def main():
images, labels, img_val, lab_val = data(PATH, False, True, True)
# test
print(len(images))
print(len(labels))
plt.imshow(images[0] / 255)
print(images[0])
print(labels[0])
plt.show()
if __name__ == '__main__':
main()
| 29.106509
| 87
| 0.630616
|
7952675efdb3678e523cae7768d4d265a384a903
| 654
|
py
|
Python
|
taco_aug_scripts/get_zw_metal_avg_wxh.py
|
dbash/zerowaste
|
4047ae92a31cece9d848d38a57fd33cc85e7a8bb
|
[
"MIT"
] | 11
|
2021-06-09T04:16:39.000Z
|
2022-03-02T07:33:25.000Z
|
taco_aug_scripts/get_zw_metal_avg_wxh.py
|
dbash/zerowaste
|
4047ae92a31cece9d848d38a57fd33cc85e7a8bb
|
[
"MIT"
] | 2
|
2022-01-11T10:15:15.000Z
|
2022-03-24T23:16:13.000Z
|
create_zerowasteaug/get_zw_metal_avg_wxh.py
|
dbash/zerowaste
|
4047ae92a31cece9d848d38a57fd33cc85e7a8bb
|
[
"MIT"
] | 3
|
2021-11-02T08:26:10.000Z
|
2022-02-24T06:09:58.000Z
|
import json
import os
import shutil
import ntpath
#
f = open('/research/axns2/mabdelfa/TACO/data/coco_format/labels.json',)
data = json.load(f)
total_w = 0
total_h = 0
n = 0
for ann in data['annotations']:
if ann['category_id'] == 3:
total_w += ann['bbox'][2]
total_h += ann['bbox'][3]
n += 1
print(total_w/n, total_h/n)
f = open('/research/axns2/mabdelfa/zerowaste/zerowaste-f/train/labels.json',)
data = json.load(f)
total_w = 0
total_h = 0
n = 0
for ann in data['annotations']:
if ann['category_id'] == 3:
total_w += ann['bbox'][2]
total_h += ann['bbox'][3]
n += 1
print(total_w/n, total_h/n)
| 21.8
| 77
| 0.619266
|
795267c3f831a21d6f02048c881ca79c8ce5cbe4
| 2,090
|
py
|
Python
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 3
|
2015-08-25T13:56:44.000Z
|
2020-03-21T10:26:58.000Z
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 37
|
2020-07-21T07:50:02.000Z
|
2022-03-29T22:31:28.000Z
|
tests/providers/google/cloud/operators/test_video_intelligence_system.py
|
IGIT-CN/airflow
|
a6e5bcd59198afe5716813e84ebc4c59eade532c
|
[
"Apache-2.0"
] | 4
|
2020-07-17T14:02:28.000Z
|
2022-02-23T04:29:58.000Z
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
from airflow.providers.google.cloud.example_dags.example_video_intelligence import GCP_BUCKET_NAME
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_AI_KEY, GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, GoogleSystemTest, provide_gcp_context
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_VIDEO_SOURCE_URL = "https://www.sample-videos.com/video123/mp4/720/big_buck_bunny_720p_1mb.mp4"
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.credential_file(GCP_AI_KEY)
class CloudVideoIntelligenceExampleDagsTest(GoogleSystemTest):
@provide_gcp_context(GCP_AI_KEY)
def setUp(self):
self.create_gcs_bucket(GCP_BUCKET_NAME, location="europe-north1")
self.execute_with_ctx(
cmd=[
"bash",
"-c",
f"curl {GCP_VIDEO_SOURCE_URL} | gsutil cp - gs://{GCP_BUCKET_NAME}/video.mp4"
], key=GCP_GCS_KEY
)
super().setUp()
@provide_gcp_context(GCP_AI_KEY)
def tearDown(self):
self.delete_gcs_bucket(GCP_BUCKET_NAME)
super().tearDown()
@provide_gcp_context(GCP_AI_KEY)
def test_example_dag(self):
self.run_dag('example_gcp_video_intelligence', CLOUD_DAG_FOLDER)
| 38.703704
| 103
| 0.745455
|
795268ec9e57b5fa316a76d81f92ba12d0f3ef4f
| 30,778
|
py
|
Python
|
bin/jbam_tools_v02.py
|
yasinkaymaz/ViralGenomeAssembly
|
03e75ee7358946823660a299e0718213dbbd161c
|
[
"MIT"
] | null | null | null |
bin/jbam_tools_v02.py
|
yasinkaymaz/ViralGenomeAssembly
|
03e75ee7358946823660a299e0718213dbbd161c
|
[
"MIT"
] | null | null | null |
bin/jbam_tools_v02.py
|
yasinkaymaz/ViralGenomeAssembly
|
03e75ee7358946823660a299e0718213dbbd161c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
###standard modules
import argparse #parse initial arguements from command line
import re
import os
import os.path
import pickle
import sys
import subprocess
import textwrap
import glob
import csv
from collections import defaultdict, Counter
#from collections import namedtuple
import collections
#key science modules
import pysam #for reading sam files as well as other files
import numpy as np
import pybedtools as pybt
#import numpy.string_
#import csv
## key bio python modules
#print pybt.check_for_bedtools()
import Bio
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
from Bio import AlignIO
from Bio import Phylo #not used yet but good for reading dendograms from clustalw
###other biologic modules
import pybedtools as pybt
#pybedtools.load_path_config( 'bedtoolspath.txt')#{
###global variable to allow for documentation in location for subcommands
###this is populated as code is traversed initially until main is launched at end
##This work will all be in as start zero and end 1 coordinates UCSC unless noted
## e.g. in ACTG , CT would be designated as 1,3 (this is the same as python)
##built in defaults to incorporated key programs specific to install##
## these never change on an install unlike the config file that can require
## modificaitons between builds, organisms, etc.
#note to programmers: pysam access is all base [0,1) for tabix and fasta files
# Fastafile.fetch(self, reference=None, start=None, end=None, region=None)
# (reference = None, start = None, end = None, region = None)
###############################################################################
# INSTALL SPECIFIC ENVIRONMENTAL PARAMETERS ##################################
###############################################################################
#binpath='/data/bailey/CNV_MIP_R01/pipeline_design/bin'
#path_clustalw='/usr/bin/clustalw'
#pybt.set_tempdir('/scratch')
#pybt.load_path_config( { 'bedtools' , '/share/pkg/bedtools/2.22.0/bin/', 'tabix' , '/share/pkg/tabix/0.2.6/', 'r', '/share/pkg/R/2.15.0/bin/R', 'tabix', '/share/pkg/tabix/0.2.6/'})
###############################################################################
# INITIALIZE GLOBAL VARIABLES #################################################
###############################################################################
subcommands={} #collects data at the point
#https://www.docker.io/learn_more/
###############################################################################
# LIST OF NEEDED DATA SETS ####################################################
###############################################################################
# Common SNPs(137) - SNPs with >= 1% minor allele frequency (MAF), mapping only once to reference assembly.
# Flagged SNPs(137) - SNPs < 1% minor allele frequency (MAF) (or unknown), mapping only once to reference assembly, flagged in dbSnp as "clinically associated" -- not necessarily a risk allele!
# Mult. SNPs(137) - SNPs mapping in more than one place on reference assembly.
#All SNPs(137) - all SNPs from dbSNP mapping to reference assembly.
#ALL SNPS (138)
#gunzip -c snp138.txt.gz | cut -f 2- | bgzip > snp138.bed0.gz
#tabix -s 1 -b 2 -e 3 -0 snp138.bed0.gz
###Get all text UCSC file ###
#gunzip -c simpleRepeat.txt.gz | cut -f 2- | bgzip > simpleRepeat.bed0.gz
#tabix -s 1 -b 2 -e 3 -0 simpleRepeat.bed0.gz
####All SNPs is the most useful since it contains everything else and can be parsed to our specifications
###############################################################################
# MAIN ###############################################################
###############################################################################
aparser=argparse.ArgumentParser()
def main( args ):
"""Main allows selection of the main subcommand (aka function).
Each subcommand launches a separate function. The pydoc subcommand
launches pydoc on this overall program file.
:param args: the main command line arguments passed minus subcommand
"""
#print globals().keys()
if len(args) == 0 or args[0] in ["h", "help", "-h", "--h", "--help","-help"] :
verbosity= 'shortDesc'
if args[0] in ["help" , "--help", "-help"]:
verbosity = 'longDesc'
program_name=os.path.basename(__file__)
print "USAGE:",program_name, "[-h] subcommand [suboptions]"
print "DESCRIPTION: A collection of tools to create and manipulate MIPs"
print "SUBCOMMANDS:"
#tw=TextWrap()
for k in subcommands.keys():
text=subcommands[k][verbosity]
text= textwrap.dedent(text)
if text:
text = "%s: %s " %(k, text )
print textwrap.fill(text,77, initial_indent='', subsequent_indent=' ')
print "HELP:"
print "pydoc detailed documentation of program structure"
print "-h/-help short / long subcommand descriptions"
print "For specific options:",program_name,"[subcommand] --help"
elif args[0] == 'pydoc':
os.system( "pydoc " + os.path.abspath(__file__) )
elif args[0] in subcommands.keys():
#execute sub command function
globals()[args[0]](args[1:])
else:
print "unknown subcommand (" + args[0] + ") use -h for list of subcommands!"
sys.exit(-1)
sys.exit(0) #normal exit
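# Hedged usage note (illustration only): with the dispatch above, the script
# is typically driven as, e.g.
#   python jbam_tools_v02.py ref_merge_contigs --help
# which routes through main() to the ref_merge_contigs() subcommand below.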
#------------------------------------------------------------------------------
###############################################################################
#### SUBCOMMAND DESIGN_REGION ################################################
###############################################################################
#------------------------------------------------------------------------------
###############################################################################
#### SUBCOMMAND: MERGE BAM ALIGNED CONTIGS AGAINST REFERENCE #################
###############################################################################
shortDescText="merge ref aligned contigs that overlap when placed on reference"
longDescText="""merge contigs that have been aligned to a reference--
finds overlaps and deals with poorly trimmed ends (containing extraneous seq)
as well as with poor depth and resolves sequence differences due to these
artifcats
"""
subcommands['ref_merge_contigs'] = { 'shortDesc':shortDescText, 'longDesc': longDescText }
def ref_merge_contigs(args):
"""This subcommand scans a bam file against a region or genome and merges sequences (contigs) resolving discrepancies based on multiple different parameters including distance from end of contig, number of differences from reference, depth of reads, etc.
"""
aparser.add_argument("-c", "--contig_bam", required=True, help='indexed contig bam file against reference')
aparser.add_argument("-n", "--nameChr", required=True, help='indexed contig bam file against reference (added by Cliff and Yasin)')
aparser.add_argument("-r", "--read_bam", required=False, help='indexed read bam file against reference')
aparser.add_argument("-f", "--fasta_reference", required=True, help='indexed read bam file against reference')
aparser.add_argument("-u", "--unmapped_trim_length" ,required=True, type=int, help='number of bases to remove when end not aligned')
aparser.add_argument("-t", "--mapped_trim_max_length", required=True, type=int, help='number of base pairs from end of contig to potentially trim')
aparser.add_argument("-g", "--good_end_max_mismatch", required=True, type=int, help='number of mismatches to tolerate before calling bad end')
aparser.add_argument("--min_unique_bp", required=True,type=int ,help = ' number of unique bases')
aparser.add_argument( "--regions_to_mask", required=False, help='repetitive or other regions to exclude')
aparser.add_argument("--wipe", required=False, action="store_true",help='wipe any intermediate tmp files (fully_reruns process)')
aparser.add_argument("--verbose", required=False, action="store_true",help='provide additional output for debugging')
aparser.add_argument("--check",required=False, action="store_true",help='initial check of genome databases for integrity')
aparser.add_argument("-o","--outfilebase", help='output base path and filename for generated data')
##fix need to change the program name##
fullname= aparser.prog + " ref_merge_contigs"
aparser.prog=fullname
args=aparser.parse_args(args=args)
contigbam=pysam.Samfile(args.contig_bam, 'rb')
###load reference sequence ###
from Bio import SeqIO
refseqs = list( SeqIO.parse(args.fasta_reference, "fasta"))
print refseqs[0].id
print refseqs[0]
newref=[None] * (len(refseqs[0].seq)+1)
###load mask regions ###
tmp=FlexBed.readfile(args.regions_to_mask, args.nameChr)
maskbed=FlexBed(tmp)
if args.read_bam:
readbam=pysam.Samfile(args.read_bam, 'rb')
print "Mapped # of reads:", readbam.mapped
print "Reference of reads mapped to is ", readbam.references
print "Reference length :", readbam.lengths
# Read_pile = readbam.pileup(readbam.references[0])
NucleotidesfromReads = {}
for readcol in readbam.pileup(readbam.references[0]):
readcolnucleotides = []
deletedReads=[]
# print ("\ncoverage at base %s = %s" %(readcol.pos, readcol.n))
for pileupread in readcol.pileups:
# if not pileupread.is_del and not pileupread.is_refskip:
# query position is None if is_del or is_refskip is set.
if pileupread.indel > 0:
# print pileupread.indel, readcol.reference_pos, pileupread.alignment.query_sequence[pileupread.query_position:pileupread.query_position+pileupread.indel+1]
readcolnucleotides.append(pileupread.alignment.query_sequence[pileupread.query_position:pileupread.query_position+pileupread.indel+1])
elif pileupread.is_del:
# print readcol.reference_pos
deletedReads.append(readcol.reference_pos)
readcolnucleotides.append(pileupread.alignment.query_sequence[pileupread.query_position])
else:
readcolnucleotides.append(pileupread.alignment.query_sequence[pileupread.query_position])
readnucleotidecomposition=collections.Counter(readcolnucleotides)
# print readnucleotidecomposition,
ConsensusReadBase=''
if len(readnucleotidecomposition.most_common(2)) > 1:
if readnucleotidecomposition.most_common(2)[0][1] == readnucleotidecomposition.most_common(2)[1][1]:
# print readcol.pos, readnucleotidecomposition.most_common(1)[0], readnucleotidecomposition.most_common(2)[1], refseqs[0].seq[readcol.reference_pos]
ConsensusReadBase=refseqs[0].seq[readcol.reference_pos]
else:
ConsensusReadBase=readnucleotidecomposition.most_common(1)[0][0]
else:
ConsensusReadBase=readnucleotidecomposition.most_common(1)[0][0]
# print readcol.pos, readnucleotidecomposition.most_common(1)[0]
if len(deletedReads) > readcol.n/2:
# print len(deletedReads), readcol.reference_pos
ConsensusReadBase=''
else:
pass
# print readnucleotidecomposition
# print(readnucleotidecomposition.most_common(1))
# print readcol.reference_pos, readnucleotidecomposition.most_common(1)[0][0]
NucleotidesfromReads[readcol.reference_pos]=ConsensusReadBase
print NucleotidesfromReads
# print ('\tbase in read %s = %s' %(pileupread.alignment.query_name,pileupread.alignment.query_sequence[pileupread.query_position]))
# for readread in readcol.pileups:
# print readread.query_position, readread.alignment.seq[readread.query_position]
#HERE STORE READ BASE frequencies in a dict. keep Major variant.
### output basic statistics for the bam file
# print "NO COORDINATE #:", contigbam.nocoordinate
print "Contigs MAPPED #:", contigbam.mapped
# print "REFERENCE #: contigbam.nreferences
print "REFERENCES:", contigbam.references
print "REF LENGTHS:", contigbam.lengths
### NEW REFERENCE GENOME ###
# refbases= [None]* (len refseqs[0].Seq +10000)
### GENERATE LIST OF CONTIGS ###
print "READING CONTIGS..."
contigs=[]
contigdict={}
for s in contigbam:
a=ContigSegment()
# print "a:",a
a.data=s
a.name = a.data.query_name
a.length = a.data.query_length
contigdict[a.name]=a
contigs.append(a)
if a.data.is_unmapped:
###add code here to dump unmapped contigs
continue
else:
a.start = a.data.reference_start
a.end = a.data.reference_end
### SORT CONTIGS DESCENDING ORDER ###
print "SORTING CONTIGS ON THE BASIS OF LENGTH LARGEST TO SMALLEST..."
contigs = sorted(contigs, key=lambda cntg:cntg.data.query_length, reverse=True)
print "Sorting done."
#############################################################################
### RUN THROUGH THE ENTIRE PILEUP ###########################################
pile = contigbam.pileup(contigbam.references[0])
for col in pile:
ref_base=''
contig_bases=[]
for read in col.pileups:
readbase = read.alignment.seq[read.query_position]
ref_base = refseqs[0].seq[col.reference_pos]
# print read.query_position, readbase, ref_base
# print read.query_position
### the entire inserted sequence ###
readseq = read.alignment.query_sequence[read.query_position: read.query_position+read.indel+1]
qname= read.alignment.query_name
### DETERMINE BASES FROM EITHER END (NEGATIVE -- RIGHT)(POSITIVE --LEFT)
endpos=calculate_endposition(read.query_position, read.alignment.query_length)
if contigdict[qname].leftmost_endpos==None:
contigdict[qname].leftmost_endpos=endpos
contigdict[qname].rightmost_endpos=endpos
#print col.pos, ref_base, readbase, readseq, read.indel
if abs (endpos) <= int(args.mapped_trim_max_length):
d= [endpos,read.indel,readbase, ref_base, read.query_position ,readseq ]
#print qname, endpos, read.query_position, read.alignment.query_length, " ",
if endpos <0:
contigdict[qname].right_bases.append(d)
contigdict[qname].right_aligned+=1
if readbase!=ref_base:
contigdict[qname].right_mismatches+=1
else:
contigdict[qname].left_bases.append(d)
contigdict[qname].left_aligned+=1
if readbase!=ref_base:
contigdict[qname].left_mismatches+=1
# print qname, contigdict[qname].left_mismatches,":" , contigdict[qname].left_aligned, " ",contigdict[qname].right_mismatches, ":", contigdict[qname].right_aligned
#############################################################################
print "CONTIG PROCESSING FOR DUP/REPEAT CONTENT AND FOR END ALIGNMENT QUALITY..."
for c in contigs:
### find repetitive content
#print c.leftmost_endpos, c.rightmost_endpos, c.length
if c.data.is_unmapped==False:
qname = c.name
repeatoverlap=maskbed.calc_bp_overlap2interval(contigbam.references[0],c.start, c.end)
c.uniquebp = c.end-c.start - repeatoverlap
#print c.start, c.end, "LEN", c.length, "UNIQUE",c.uniquebp
#check right alignment
for x in c.right_bases:
if x[1]==0 and x[2]==x[3]:
c.right_last_good=x[0]
else:
break
#check left alignment
c.left_bases=sorted(c.left_bases , key= lambda x:x[0],reverse=True)
for x in c.left_bases:
if x[1]==0 and x[2]==x[3]:
c.left_last_good=x[0]
else:
break
#print c.left_bases
#print c.left_last_good
#############################################################################
    pile = contigbam.pileup(contigbam.references[0]) #TODO: learn how to reset the pileup iterator instead of rebuilding it
print "PICK THE BEST BASE AND RECORD CHANGES..."
for col in pile:
ref_base=''
contig_bases=[]
### HERE WE CAN START LOOKING AT GENOMIC REGIONS WITHOUT CONTIGS TO SEE IF THERE IS READ SUPPORT FROM READ ALIGNMENT FILE,
        ### from i=0 to i=endOfGenome: if i is not col.reference_pos, look at the read alignment file to see whether any read aligns to i; in other words, check if col.reference_pos is in NucleotidesfromReads.keys()
        ### if col.reference_pos in NucleotidesfromReads.keys() is True, then take that base as newbase
for read in col.pileups:
readbase = read.alignment.query_sequence[read.query_position]
ref_base = refseqs[0].seq[col.reference_pos]
readseq = read.alignment.query_sequence[read.query_position: read.query_position+read.indel+1]
# print col.pos, "R",ref_base, readbase, readseq
### DETERMINE BASES FROM END ###
endpos=calculate_endposition(read.query_position, read.alignment.query_length)
valid=True
repeat=False
if abs (endpos) <= int(args.mapped_trim_max_length):
last_good_base = contigdict[read.alignment.query_name].left_last_good
if endpos < 0:
last_good_base = contigdict[read.alignment.query_name].right_last_good
if last_good_base == None or abs(endpos) < abs(last_good_base):
valid=False
if contigdict[read.alignment.query_name].uniquebp < args.min_unique_bp:
repeat=True
readdata = [readbase, readseq, endpos, read.indel, read.query_position, read.alignment.query_length, read.alignment.query_name, valid, repeat]
contig_bases.append(readdata)
### out of contig loop #################
allbasecounts = [ c[1] for c in contig_bases ]
allbasecounts = Counter(allbasecounts)
        #VALID CONTIGS ARE ANCHORED IN NONREPETITIVE SEQUENCE
contig_bases_valid=[c for c in contig_bases if c[7]==True and c[8]==False]
validbasecounts=[c[1] for c in contig_bases_valid ]
validbasecounts=Counter(validbasecounts)
validbases=validbasecounts.keys()
# print "here is valid bases", validbases, col.reference_pos
newbase="NOT DETERMINED"
if len (validbases) == 1:
### we have single sequence in all our contigs that are valid ###
# if ref_base == validbases[0] and col.reference_pos in NucleotidesfromReads.keys(): #contigs same as reference AND READ PILE UP IS PRESENT
# newbase=NucleotidesfromReads[col.reference_pos]
# elif ref_base != validbases[0] and col.reference_pos in NucleotidesfromReads.keys():
# newbase= validbases[0].lower() #lowercase (for reference)
if col.reference_pos in NucleotidesfromReads.keys():
newbase=NucleotidesfromReads[col.reference_pos]#*************************************************
else: #contigs represent a substitution
newbase=validbases[0] #uppercase
elif len (validbases) > 1:
if len(validbases[0]) > 1 or len(validbases[1]) > 1:
print "This should be inserted: ", validbases[0], validbases[1]
if ref_base in validbases:
#MULTIPLE WELL PLACED VALID CONTIGS#
maxlen=max( [c[5] for c in contig_bases_valid ] )
lendict={}
for c in contig_bases_valid:
lendict[c[5]] = c[1]
#WARNINGS START
if lendict[maxlen] != ref_base and args.verbose:
print col.pos, ref_base
print "WARNING ref_base", ref_base, " is not the largest contig", c[5], lendict
print " ", contig_bases
for c in contig_bases_valid:
if len(c[1])>10:
                        print col.pos, ref_base, c[1],"WARNING: SKIPPING LARGE INSERTION"
#WARNINGS END
if col.reference_pos in NucleotidesfromReads.keys():
newbase=NucleotidesfromReads[col.reference_pos]#*************************************************
#HERE WHERE YOU NEED TO PICK MAJOR VARIANT!!!
print "Here you should pick Major var:", NucleotidesfromReads[col.reference_pos], col.reference_pos
else:
newbase=ref_base.lower()#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
else:
if args.verbose:
print "WARNING: NO REF BASE DESPITE MULTIPLE BASES--PICK LARGER CONTIG"
contig_bases=sorted(contig_bases, key=lambda cntg:cntg[5], reverse=True)
if col.reference_pos in NucleotidesfromReads.keys():
newbase=NucleotidesfromReads[col.reference_pos]#*************************************************
else:
newbase=contig_bases[0][1]
#HERE WHERE YOU CAN PICK MAJOR VARIANT!!!
elif len (validbases) ==0:
check=[]
for c in contig_bases:
span=abs(c[2])+c[3]
                if c[2] > 0: #left end
#print "LEFT TRIM ",span, args.unmapped_trim_length
if span > int(args.unmapped_trim_length):
contigdict[c[6]].left_trim=args.unmapped_trim_length
starttrim=contigdict[c[6]].left_trim-c[2]+1
newbase=c[1][starttrim:]
###add a removal step
#print "fixiing trim", starttrim, contigdict[c[6]].left_trim, newbase
else:
newbase=''
                else: #c[2] < 0: right end
if len(c[1]) > 5:
print contig_bases
print " ",allbasecounts
print "RIGHT ",span, args.unmapped_trim_length
print
else: #toss it
newbase=''
check.append(newbase)
check=sorted(check,key=lambda x:len(x),reverse=True)
if len(check) > 1:
print "here is check: ", check
if check[0]=='':
check[0]=None
newbase=None
### OUT OF THE LOOP
newref[col.pos]=newbase
if newbase == "NOT DETERMINED" :
print "NEWBASE",newbase
print ref_base, col.pos
print contig_bases
print allbasecounts
print contig_bases_valid
sys.exit(0)
    #############################################################################
### DUMP THE OUTPUT ########################################################
lastpos=None
contig=0
gfile = open ( args.outfilebase + "_genome.fa", 'w')
gfile.write( '>'+args.outfilebase + "\n")
cfile = open ( args.outfilebase + "_contig_new.fa", 'w')
contigseq=''
for seq in newref:
if seq==None:
gfile.write('N')
else:
gfile.write(seq)
contigseq+=seq
if seq==None and lastpos!=None:
name=str(contig).zfill(3)
cfile.write( ">"+name+"\n")
cfile.write(contigseq+"\n")
contigseq=''
contig+=1
lastpos=seq
gfile.close()
cfile.close()
###### DUMP THE OLD CONTIGS ALBEIT TRIMMED ##################################
cfile = open ( args.outfilebase + "_contig_old.fa", 'w')
for c in contigs:
header=">"+c.name
seq=''
begin=None
end=None
if c.data.is_unmapped:
begin=args.unmapped_trim_length
end=c.length-args.unmapped_trim_length
header+="_tt"
else:
#left
#### print c.left_last_good, c.right_last_good, c.name
begin=args.unmapped_trim_length
left="t"
if c.left_last_good !=None and begin <= args.mapped_trim_max_length:
begin=c.left_last_good-1
left="T"
#right
end=c.length-args.unmapped_trim_length
right="t"
if c.right_last_good != None and c.right_last_good <= args.mapped_trim_max_length:
end= c.length + c.right_last_good + 1
right="T"
header+="_"+left+right
header+=str(begin) + "-"+str(end) + " OLDLEN" + str(c.length)
#### print len (c.data.query_sequence), c.length
seq= c.data.query_sequence[begin:end]
cfile.write(header+ "\n"+ seq + "\n")
cfile.close()
def calculate_endposition(readposition, readlength):
    endpos=0 # zero is the edge; positive counts bases from the left end, negative counts bases from the right end
if readposition < readlength/2 :
endpos=readposition + 1 #left side will be positive bases from end
else:
endpos=-1*(readlength-readposition) #right side will be negative bases from end
return endpos
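# Worked illustration of the convention above: for a 100 bp read,
# calculate_endposition(0, 100) returns +1 (first base from the left end) and
# calculate_endposition(99, 100) returns -1 (first base from the right end).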
class ContigSegment():
""" A Data structure for Segments
"""
def __init__(self):
self.seq=None
self.ref_begin=None
self.ref_end=None
self.name=None
self.length=None
self.rightmost_endpos=None
self.leftmost_endpos=None
self.left_mismatches=0
self.right_mismatches=0
self.left_last_good=None
self.right_last_good=None
self.left_bases=[]
self.right_bases=[]
self.left_aligned=0
self.right_aligned=0
self.left_trim=None #number of bases to trim back
self.right_trim =None #base corrections
self.fixes=[] # [[start,end,newbases]] newbases="" for deletion
self.data=None
self.base_aligned_unique=None
##----------------------------------------------------------------------------
class FlexBed:
"""flexible format bed class for in memory manipulations
note: beds are just lists but this provides static methods
to operate on such lists.
the only fixed data requirements are:
#col(1)seq/chr name
#col(2)begin position (zero coordinate)
    #col(3)end position (one coordinate)
#col(4)name
note: a bed is only defined seq:start:end:orient (no more required)
seq
"""
def __init__(self,bed):
"""sets up a standard bed format file"""
self.bed=bed
self.current_positions=[0,0,0,0,0]
self.previous_sequences=[ [] ] * len(self.current_positions)
#0=seq,#1=start,#2=stop,#3=name, #4=score,#5=orientation
def bed_set (self, bed):
""" set the bed with in the bed object , provides no checking """
self.bed_clear()
self.bed=bed
def bed_clear(self):
self.bed=[]
        self.current_positions= [0] * len(self.current_positions)
self.previous_sequences=[ [] ] * len(self.current_positions)
def walker_reset(self,counter=None, position=0):
""" walker reset will reset the current_positions to index position zero
by default """
if counter!=None:
self.current_positions[counter]=position
self.previous_sequences[counter]=[]
else:
self.current_positions= [position] * len(self.current_positions)
self.previous_sequences=[ [] ] * len(self.current_positions)
def walker_get_counters(self):
return list(self.current_positions)
def bed_sort(self):
#TODO: need to actually add a sort function
if len(self.bed)==0:
return
from operator import itemgetter
#print self.bed[0:4]
#check to be sure that first element
self.bed.sort(key=itemgetter(0,1,2))
return
def writefile(self,filename):
outfile=open(filename, 'w')
#print outfile, len(self.bed)
for i in self.bed:
outline=outfile.write("\t".join([str(x) for x in i])+ "\n")
outfile.close()
return
def walker_step_upto(self, seq, start, counter=0):
# for i in xrange (self.current_positions[counter],len(self.bed)):
# feature=self.bed[i]
# if seq!=feature[0]:
return
def walker_get_range(self, seq, start, end, counter=0,trim=False, \
autoincrement=False):
"""walks along chromosome in zero one coordinates
this requires that comparisons are done in order
        sorted sequences and sorted coordinates"""
newbed = []
for i in xrange ( self.current_positions[counter], len(self.bed) ):
feature=self.bed[i]
#print feature
if seq==feature[0] : #bed feature on same seq as our range
if feature[2] < start: #bed feature less than start of range
if autoincrement==True:
self.current_positions[counter]=i+1 #so increment counter
continue #and go to next
newbegin=max(start,feature[1])
newend=min(end,feature[2])
if newend-newbegin > 0: #bed feature within our range
newbed.append(feature)
continue
else: # query feature << bed feature
break # already past it so stop
if seq < feature[0]: #stop and bail current seq position is greater than seq
break
        # return the list of intersecting features (empty if nothing intersects)
        return newbed
def calc_bp_overlap2interval(self,seq,begin,end ):
totalbases=0
for bedsegment in self.bed:
intersect=Interval.intersect2seq( seq,begin,end, bedsegment[0], bedsegment[1],bedsegment[2] )
#print intersect[3]
totalbases+=intersect[3]
return totalbases
@staticmethod
def readfile(filepath, check='chr', checknum=True, seq=0, start=1, stop=2,
name=3,score=4,orient=5):
"""read a tab-delimited bed file and return bed list of lists"""
lines = open(filepath, "r").readlines()
print lines
bed= FlexBed.readlist(lines, check=check,checknum=checknum,seq=seq,start=start,
stop=stop,name=name,score=score,orient=orient)
return bed
@staticmethod
def readlist(textlist, check='chr',checknum=True, seq=0,
start=1, stop=2, name=3,score=4,orient=5):
bed=[]
#print "textlist",textlist
usedcolumns=[seq,start,stop,name,score,orient]
for line in textlist:
if line.startswith("#"):
continue
x=line.strip().split("\t")
y=[ x[seq],x[start],x[stop],None,None,None ]
if name!=None:
y[3]=x[name]
if score!=None:
y[4]=x[score]
if orient!=None:
y[5]=x[orient]
for i in xrange(0,len(x)):
if i in usedcolumns:
continue
else:
y.append(x[i])
if checknum:
y[1]=int(y[1])
y[2]=int(y[2])
if not x[0].startswith(check):
                print >> sys.stderr, x[seq]+ " is not a proper seq name (expected prefix: "+check+")"
sys.exit()
bed.append(y)
return bed
#write a bed
#bed.split("\t")
#print bed
# bed.check(check='chr',checknum=True)
#def bed.readlist(self,tab_delimited_list):
#"""parse list of tab-delimited text into bed list of lists"""
#tablist.strip()
#return
#def bed.check(tab_delmited_bed_list):
###THIS is now used in my code ###
class Interval:
""" ucsc genome coordinates [a,b) """
@staticmethod
def intersect2interval(begin1,end1, begin2,end2):
"intersect [a,b) [c,d) returns new begin and end of intersect"
newbegin=max(begin1,begin2)
newend=min(end1,end2)
intersect=newend-newbegin
if intersect <=0:
return (0,'','')
return intersect, newbegin, newend
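    # Worked example (illustrative only): intersect2interval(0, 10, 5, 20) gives
    # newbegin=5, newend=10 and returns (5, 5, 10); disjoint intervals such as
    # (0, 5) and (10, 20) return (0, '', '').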
@staticmethod
def intervalWellWithinInterval2(begin1,end1,begin2,end2,flank):
"must be ensconced by a certain distance "
#TODO: finish this
@staticmethod
def intervalWithinInterval2(begin1,end1,begin2,end2):
"is [begin1,end1) within [begin2,end2)"
if begin1 >= begin2 and end1 <= end2:
return True
return False
@staticmethod
def intersect2seq( seq1,begin1,end1, seq2,begin2,end2):
if seq1!=seq2:
print "MISS", seq1 , seq2
return [None,None,None,0]
newbegin=max(begin1,begin2)
newend=min(end1,end2)
intersect=newend-newbegin
if intersect<=0:
return [None,None,None,0]
return (seq1,newbegin,newend,intersect)
###WHAT IS THIS CLASS DOING IN MY CODE!!!!!
class coordmapper:
"""given a mapping file this class allows cross mapping.
Mapping that isn't unique will cause failure."""
def __init__(self):
"""list of coordinates"""
self.coord=[]
self.reverseNames={} #pushes list positions
self.forwardNames={} #pushes list positions
def load_coord(self,coordlist):
"""seq, begin, end, name, seq, begin, end, orient,
First seq alway forward/plus orientation.
Requires 0,1 coordinates (aka bed file)
No data checks currently!
"""
self.coord=coordlist
return
def append(self, coordarray):
"""[seq1,begin1,end1, seq2,begin2,end2,orient, name1,name2,data...]
No data checks currently!
"""
self.coord.append(coordarray)
return
def recalculateSeqLookup(self):
"""this recalculates dictionaries for more rapid lookups"""
def checkData(self):
"""NOT IMPLEMENTED: could check consistency of the data"""
def forwardMap(self, seq,begin,end=None):
"""given seq1,begin1,[end1], return seq2 equilvalent"""
return
def reverseMap(self,seq,begin,end=None):
"""given se2,begin,[end2], lookup seq1 equilvaent..."""
return
###############################################################################
###############################################################################
#------------------------------------------------------------------------------
#main is left to the bottom so that global variables can be populated
if __name__ == "__main__":
if len (sys.argv)==1:
sys.argv.append("-h") #if no command then it is a cry for help
main(sys.argv[1:])
| 38.665829
| 255
| 0.662649
|
795268edd6c4ccd02cba51eeca0a18534bcc7c6f
| 3,387
|
py
|
Python
|
python_package/madflow/parameters.py
|
simonthor/madflow
|
cc4ee123d1d6cea7295a8f4be774eff87ea727cf
|
[
"Apache-2.0"
] | 6
|
2021-06-22T06:49:48.000Z
|
2021-06-24T02:38:37.000Z
|
python_package/madflow/parameters.py
|
simonthor/madflow
|
cc4ee123d1d6cea7295a8f4be774eff87ea727cf
|
[
"Apache-2.0"
] | 13
|
2021-06-14T13:27:03.000Z
|
2022-03-28T09:53:46.000Z
|
python_package/madflow/parameters.py
|
simonthor/madflow
|
cc4ee123d1d6cea7295a8f4be774eff87ea727cf
|
[
"Apache-2.0"
] | 1
|
2022-02-15T19:27:36.000Z
|
2022-02-15T19:27:36.000Z
|
"""
Utilities and functions to deal with the parameters of the model
"""
from .config import DTYPE, DTYPECOMPLEX, complex_me, float_me, run_eager
import numpy as np
import tensorflow as tf
from itertools import chain
GS_SIGNATURE = [tf.TensorSpec(shape=[None], dtype=DTYPECOMPLEX)]
ALPHAS_SIGNATURE = [tf.TensorSpec(shape=[None], dtype=DTYPE)]
@tf.function(input_signature=ALPHAS_SIGNATURE)
def _alphas_to_gs(alpha_s):
return complex_me(2.0 * tf.math.sqrt(np.pi * alpha_s))
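# Note: the conversion above is g_s = 2*sqrt(pi*alpha_s), i.e. the usual relation
# alpha_s = g_s**2 / (4*pi), returned as a complex tensor of the same shape.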
class Model:
"""This class is instantiated with knowledge about
all couplings and parameters in the process of interest
and provides an interface to compute them in a per-phase space
basis
Parameters
---------
constants: tuple(DTYPE)
tuple with all constants of the model
functions: tuple(functions)
tuple with all parameters of the model which depend on g_s
"""
def __init__(self, constants, functions):
self._tuple_constants = constants
self._tuple_functions = functions
self._constants = list(constants)
self._to_evaluate = [tf.function(i, input_signature=GS_SIGNATURE) for i in functions]
self._frozen = []
@property
def frozen(self):
"""Whether the model is frozen for a given value of alpha_s or not"""
return bool(self._frozen)
def freeze_alpha_s(self, alpha_s):
"""The model can be frozen to a specific value
of alpha_s such that all phase space points are evaluated at that value
Parameters
----------
alpha_s: float
"""
if self.frozen:
raise ValueError("The model is already frozen")
self._frozen = self._evaluate(float_me([alpha_s]))
def unfreeze(self):
"""Remove the frozen status"""
self._frozen = []
@tf.function(input_signature=ALPHAS_SIGNATURE)
def _evaluate(self, alpha_s):
"""Evaluate all couplings for the given values of alpha_s
Parameters
----------
alpha_s: tensor of shape (None,)
"""
gs = _alphas_to_gs(alpha_s)
results = [fun(gs) for fun in self._to_evaluate]
if not results:
return self._constants
if not self._constants:
return results
return list(chain.from_iterable([self._constants, results]))
def get_masses(self):
"""Get the masses that entered the model as constants"""
masses = []
for key, val in self._tuple_constants._asdict().items():
if key.startswith("mdl_M"):
masses.append(val)
return masses
def parse_parameter(self, parameter_name):
"""Parse a (constant) parameter given its string name"""
if parameter_name == "ZERO":
return 0.0
if hasattr(self._tuple_constants, parameter_name):
return getattr(self._tuple_constants, parameter_name)
if hasattr(self._tuple_functions, parameter_name):
return getattr(self._tuple_functions, parameter_name)
raise AttributeError(f"The model class does not contain parameter {parameter_name}")
def evaluate(self, alpha_s=None):
"""Evaluate alpha_s, if the model is frozen
returns the frozen values"""
if self.frozen:
return self._frozen
return self._evaluate(alpha_s)
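# A minimal usage sketch (hypothetical names, not part of the original module),
# assuming the constants come in as a namedtuple and the g_s-dependent couplings
# as callables of the complex g_s tensor:
#   from collections import namedtuple
#   ModelConstants = namedtuple("ModelConstants", ["mdl_MT", "mdl_MZ"])
#   constants = ModelConstants(float_me(173.0), float_me(91.188))
#   couplings = (lambda gs: -gs,)   # hypothetical coupling proportional to -g_s
#   model = Model(constants, couplings)
#   values = model.evaluate(float_me([0.118, 0.120]))   # one alpha_s per event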
| 34.212121
| 93
| 0.64659
|
79526936d823fb0451afd6fbf6985a5c34d9f1e0
| 2,640
|
py
|
Python
|
saleor/order/tests/test_order_search.py
|
Vultik/saleor
|
dc8548f7ad49cc26950dbfa0fd81f02617350240
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/order/tests/test_order_search.py
|
Vultik/saleor
|
dc8548f7ad49cc26950dbfa0fd81f02617350240
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/order/tests/test_order_search.py
|
Vultik/saleor
|
dc8548f7ad49cc26950dbfa0fd81f02617350240
|
[
"CC-BY-4.0"
] | null | null | null |
from decimal import Decimal
from ...discount import DiscountValueType
from ..models import OrderLine
from ..search import prepare_order_search_vector_value, update_order_search_vector
def test_update_order_search_vector(order):
# given
order.search_vector = ""
order.save(update_fields=["search_vector"])
assert not order.search_vector
# when
update_order_search_vector(order)
# then
assert order.search_vector
def test_prepare_order_search_vector_value(
order_with_lines, address_usa, payment_dummy
):
# given
order = order_with_lines
order.shipping_address = address_usa
order.save(update_fields=["shipping_address"])
order.discounts.create(
value_type=DiscountValueType.FIXED,
name="discount",
translated_name="discount translated",
value=Decimal("20"),
reason="Discount reason",
amount=(order.undiscounted_total - order.total).gross, # type: ignore
)
psp_reference = "TestABC"
payment_dummy.psp_reference = psp_reference
payment_dummy.save(update_fields=["psp_reference"])
# when
search_vector = prepare_order_search_vector_value(order)
# then
assert search_vector
def test_prepare_order_search_vector_value_empty_relation_fields(
order_with_lines, payment_dummy
):
# given
order = order_with_lines
order.billing_address = None
order.shipping_address = None
order.save(update_fields=["shipping_address", "billing_address"])
order.discounts.create(
value_type=DiscountValueType.FIXED,
value=Decimal("20"),
reason="Discount reason",
amount=(order.undiscounted_total - order.total).gross, # type: ignore
)
payment_dummy.psp_reference = None
payment_dummy.save(update_fields=["psp_reference"])
lines = []
for line in order.lines.all():
line.product_sku = None
lines.append(line)
OrderLine.objects.bulk_update(lines, ["product_sku"])
# when
search_vector_value = prepare_order_search_vector_value(order)
# then
assert search_vector_value
def test_prepare_order_search_vector_value_no_relations_data(order, address_usa):
# given
order.shipping_address = address_usa
order.user = None
order.billing_address = None
order.shipping_address = None
order.save(update_fields=["shipping_address", "billing_address", "user"])
assert not order.lines.all()
assert not order.discounts.all()
assert not order.payments.all()
# when
search_vector_value = prepare_order_search_vector_value(order)
# then
assert search_vector_value
| 27.5
| 82
| 0.723106
|
7952697d914fdc80daa0d4bd33b92e0970954677
| 992
|
py
|
Python
|
tests/test_cband_h5.py
|
RocketRoss/setigen
|
fde4434094b5cc0d095c341d0b90e8f65c9e9215
|
[
"MIT"
] | 21
|
2019-01-25T20:44:56.000Z
|
2022-03-16T23:30:26.000Z
|
tests/test_cband_h5.py
|
RocketRoss/setigen
|
fde4434094b5cc0d095c341d0b90e8f65c9e9215
|
[
"MIT"
] | 7
|
2020-07-15T08:54:27.000Z
|
2021-09-24T03:57:30.000Z
|
tests/test_cband_h5.py
|
RocketRoss/setigen
|
fde4434094b5cc0d095c341d0b90e8f65c9e9215
|
[
"MIT"
] | 10
|
2020-03-17T17:59:26.000Z
|
2022-02-01T08:33:11.000Z
|
import pytest
import os
import numpy as np
# from astropy import units as u
import setigen as stg
@pytest.fixture()
def frame_setup_from_h5():
my_path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(my_path, 'assets/sample.fil')
frame = stg.Frame(waterfall=path)
return frame
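# Note: despite the module name, this fixture builds the Frame from the bundled
# filterbank sample (assets/sample.fil); the values asserted below were presumably
# measured from that file.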
def test_setup_from_h5(frame_setup_from_h5):
frame = frame_setup_from_h5
assert frame.fchans == 1024
assert frame.tchans == 32
assert frame.shape == (32, 1024)
assert frame.df == pytest.approx(1.3969838619232178)
assert frame.dt == pytest.approx(1.431655765333332)
assert frame.fmax == pytest.approx(6663999999.873341)
assert frame.fmin == pytest.approx(6663998569.361866)
assert frame.mean() == pytest.approx(484170.38)
assert frame.noise_mean == pytest.approx(461739.75)
assert frame.std() == pytest.approx(253477.6)
assert frame.noise_std == pytest.approx(217443.1)
assert np.min(frame.data) == pytest.approx(15350.96)
| 28.342857
| 57
| 0.714718
|
79526a360c29da4c2b5320e1dc30a9a350d4bff9
| 5,249
|
py
|
Python
|
molar/backend/database/query.py
|
aspuru-guzik-group/molar
|
a3e0c337bd8a41c94b2c25831c95048cc7614f04
|
[
"BSD-3-Clause"
] | 4
|
2021-07-20T18:49:44.000Z
|
2021-10-15T00:58:12.000Z
|
molar/backend/database/query.py
|
aspuru-guzik-group/molar
|
a3e0c337bd8a41c94b2c25831c95048cc7614f04
|
[
"BSD-3-Clause"
] | null | null | null |
molar/backend/database/query.py
|
aspuru-guzik-group/molar
|
a3e0c337bd8a41c94b2c25831c95048cc7614f04
|
[
"BSD-3-Clause"
] | 2
|
2022-01-07T17:57:42.000Z
|
2022-01-13T21:00:20.000Z
|
# std
from typing import Any, Dict, List, Optional, Union
# external
import pkg_resources
import sqlalchemy
from sqlalchemy.orm import aliased, Session
# molar
from molar.backend import schemas
from molar.backend.database.utils import sqlalchemy_to_dict
INFORMATION_QUERY = open(
pkg_resources.resource_filename("molar", "sql/information_query.sql"), "r"
).read()
def resolve_type(type: str, models, alias_registry=None):
if alias_registry is None:
alias_registry = {}
types = type.split(".")
if len(types) == 1:
if isinstance(models, sqlalchemy.orm.attributes.InstrumentedAttribute):
return models[types[0]].astext
type_ = getattr(models, types[0], None)
if type_ is not None:
return type_
if types[0] in alias_registry.keys():
return alias_registry[types[0]]
raise ValueError(f"Type {type} not found in database!")
submodel = getattr(models, types[0], None)
if submodel is None and types[0] in alias_registry.keys():
submodel = alias_registry[types[0]]
if submodel is not None:
return resolve_type(".".join(types[1:]), submodel, alias_registry)
raise ValueError(f"Type {type} not found in database!")
def query_builder(
db: Session,
models,
types: schemas.QueryTypes,
limit: int,
offset: int,
joins: Optional[schemas.QueryJoins] = None,
filters: Optional[schemas.QueryFilters] = None,
order_by: Optional[schemas.QueryOrderBys] = None,
aliases: Optional[schemas.QueryAliases] = None,
):
alias_registry: Dict[str, Any] = {}
# Resolving aliases
if aliases is not None:
if not isinstance(aliases, list):
aliases = [aliases]
for alias in aliases:
alias_registry[alias.alias] = aliased(
resolve_type(alias.type, models), name=alias.alias
)
# Resolving main types
if not isinstance(types, list):
types = [types]
db_objs = []
for type_ in types:
db_obj = resolve_type(type_, models, alias_registry)
db_objs.append(db_obj)
query = db.query(*db_objs)
if joins is not None:
if not isinstance(joins, list):
joins = [joins]
for join in joins:
joined_table = resolve_type(
join.type,
models,
alias_registry,
)
onclause = None
if join.on is not None:
onclause = resolve_type(
join.on.column1, models, alias_registry
) == resolve_type(join.on.column2, models, alias_registry)
query = query.join(
joined_table,
onclause,
isouter=True if join.join_type == "outer" else False,
full=True if join.join_type == "full" else False,
)
if filters is not None:
filters = expand_filters(filters, models, alias_registry)
query = query.filter(filters)
if order_by is not None:
if not isinstance(order_by, list):
order_by = [order_by]
order_bys = []
for ob in order_by:
t = resolve_type(ob.type, models, alias_registry)
if ob.order == "asc":
order_bys.append(t.asc())
else:
order_bys.append(t.desc())
query = query.order_by(*order_bys)
query = query.offset(offset).limit(limit)
return query, db_objs, types
def process_query_output(db_objs, query_results, types):
if len(db_objs) == 1:
return [sqlalchemy_to_dict(db_objs[0], r, types[0]) for r in query_results]
results = []
for result in query_results:
result_dict = {}
for res, db_obj, t in zip(result, db_objs, types):
result_dict.update(sqlalchemy_to_dict(db_obj, res, t, add_table_name=True))
results.append(result_dict)
return results
def expand_filters(filters, models, alias_registry):
if isinstance(filters, schemas.QueryFilterList):
        # recurse first, but keep the original QueryFilterList so its `op` stays readable
        expanded = [expand_filters(f, models, alias_registry) for f in filters.filters]
        if filters.op == "and":
            return sqlalchemy.and_(*expanded)
        elif filters.op == "or":
            return sqlalchemy.or_(*expanded)
        else:
            raise ValueError(f"Filter operator not supported: {filters.op}")
elif isinstance(filters, schemas.QueryFilter):
type = resolve_type(filters.type, models, alias_registry)
operator = filters.op
if filters.op == "==":
operator = "__eq__"
elif filters.op == "!=":
operator = "__ne__"
elif filters.op == ">":
operator = "__gt__"
elif filters.op == "<":
operator = "__lt__"
elif filters.op == ">=":
operator = "__ge__"
elif filters.op == "<=":
operator = "__le__"
# If value is another column
value = filters.value
if isinstance(value, str):
try:
value_type = resolve_type(value, models, alias_registry)
except ValueError:
pass
else:
value = value_type
return getattr(type, operator)(value)
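# Hedged example (field names are hypothetical): a QueryFilter with
# type="molecule.created_on", op=">=", value="2021-01-01" resolves the column,
# maps ">=" to "__ge__" and returns models.molecule.created_on >= "2021-01-01",
# which query_builder then hands to query.filter().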
| 30.34104
| 87
| 0.594589
|
79526a522d81da774830d310028e606e8f0cd906
| 14,324
|
py
|
Python
|
metal/unit.py
|
cHemingway/test
|
7fcbd56ad6fe5368b927ea146363bf3d69cd7617
|
[
"Apache-2.0"
] | 24
|
2020-07-24T18:33:58.000Z
|
2022-03-23T21:00:19.000Z
|
metal/unit.py
|
cHemingway/test
|
7fcbd56ad6fe5368b927ea146363bf3d69cd7617
|
[
"Apache-2.0"
] | 4
|
2020-07-27T05:44:26.000Z
|
2021-09-02T16:05:47.000Z
|
metal/unit.py
|
cHemingway/test
|
7fcbd56ad6fe5368b927ea146363bf3d69cd7617
|
[
"Apache-2.0"
] | 1
|
2020-07-25T15:13:20.000Z
|
2020-07-25T15:13:20.000Z
|
import json
import sys
class Control:
enter = "enter"
exit = "exit"
cancel = "cancel"
class Level:
info = "info"
warning = "expect"
assertion = "assert"
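    # Note: the attribute names describe the severity while the string values are the
    # raw level tags stored in the recorded test data ("expect" marks a soft
    # expectation/warning, "assert" a hard assertion), as consumed by Scope.append_test.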
class Scope:
def __init__(self, name):
self.executed = 0
self.errors = 0
self.warnings = 0
self.children = []
self.tests = []
self.cancelled = False
self.parent = None
self.name = name
self.description = None
def __iadd__(self, rhs):
self.executed += rhs.executed
self.errors += rhs.errors
self.warnings += rhs.warnings
self.children.append(rhs)
return self
#def cancel(self, ):
def append_test(self, data):
if "level" in data and "condition" in data:
self.executed += 1
if not data["condition"]:
if data["level"] == "assert":
self.errors += 1
elif data["level"] == "expect":
self.warnings += 1
self.tests.append(data)
def to_dict(self):
res = {
"summary": {
"executed": self.executed,
"warnings": self.warnings,
"errors": self.errors
},
"cancelled": self.cancelled,
"children": [ch.to_dict() for ch in self.children],
"tests": self.tests
}
if self.description:
res["description"] = self.description
return res
class MainScope(Scope):
def __init__(self):
super().__init__('<main>')
def loc_str(file, line):
return "{}({})".format(file.replace('\\\\', '\\'), line)
def message_str(file, line, level, condition):
return loc_str(file, line) + (" assertion " if level == "assert" else " expectation ") + ("succeeded " if condition else "failed ")
class Reporter:
def __init__(self):
self.hrf_sink = sys.stdout
        self.print_level = 'warning' # one of: all, warning, error
self.json_sink = None
self.__scope_stack = [MainScope()]
def should_print(self, level = Level.info, condition = 1):
if self.hrf_sink is None:
return False
if self.print_level == 'all':
return True
if self.print_level == 'warning':
return (not condition) and level >= Level.warning
return (not condition) and level == Level.assertion
@property
def current_scope(self):
return self.__scope_stack[len(self.__scope_stack) - 1]
def critical(self, file, line, control):
if self.hrf_sink:
self.hrf_sink.write("{} critical check failed, cancelling\n".format(loc_str(file, line)))
self.current_scope.cancelled = True
self.current_scope.append_test({"type": "critical", "file": file, "line": line, "control": control})
def loop(self, file, line, control):
if self.hrf_sink:
self.hrf_sink.write("{} for loop cancelled\n".format(loc_str(file, line)))
self.current_scope.append_test({"type": "loop", "file": file, "line": line, "control": control})
def ranged(self, file, line, control, condition_or_length, range_info, range_info_values=None):
data = {"type": "ranged", "file": file, "line": line, "control": control, "length": condition_or_length, "control": control}
if control == "cancel":
if self.hrf_sink:
self.hrf_sink.write("{} ranged test cancelled at pos {}\n".format(loc_str(file, line), condition_or_length))
elif control =="exit":
if self.hrf_sink:
self.hrf_sink.write("{} ranged test completed with {} elements\n".format(loc_str(file, line), condition_or_length))
elif control == 'enter':
lhs, lhs_len, rhs, rhs_len = range_info
data['range_info'] = {'lhs': lhs, 'lhs_len': lhs_len, 'rhs': rhs, 'rhs_len': rhs_len}
if range_info_values:
lhs_v, lhs_len_v, rhs_v, rhs_len_v = range_info_values
data['range_info_values'] = {'lhs': lhs_v, 'lhs_len': lhs_len_v, 'rhs': rhs_v, 'rhs_len': rhs_len_v}
self.hrf_sink.write("{} ranged test starting with {} elements for {}[0 ... {}] and {}[0 ... {}]: [{}[0 ... {}], {}[0 ... {}]]\n"
.format(loc_str(file, line), condition_or_length, lhs, lhs_len, rhs, rhs_len, lhs_v, lhs_len_v, rhs_v, rhs_len_v))
else:
self.hrf_sink.write("{} ranged test starting with {} elements for {}[0 ... {}] and {}[0 ... {}]\n"
.format(loc_str(file, line), condition_or_length, lhs, lhs_len, rhs, rhs_len))
else:
raise Exception("Unknown control {}".format(control))
self.current_scope.append_test(data)
def call(self, file, line, control, condition, function, description=None):
if control == 'enter':
scp = Scope(function if function else "**unknown**")
scp.parent = self.current_scope
if description:
scp.description = description
self.__scope_stack.append(scp)
self.hrf_sink.write("{} entering test case {}\n"
.format(loc_str(file, line), (" " + description) if description else function))
elif control == 'exit':
sc = self.current_scope
self.hrf_sink.write("{} {} test case {}, {} with : {{executed: {}, warnings: {}, errors: {}}}\n"
.format(loc_str(file, line),
'cancelling' if sc.cancelled else 'exiting',
(" " + description) if description else function,
("succeeded " if condition == 0 else "failed "),
sc.executed, sc.warnings, sc.errors))
self.__scope_stack.pop()
self.__scope_stack[len(self.__scope_stack) - 1] += sc
else:
raise Exception('Unknown control {}'.format(control))
def log(self, file, line, message):
if self.hrf_sink:
self.hrf_sink.write("{} log: {}\n".format(loc_str(file, line), message))
self.current_scope.append_test({"type": "log", "file": file, "line": line, "message": message})
def checkpoint(self, file, line):
if self.should_print():
self.hrf_sink.write("{} checkpoint\n".format(loc_str(file, line)))
self.current_scope.append_test({"type": "checkpoint", "file": file, "line": line})
def message(self, file, line, level, condition, message):
if self.hrf_sink:
self.hrf_sink.write("{} message: {}\n".format(message_str(file, line, level, condition), message))
self.current_scope.append_test({"type": "message", "file": file, "line": line, "message": message, "level": level, "condition": condition})
def plain (self, file, line, level, condition, description, value=None):
if self.hrf_sink and value:
self.hrf_sink.write("{} [plain]: {}: [{}]\n".format(message_str(file, line, level, condition), description, value))
elif self.hrf_sink:
self.hrf_sink.write("{} [plain]: {}\n".format(message_str(file, line, level, condition), description))
self.current_scope.append_test({"type": "plain", "file": file, "line": line, "description": description, "level": level, "condition": condition})
def equal(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} == {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [equal]: {}: [{} == {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [equal]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "equal", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def not_equal(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} != {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [not_equal]: {}: [{} != {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [not_equal]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "not_equal", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def ge(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} >= {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [ge]: {}: [{} >= {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [ge]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "ge", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def le(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} <= {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [le]: {}: [{} <= {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [le]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "le", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def greater(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} > {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [greater]: {}: [{} > {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [greater]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "greater", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def lesser(self, file, line, level, condition, lhs, rhs, lhs_val=None, rhs_val=None):
ex = '{} < {}'.format(lhs, rhs) if rhs else lhs
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [lesser]: {}: [{} < {}]\n".format(message_str(file, line, level, condition), ex, lhs_val, rhs_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [lesser]: {}\n".format(message_str(file, line, level, condition), ex))
self.current_scope.append_test({"type": "lesser", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val})
def close(self, file, line, level, condition, lhs, rhs, tolerance, lhs_val=None, rhs_val=None, tolerance_val=None):
expression = lhs
if rhs and tolerance:
expression = '{} = {} +- ~ {}'.format(lhs, rhs, tolerance)
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [close]: {}: [{} = {} +- {}]\n".format(message_str(file, line, level, condition), expression, lhs_val, rhs_val, tolerance_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [close]: {}\n".format(message_str(file, line, level, condition), expression))
self.current_scope.append_test({"type": "close", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val, "tolerance": tolerance, "tolerance_val": tolerance_val})
def close_relative(self, file, line, level, condition, lhs, rhs, tolerance, lhs_val=None, rhs_val=None, tolerance_val=None):
expression = lhs
if rhs and tolerance:
expression = '{} = {} +- ~ {}'.format(lhs, rhs, tolerance)
if self.hrf_sink and lhs_val and rhs_val:
self.hrf_sink.write("{} [close_relative]: {}: [{} = {} +- ~ {}]\n".format(message_str(file, line, level, condition), expression, lhs_val, rhs_val, tolerance_val))
elif self.hrf_sink:
self.hrf_sink.write("{} [close_relative]: {}\n".format(message_str(file, line, level, condition), expression))
self.current_scope.append_test({"type": "close", "file": file, "line": line, "lhs" : lhs, "rhs": rhs, "level": level, "condition": condition, "lhs_val" : lhs_val, "rhs_val": rhs_val, "tolerance": tolerance, "tolerance_val": tolerance_val})
def predicate(self, file, line, level, condition, function, args, function_val=None, args_val=None):
if self.hrf_sink and function_val and args_val:
self.hrf_sink.write("{} [predicate]: {}({}): [{}({})]\n".format(message_str(file, line, level, condition), function, ', '.join(args), function_val, ', '.join(args_val)))
elif self.hrf_sink:
self.hrf_sink.write("{} [predicate]: {}({})\n".format(message_str(file, line, level, condition), function, ', '.join(args)))
self.current_scope.append_test({"type": "close", "file": file, "line": line, "function": function, "args": args, "level": level, "condition": condition, "function_val": function_val, "args_val": args_val})
def report(self, file, line, condition):
if self.hrf_sink:
self.hrf_sink.write("{}: full test report: {{executed: {}, warnings: {}, errors: {}}}\n"
.format(loc_str(file, line), self.__scope_stack[0].executed, self.__scope_stack[0].warnings, self.__scope_stack[0].errors))
if self.json_sink:
self.json_sink.write(json.dumps(self.__scope_stack[0].to_dict()))
| 53.447761
| 247
| 0.590687
|
79526b32e1bed3a119aff208442acaf6a1cd2382
| 9,656
|
py
|
Python
|
excel_validator.py
|
Vinaychalluru/excel_validator
|
9e5f7d643694abccd807a5417030f0e67a4b1a40
|
[
"MIT"
] | null | null | null |
excel_validator.py
|
Vinaychalluru/excel_validator
|
9e5f7d643694abccd807a5417030f0e67a4b1a40
|
[
"MIT"
] | null | null | null |
excel_validator.py
|
Vinaychalluru/excel_validator
|
9e5f7d643694abccd807a5417030f0e67a4b1a40
|
[
"MIT"
] | 1
|
2019-11-08T03:09:03.000Z
|
2019-11-08T03:09:03.000Z
|
#!/usr/bin/python -u
# -*- coding: UTF-8 -*-
import argparse
import os.path
import sys
import time
import yaml
from openpyxl.reader.excel import load_workbook
from openpyxl.styles import PatternFill
from openpyxl.utils import column_index_from_string, get_column_letter
from progress.bar import Bar
from validator import *
def isValid(type, value, coordinate, errors, value2 = None):
    '''Takes the validation type assigned to the cell, the cell value,
    the cell coordinates and the running list of validation errors.
    '''
#Assigning each class to the validation type
classmap = {
'NotBlank': NotBlankValidator.NotBlankValidator,
'Type': TypeValidator.TypeValidator,
'Length': LengthValidator.LengthValidator,
'Regex': RegexValidator.RegexValidator,
'Email': EmailValidator.EmailValidator,
'Choice': ChoiceValidator.ChoiceValidator,
'Date': DateTimeValidator.DateTimeValidator,
'ExcelDate': ExcelDateValidator.ExcelDateValidator,
'Country': CountryValidator.CountryValidator,
'Conditional': ConditionalValidator.ConditionalValidator
}
violations = []
#name is the validation type name (NotBlank, Regex, Length ,.... etc)
#data is the value assigned by the user to be validated ( 3 chars , regex pattern , ... etc)
name = list(type.keys())[0]
data =list(type.values())[0]
validator = classmap[name](data)
#conditional validator will take two arguments to evaluate
if name != 'Conditional':
result = validator.validate(value)
else:
result = validator.validate(value, value2)
    #If the cell value breaks the validation rule, append to the violations list
if (result == False):
violations.append(validator.getMessage())
if len(violations) > 0:
errors.append((coordinate, violations))
#return result != False
#result is the output of each validation for each cell
if (result == False):
return False
else:
return True
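# Hedged illustration (the exact options each rule accepts live in the bundled
# `validator` package, so the argument below is hypothetical): a rule such as
# {'Length': {'max': 3}} applied to value "abcd" at cell "A2" would fail,
# append ("A2", [<message>]) to `errors` and make isValid return False.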
def setSettings(config):
'''function takes the config yaml file and converts it to dictionary
'''
settings = {}
#excludes are the columns that we won't validate
excludes = []
print ("Get validation config " + config)
try:
stream = open(config, 'r')
except IOError as e:
print (e)
exit(1)
config = yaml.safe_load(stream)
#Make sure that the yaml file follows the rules
if 'validators' in config and 'columns' in config.get('validators'):
settings['validators'] = config.get('validators').get('columns')
else:
return False
if 'default' in config.get('validators') :
settings['defaultValidator'] = config.get('validators').get('default')[0]
else:
settings['defaultValidator'] = None
if 'excludes' in config:
for column in config.get('excludes'):
excludes.append(column_index_from_string(column))
settings['excludes'] = excludes
else:
settings['excludes'] = []
if 'range' in config:
settings['range'] = config.get('range')[0] + "1:" + config.get('range')[1]
else:
settings['range'] = None
if 'header' in config:
settings['header'] = config.get('header')
else:
settings['header'] = True
return settings
def markErrors(errors, excelFile, sheetName, tmpDir, printErrors = False):
    ''' Function takes the list of (coordinates, violations) errors, the Excel file,
    the sheet name and the output directory.
    '''
progressBar = Bar('Processing', max = len(errors))
#Checking size of the file
if os.path.getsize(excelFile) > 10485760:
print ("Log broken cells")
for error in errors:
progressBar.next()
            if printErrors and str(printErrors).lower() == "true":
print ("Broken Excel cell: " + error[0] + " [ "+ ','.join(error[1]) + " ]")
else:
print ("Broken Excel cell: " + error[0])
        progressBar.finish()
return
#open Excel file
newFile = os.path.join(tmpDir , "errors_" + time.strftime("%Y-%m-%d") + "_" + str(int(time.time())) + "_" + os.path.basename(excelFile))
fileName,fileExtension = os.path.splitext(excelFile)
if fileExtension == '.xlsm':
wb = load_workbook(excelFile, keep_vba=True, data_only=True)
else:
wb = load_workbook(excelFile, data_only=True)
creator = wb.properties.creator
ws = wb.get_sheet_by_name(sheetName)
#fill the error values with red pattern
redFill = PatternFill(start_color='FFFF0000',
end_color = 'FFFF0000',
fill_type = 'solid')
for error in errors:
progressBar.next()
print ("Broken Excel cell: " + error[0])
cell = ws[error[0]]
if printErrors:
cell.value = ','.join(error[1])
cell.fill = redFill
progressBar.finish()
#save error excel file
wb.properties.creator = creator
print ("[[Save file: " + newFile + "]]")
try:
wb.save(newFile)
except Exception as e:
print (e)
exit(1)
return newFile
def validate(settings, excelFile, sheetName, tmpDir, printErrors = False):
    '''the main validation function: takes the settings dictionary (validators)
and returns the validation result
'''
print ("Validate Excel Sheet " + sheetName)
errors = []
#open Excel file
print ("Parse Excel file")
wb = load_workbook(excelFile, keep_vba=True, data_only=True, read_only=True)
#ws = wb.get_sheet_by_name(sheetName)
ws = wb[sheetName]
progressBar = Bar('Processing', max=ws.max_row)
if 'range' in settings and settings['range'] != None:
settings['range'] = settings['range'] + (str)(ws.max_row)
# range now equals A1:D(150) for example
#iterate excel sheet
rowCounter = 0
for row in ws.iter_rows(settings['range']):
progressBar.next()
columnCounter = 0
rowCounter = rowCounter + 1
#do not parse empty rows
if isEmpty(row):
continue
for cell in row:
columnCounter = columnCounter + 1
try:
value = cell.value
except ValueError:
                #case when it is not possible to read the value at all, for any reason
column = get_column_letter(columnCounter)
coordinates = "%s%d" % (column, rowCounter)
errors.append((coordinates, ValueError))
#find header (first) row
                #if the configured header value (e.g. "ID") is found, this is the header row
if settings['header'] != True:
if value == settings['header']:
                        #Replace the header setting with True once the header value has been seen
settings['header'] = True
                        # skip the header row
break
#skip excludes column
if hasattr(cell, 'column') and cell.column in settings['excludes']:
continue
column = get_column_letter(columnCounter)
coordinates = "%s%d" % (column, rowCounter)
## column:A Coordinate:A2, for example
if column in settings['validators']:
for type in settings['validators'][column]:
name = list(type.keys())[0] # notblank, Regex, Length
if name != 'Conditional':
res = isValid(type, value, coordinates, errors)
else:
fieldB = list(type.values())[0]['fieldB']
value2 = ws[fieldB + str(rowCounter)].value
res = isValid(type, value, coordinates, errors, value2)
if not res:
break
elif settings['defaultValidator'] != None:
isValid(settings['defaultValidator'], value, coordinates, errors)
progressBar.finish()
print ("Found %d error(s)" % len(errors))
if (len(errors) > 0):
return markErrors(errors, excelFile, sheetName, tmpDir, printErrors)
return True
def isEmpty(row):
''' function to get if the row is empty or not
'''
for cell in row:
if cell.value:
return False
return True
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'Mark validation errors in Excel sheet.')
parser.add_argument('config', metavar = 'config', help = 'Path to YAML config file')
parser.add_argument('file', metavar = 'file', help = 'Path to excel sheet file')
parser.add_argument('sheetName', metavar = 'sheetName', help = 'Excel Sheet Name')
parser.add_argument('tmpDir', metavar = 'tmpDir', help = 'Temporary directory path')
parser.add_argument('--errors', metavar = 'errors', help = 'Print errors messages in cells marked as invalid')
args = parser.parse_args()
settings = setSettings(args.config)
if settings == False:
sys.exit("Incorrect config file " + args.config)
try:
results = validate(settings, args.file, args.sheetName, args.tmpDir, args.errors)
except Exception as e:
sys.exit("Error occured: " + str(e))
    # if results == True, the file passed validation and all values are correct
    # if results != True and is not None, it is the path of the annotated error file
    # if results != True and is None, the invalid file was too large to annotate; exit
if results != True:
if results:
sys.exit("Validation errors store in: [[" + results + "]]")
else:
sys.exit("Invalid file is too big to generate annotated Excel file")
sys.exit(0)
| 33.880702
| 140
| 0.613815
|
79526c03d69ed64515442e861d8f14dbac929b0b
| 15,175
|
py
|
Python
|
google/ads/google_ads/v6/proto/resources/ad_group_ad_asset_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/resources/ad_group_ad_asset_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/resources/ad_group_ad_asset_view_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/resources/ad_group_ad_asset_view.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.common import policy_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_common_dot_policy__pb2
from google.ads.google_ads.v6.proto.enums import asset_field_type_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__field__type__pb2
from google.ads.google_ads.v6.proto.enums import asset_performance_label_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__performance__label__pb2
from google.ads.google_ads.v6.proto.enums import policy_approval_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__approval__status__pb2
from google.ads.google_ads.v6.proto.enums import policy_review_status_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__review__status__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/resources/ad_group_ad_asset_view.proto',
package='google.ads.googleads.v6.resources',
syntax='proto3',
serialized_options=b'\n%com.google.ads.googleads.v6.resourcesB\027AdGroupAdAssetViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V6.Resources\312\002!Google\\Ads\\GoogleAds\\V6\\Resources\352\002%Google::Ads::GoogleAds::V6::Resources',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n>google/ads/googleads/v6/resources/ad_group_ad_asset_view.proto\x12!google.ads.googleads.v6.resources\x1a+google/ads/googleads/v6/common/policy.proto\x1a\x34google/ads/googleads/v6/enums/asset_field_type.proto\x1a;google/ads/googleads/v6/enums/asset_performance_label.proto\x1a:google/ads/googleads/v6/enums/policy_approval_status.proto\x1a\x38google/ads/googleads/v6/enums/policy_review_status.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1cgoogle/api/annotations.proto\"\xda\x05\n\x12\x41\x64GroupAdAssetView\x12J\n\rresource_name\x18\x01 \x01(\tB3\xe0\x41\x03\xfa\x41-\n+googleads.googleapis.com/AdGroupAdAssetView\x12\x44\n\x0b\x61\x64_group_ad\x18\t \x01(\tB*\xe0\x41\x03\xfa\x41$\n\"googleads.googleapis.com/AdGroupAdH\x00\x88\x01\x01\x12:\n\x05\x61sset\x18\n \x01(\tB&\xe0\x41\x03\xfa\x41 \n\x1egoogleads.googleapis.com/AssetH\x01\x88\x01\x01\x12Y\n\nfield_type\x18\x02 \x01(\x0e\x32@.google.ads.googleads.v6.enums.AssetFieldTypeEnum.AssetFieldTypeB\x03\xe0\x41\x03\x12\x19\n\x07\x65nabled\x18\x08 \x01(\x08\x42\x03\xe0\x41\x03H\x02\x88\x01\x01\x12[\n\x0epolicy_summary\x18\x03 \x01(\x0b\x32>.google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummaryB\x03\xe0\x41\x03\x12n\n\x11performance_label\x18\x04 \x01(\x0e\x32N.google.ads.googleads.v6.enums.AssetPerformanceLabelEnum.AssetPerformanceLabelB\x03\xe0\x41\x03:\x8c\x01\xea\x41\x88\x01\n+googleads.googleapis.com/AdGroupAdAssetView\x12Ycustomers/{customer_id}/adGroupAdAssetViews/{ad_group_id}~{ad_id}~{asset_id}~{field_type}B\x0e\n\x0c_ad_group_adB\x08\n\x06_assetB\n\n\x08_enabled\"\xc4\x02\n\x1b\x41\x64GroupAdAssetPolicySummary\x12S\n\x14policy_topic_entries\x18\x01 \x03(\x0b\x32\x30.google.ads.googleads.v6.common.PolicyTopicEntryB\x03\xe0\x41\x03\x12\x64\n\rreview_status\x18\x02 \x01(\x0e\x32H.google.ads.googleads.v6.enums.PolicyReviewStatusEnum.PolicyReviewStatusB\x03\xe0\x41\x03\x12j\n\x0f\x61pproval_status\x18\x03 \x01(\x0e\x32L.google.ads.googleads.v6.enums.PolicyApprovalStatusEnum.PolicyApprovalStatusB\x03\xe0\x41\x03\x42\x84\x02\n%com.google.ads.googleads.v6.resourcesB\x17\x41\x64GroupAdAssetViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v6/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V6.Resources\xca\x02!Google\\Ads\\GoogleAds\\V6\\Resources\xea\x02%Google::Ads::GoogleAds::V6::Resourcesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_common_dot_policy__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__field__type__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__performance__label__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__approval__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__review__status__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_ADGROUPADASSETVIEW = _descriptor.Descriptor(
name='AdGroupAdAssetView',
full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A-\n+googleads.googleapis.com/AdGroupAdAssetView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='ad_group_ad', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.ad_group_ad', index=1,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A$\n\"googleads.googleapis.com/AdGroupAd', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='asset', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.asset', index=2,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003\372A \n\036googleads.googleapis.com/Asset', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='field_type', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.field_type', index=3,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='enabled', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.enabled', index=4,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='policy_summary', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.policy_summary', index=5,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='performance_label', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView.performance_label', index=6,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=b'\352A\210\001\n+googleads.googleapis.com/AdGroupAdAssetView\022Ycustomers/{customer_id}/adGroupAdAssetViews/{ad_group_id}~{ad_id}~{asset_id}~{field_type}',
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='_ad_group_ad', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView._ad_group_ad',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_asset', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView._asset',
index=1, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
_descriptor.OneofDescriptor(
name='_enabled', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetView._enabled',
index=2, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=470,
serialized_end=1200,
)
_ADGROUPADASSETPOLICYSUMMARY = _descriptor.Descriptor(
name='AdGroupAdAssetPolicySummary',
full_name='google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummary',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='policy_topic_entries', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummary.policy_topic_entries', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='review_status', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummary.review_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='approval_status', full_name='google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummary.approval_status', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\003', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1203,
serialized_end=1527,
)
_ADGROUPADASSETVIEW.fields_by_name['field_type'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__field__type__pb2._ASSETFIELDTYPEENUM_ASSETFIELDTYPE
_ADGROUPADASSETVIEW.fields_by_name['policy_summary'].message_type = _ADGROUPADASSETPOLICYSUMMARY
_ADGROUPADASSETVIEW.fields_by_name['performance_label'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_asset__performance__label__pb2._ASSETPERFORMANCELABELENUM_ASSETPERFORMANCELABEL
_ADGROUPADASSETVIEW.oneofs_by_name['_ad_group_ad'].fields.append(
_ADGROUPADASSETVIEW.fields_by_name['ad_group_ad'])
_ADGROUPADASSETVIEW.fields_by_name['ad_group_ad'].containing_oneof = _ADGROUPADASSETVIEW.oneofs_by_name['_ad_group_ad']
_ADGROUPADASSETVIEW.oneofs_by_name['_asset'].fields.append(
_ADGROUPADASSETVIEW.fields_by_name['asset'])
_ADGROUPADASSETVIEW.fields_by_name['asset'].containing_oneof = _ADGROUPADASSETVIEW.oneofs_by_name['_asset']
_ADGROUPADASSETVIEW.oneofs_by_name['_enabled'].fields.append(
_ADGROUPADASSETVIEW.fields_by_name['enabled'])
_ADGROUPADASSETVIEW.fields_by_name['enabled'].containing_oneof = _ADGROUPADASSETVIEW.oneofs_by_name['_enabled']
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['policy_topic_entries'].message_type = google_dot_ads_dot_googleads_dot_v6_dot_common_dot_policy__pb2._POLICYTOPICENTRY
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['review_status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__review__status__pb2._POLICYREVIEWSTATUSENUM_POLICYREVIEWSTATUS
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['approval_status'].enum_type = google_dot_ads_dot_googleads_dot_v6_dot_enums_dot_policy__approval__status__pb2._POLICYAPPROVALSTATUSENUM_POLICYAPPROVALSTATUS
DESCRIPTOR.message_types_by_name['AdGroupAdAssetView'] = _ADGROUPADASSETVIEW
DESCRIPTOR.message_types_by_name['AdGroupAdAssetPolicySummary'] = _ADGROUPADASSETPOLICYSUMMARY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
AdGroupAdAssetView = _reflection.GeneratedProtocolMessageType('AdGroupAdAssetView', (_message.Message,), {
'DESCRIPTOR' : _ADGROUPADASSETVIEW,
'__module__' : 'google.ads.googleads.v6.resources.ad_group_ad_asset_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroupAdAssetView)
})
_sym_db.RegisterMessage(AdGroupAdAssetView)
AdGroupAdAssetPolicySummary = _reflection.GeneratedProtocolMessageType('AdGroupAdAssetPolicySummary', (_message.Message,), {
'DESCRIPTOR' : _ADGROUPADASSETPOLICYSUMMARY,
'__module__' : 'google.ads.googleads.v6.resources.ad_group_ad_asset_view_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.resources.AdGroupAdAssetPolicySummary)
})
_sym_db.RegisterMessage(AdGroupAdAssetPolicySummary)
DESCRIPTOR._options = None
_ADGROUPADASSETVIEW.fields_by_name['resource_name']._options = None
_ADGROUPADASSETVIEW.fields_by_name['ad_group_ad']._options = None
_ADGROUPADASSETVIEW.fields_by_name['asset']._options = None
_ADGROUPADASSETVIEW.fields_by_name['field_type']._options = None
_ADGROUPADASSETVIEW.fields_by_name['enabled']._options = None
_ADGROUPADASSETVIEW.fields_by_name['policy_summary']._options = None
_ADGROUPADASSETVIEW.fields_by_name['performance_label']._options = None
_ADGROUPADASSETVIEW._options = None
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['policy_topic_entries']._options = None
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['review_status']._options = None
_ADGROUPADASSETPOLICYSUMMARY.fields_by_name['approval_status']._options = None
# @@protoc_insertion_point(module_scope)
| 69.610092
| 2,385
| 0.816738
|
79526d509f0b5aebf6b83e2b5a23e69b459f8168
| 6,913
|
py
|
Python
|
tests/testapp/models.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | null | null | null |
tests/testapp/models.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | null | null | null |
tests/testapp/models.py
|
behconsci/django-extensions
|
716832fb9fbe78c6970930b378e0c0962beccd2e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import models
from django_extensions.db.fields import (
AutoSlugField,
RandomCharField,
ShortUUIDField,
)
from django_extensions.db.fields.json import JSONField
from django_extensions.db.models import ActivatorModel, TimeStampedModel
class Secret(models.Model):
name = models.CharField(blank=True, max_length=255, null=True)
text = models.TextField(blank=True, null=True)
class Meta:
app_label = 'django_extensions'
class Name(models.Model):
name = models.CharField(max_length=50)
class Meta:
app_label = 'django_extensions'
class Note(models.Model):
note = models.TextField()
class Meta:
app_label = 'django_extensions'
class Personality(models.Model):
description = models.CharField(max_length=50)
class Club(models.Model):
name = models.CharField(max_length=50)
class Person(models.Model):
name = models.ForeignKey(Name, on_delete=models.CASCADE)
age = models.PositiveIntegerField()
children = models.ManyToManyField('self')
notes = models.ManyToManyField(Note)
personality = models.OneToOneField(
Personality,
null=True,
on_delete=models.CASCADE)
clubs = models.ManyToManyField(Club, through='testapp.Membership')
class Meta:
app_label = 'django_extensions'
class Membership(models.Model):
person = models.ForeignKey(Person, on_delete=models.CASCADE)
club = models.ForeignKey(Club, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now=True)
class Post(ActivatorModel):
title = models.CharField(max_length=255)
class Meta:
app_label = 'django_extensions'
class PostWithTitleOrdering(Post):
class Meta:
proxy = True
ordering = ['title']
class SluggedTestModel(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from='title')
class Meta:
app_label = 'django_extensions'
class SluggedTestBytesLookupValueModel(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from=b'title')
class Meta:
app_label = 'django_extensions'
class ChildSluggedTestModel(SluggedTestModel):
class Meta:
app_label = 'django_extensions'
def get_readable_title(instance):
return "The title is {}".format(instance.title)
class ModelMethodSluggedTestModel(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from='get_readable_title')
class Meta:
app_label = 'django_extensions'
def get_readable_title(self):
return get_readable_title(self)
class FunctionSluggedTestModel(models.Model):
title = models.CharField(max_length=42)
slug = AutoSlugField(populate_from=get_readable_title)
class Meta:
app_label = 'django_extensions'
class FKSluggedTestModel(models.Model):
related_field = models.ForeignKey(SluggedTestModel, on_delete=models.CASCADE)
slug = AutoSlugField(populate_from="related_field__title")
class Meta:
app_label = 'django_extensions'
class FKSluggedTestModelCallable(models.Model):
related_field = models.ForeignKey(ModelMethodSluggedTestModel, on_delete=models.CASCADE)
slug = AutoSlugField(populate_from="related_field__get_readable_title")
class Meta:
app_label = 'django_extensions'
class JSONFieldTestModel(models.Model):
a = models.IntegerField()
j_field = JSONField()
class Meta:
app_label = 'django_extensions'
class ShortUUIDTestModel_field(models.Model):
a = models.IntegerField()
uuid_field = ShortUUIDField()
class Meta:
app_label = 'django_extensions'
class ShortUUIDTestModel_pk(models.Model):
uuid_field = ShortUUIDField(primary_key=True)
class Meta:
app_label = 'django_extensions'
class ShortUUIDTestAgregateModel(ShortUUIDTestModel_pk):
a = models.IntegerField()
class Meta:
app_label = 'django_extensions'
class ShortUUIDTestManyToManyModel(ShortUUIDTestModel_pk):
many = models.ManyToManyField(ShortUUIDTestModel_field)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModel(models.Model):
random_char_field = RandomCharField(length=8, unique=False)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelUnique(models.Model):
random_char_field = RandomCharField(length=8, unique=True)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelAlphaDigits(models.Model):
random_char_field = RandomCharField(length=8, unique=True)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelLowercaseAlphaDigits(models.Model):
random_char_field = RandomCharField(length=8, lowercase=True)
class Meta:
app_label = 'django_extensions'
verbose_name = 'lowercase alpha digits'
class RandomCharTestModelUppercaseAlphaDigits(models.Model):
random_char_field = RandomCharField(length=8, uppercase=True)
class Meta:
app_label = 'django_extensions'
verbose_name = 'uppercase alpha digits'
class RandomCharTestModelLowercase(models.Model):
random_char_field = RandomCharField(length=8, lowercase=True, include_digits=False)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelUppercase(models.Model):
random_char_field = RandomCharField(length=8, uppercase=True, include_digits=False)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelAlpha(models.Model):
random_char_field = RandomCharField(length=8, include_digits=False)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelDigits(models.Model):
random_char_field = RandomCharField(length=8, include_alpha=False)
class Meta:
app_label = 'django_extensions'
class RandomCharTestModelPunctuation(models.Model):
random_char_field = RandomCharField(
length=8,
include_punctuation=True,
include_digits=False,
include_alpha=False,
)
class Meta:
app_label = 'django_extensions'
class TimestampedTestModel(TimeStampedModel):
class Meta:
app_label = 'django_extensions'
class Permission(models.Model):
text = models.CharField(max_length=32)
person = models.ForeignKey(Person, on_delete=models.CASCADE)
class UniqueTestAppModel(models.Model):
global_id = models.CharField(max_length=32, unique=True)
class SqlDiff(models.Model):
number = models.CharField(max_length=40, null=True, verbose_name='Chargennummer')
creator = models.CharField(max_length=20, null=True, blank=True)
class SqlDiffUniqueTogether(models.Model):
aaa = models.CharField(max_length=20)
bbb = models.CharField(max_length=20)
class Meta:
unique_together = ['aaa', 'bbb']
| 25.047101
| 92
| 0.729929
|
79526d97613a8658719e35ea734ad3566bf14f36
| 9,484
|
py
|
Python
|
enigma/machine.py
|
spgill/enigma
|
b9cf3aff2e3c4ef442c578cbddc39a9b53dadb4e
|
[
"MIT"
] | 1
|
2021-01-01T14:02:53.000Z
|
2021-01-01T14:02:53.000Z
|
enigma/machine.py
|
spgill/enigma
|
b9cf3aff2e3c4ef442c578cbddc39a9b53dadb4e
|
[
"MIT"
] | null | null | null |
enigma/machine.py
|
spgill/enigma
|
b9cf3aff2e3c4ef442c578cbddc39a9b53dadb4e
|
[
"MIT"
] | null | null | null |
# stdlib imports
import array
import enum
import io
import pickle
import random
# third party imports
# local module imports
import enigma.rotors as rotors
class OUTPUT(enum.Enum):
PENTAGRAPH = 1
CONTINUOUS = 2
class Machine:
def __init__(
self,
plugboardStack=[],
rotorStack=[],
reflector=None,
state=None,
stateSeed='',
outputMode=OUTPUT.PENTAGRAPH
):
"""Initialize a new Enigma Machine.
"""
# Initialize the empty variables
self.plugboard = []
self.rotors = []
self.reflector = None
self.pentacount = 0
# Unpack the state
if state:
self.stateSet(state)
# If seed is present, generate randomized state
elif stateSeed:
self.stateRandom(stateSeed)
# or unpack the args into the class
else:
self._initPlugboard(plugboardStack)
self._initRotors(rotorStack)
self._initReflector(reflector)
# Link all of the rotors and reflectors together
self._link()
# Go ahead and set a break point
self.breakSet()
# Store the mode
self.mode = outputMode
def _initPlugboard(self, stack):
'''Initialize the plugboard translation matrix'''
# Start with an 1:1 mapping
self.plugboard = array.array('b', [i for i in range(26)])
# Swap up the mappings for each desired pair
for pair in stack:
x = pair[0]
y = pair[1]
x = rotors._RotorBase._abet.index(x.upper())
y = rotors._RotorBase._abet.index(y.upper())
self.plugboard[x] = y
self.plugboard[y] = x
def _initRotors(self, stack):
'''Check the passed rotors to see if they're strings or real rotors'''
for i, entry in enumerate(stack):
rotor = None
# if it's an actual rotor instance, keep on swimming
if isinstance(entry, rotors._RotorBase):
rotor = entry
# if it's a string, turn it into a rotor
if isinstance(entry, str):
rotor = rotors.stringToRotor(entry)
# Must be invalid then
if rotor is None:
raise TypeError(
'Unknown type of rotor passed into the machine'
)
# Append it, yo
self.rotors.append(rotor)
def _initReflector(self, reflector):
'''Check to make sure a real reflector was passed in'''
# if it's an actual reflector instance, keep on swimming
if isinstance(reflector, rotors._ReflectorBase):
self.reflector = reflector
# if it's a string, turn it into a reflector
if isinstance(reflector, str):
self.reflector = rotors.stringToReflector(reflector)
# Must be invalid then
if self.reflector is None:
raise TypeError(
'Unknown type of reflector passed into the machine'
)
def _link(self):
"""Link the rotors and reflectors together in a node-like fashion"""
# Link the rotors forward
for i in range(len(self.rotors))[:-1]:
self.rotors[i].next = self.rotors[i + 1]
# Link the rotors backwards
for i in range(len(self.rotors))[1:]:
self.rotors[i].previous = self.rotors[i - 1]
# Link the reflector into the loop
self.rotors[-1].next = self.reflector
self.reflector.previous = self.rotors[-1]
def _checkByte(self, b):
'''Sanitize a single character'''
# Uppercase alpha. Good to go.
if b >= 65 and b <= 90:
return b
# Lowercase alpha. Let's capitalize it.
elif b >= 97 and b <= 122:
return b - 32
# Invalid character.
else:
return False
def stateGet(self):
'''Get a serialized state of the machine. (the 'settings')'''
return pickle.dumps((
self.plugboard,
self.rotors,
self.reflector
), -1)
def stateSet(self, state):
'''Set the state of the machine from a serialized input'''
(
self.plugboard,
self.rotors,
self.reflector
) = pickle.loads(state)
def stateRandom(self, seed):
"""Randomly generate a state from a string seed"""
# Seed the random generator
random.seed(seed)
# Generate a random plugboard
plugboardStack = []
abet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
for i in range(random.randint(0, 13)):
pair = ''
for j in range(2):
k = random.randrange(0, len(abet))
pair += abet[k]
del abet[k]
plugboardStack.append(pair)
self._initPlugboard(plugboardStack)
# Generate random rotors (there will always be three)
rotorStack = []
abet = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
rotorNames = sorted(
[r._short for r in rotors._RotorBase.__subclasses__()]
)
rotorNames.remove('base-ref')
for i in range(3):
rotor = '{0}:{1}:{2}'.format(
random.choice(rotorNames),
random.choice(abet),
random.choice(abet)
)
rotorStack.append(rotor)
self._initRotors(rotorStack)
# Pick a random reflector
reflNames = sorted(
[r._short for r in rotors._ReflectorBase.__subclasses__()]
)
reflector = random.choice(reflNames)
self._initReflector(reflector)
def breakSet(self):
'''Save the current state to be easily returned to later'''
self._breakstate = self.stateGet()
def breakGo(self):
'''Return to the saved break state'''
assert hasattr(self, '_breakstate')
self.stateSet(self._breakstate)
def translatePin(self, pin):
"""
Translate a singular pin (as an integer) through the plugboard,
rotors, reflector, and back again.
"""
# Isolate the first (maybe only) rotor
rotor = self.rotors[0]
# Forward through the plugboard
pin = self.plugboard[pin]
# Send the pin through the rotors
pin = rotor.translate(pin)
# Backwards through the plugboard
pin = self.plugboard[pin]
# Step the rotors
rotor.step()
# Return the fruits of our labor
return pin
def translateChunk(self, chunk_in):
"""
Translate a non-empty bytes or bytearray object through the machine.
"""
# Initialize the outgoing chunk
chunk_out = bytearray()
# Text modes
for byte_in in chunk_in:
# Check the byte
byte_out = self._checkByte(byte_in)
if not byte_out:
continue
# Convert the byte to a pin
byte_out -= 65
# Run it through the machine
byte_out = self.translatePin(byte_out)
# Convert it back into a byte
byte_out += 65
# Append it to the array
chunk_out.append(byte_out)
# If pentagraph mode is on, increment and Check
if self.mode == OUTPUT.PENTAGRAPH:
self.pentacount += 1
if self.pentacount == 5:
chunk_out.append(0x20)
self.pentacount = 0
# Return the processed chunk
return chunk_out
def translateString(self, s, **kwargs):
"""Lazy method to translate a string"""
# Reset the pentagraph counter
self.pentacount = 0
        # encode/decode so a str comes back as readable text under Python 3
        return self.translateChunk(s.encode('utf-8'), **kwargs).decode('utf-8')
def _readChunks(self, stream, chunkSize):
"""Yield discrete chunks from a stream."""
while True:
data = stream.read(chunkSize)
if not data:
break
yield data
def _streamSize(self, stream):
"""Return the size of a stream in bytes"""
stream.seek(0, 2)
size = stream.tell()
stream.seek(0)
return size
def translateStream(
self,
stream_in,
stream_out=None,
progressCallback=None,
chunkSize=128,
**kwargs
):
"""Translate a stream (file-like object) chunk by chunk."""
# Reset the pentagraph counter
self.pentacount = 0
# Figure out the size of the input stream
stream_in_size = self._streamSize(stream_in)
# If no outgoing stream is specified, make one
if not stream_out:
stream_out = io.BytesIO()
stream_out_size = 0
# Make the initial call to the progress function
if progressCallback:
progressCallback(stream_out_size, stream_in_size)
# Iterate through chunks
for chunk_in in self._readChunks(stream_in, chunkSize):
chunk_out = self.translateChunk(chunk_in, **kwargs)
stream_out.write(chunk_out)
stream_out_size += chunkSize
if progressCallback:
progressCallback(stream_out_size, stream_in_size)
# Return the outgoing stream (in case one wasn't passed in)
return stream_out
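# --- Editor's note: hedged usage sketch, not part of the original module. ---
# It exercises only the Machine API defined above; the seed and message are
# invented for illustration, and rotor/reflector choices are delegated to
# stateRandom() so no knowledge of the enigma.rotors short-names is needed.
if __name__ == '__main__':  # pragma: no cover
    machine = Machine(stateSeed='example seed')
    machine.breakSet()                        # remember the starting rotor state
    secret = machine.translateChunk(b'HELLOWORLD')
    machine.breakGo()                         # rewind to the saved state
    plain = machine.translateChunk(secret)    # Enigma is reciprocal, so this decrypts
    print(secret, plain)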
| 29.453416
| 78
| 0.560418
|
79526dc8083815714585aa4e3c0857f0f19c6166
| 28,272
|
py
|
Python
|
neutron/services/segments/plugin.py
|
acdc-cloud/neutron
|
2510836886555179f9e9e39b1fdbf94296befc51
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/services/segments/plugin.py
|
acdc-cloud/neutron
|
2510836886555179f9e9e39b1fdbf94296befc51
|
[
"Apache-2.0"
] | null | null | null |
neutron/services/segments/plugin.py
|
acdc-cloud/neutron
|
2510836886555179f9e9e39b1fdbf94296befc51
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Hewlett Packard Enterprise Development, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from keystoneauth1 import loading as ks_loading
import netaddr
from neutron_lib.api.definitions import ip_allocation as ipalloc_apidef
from neutron_lib.api.definitions import l2_adjacency as l2adj_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.api.definitions import port as port_def
from neutron_lib.api.definitions import subnet as subnet_def
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib.exceptions import placement as placement_exc
from neutron_lib.plugins import directory
from novaclient import client as nova_client
from novaclient import exceptions as nova_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from neutron._i18n import _
from neutron.db import _resource_extend as resource_extend
from neutron.extensions import segment
from neutron.notifiers import batch_notifier
from neutron.objects import network as net_obj
from neutron.objects import subnet as subnet_obj
from neutron.services.segments import db
from neutron.services.segments import exceptions
from neutron.services.segments import placement_client
LOG = log.getLogger(__name__)
NOVA_API_VERSION = '2.41'
IPV4_RESOURCE_CLASS = 'IPV4_ADDRESS'
SEGMENT_NAME_STUB = 'Neutron segment id %s'
MAX_INVENTORY_UPDATE_RETRIES = 10
@resource_extend.has_resource_extenders
@registry.has_registry_receivers
class Plugin(db.SegmentDbMixin, segment.SegmentPluginBase):
_instance = None
supported_extension_aliases = ["segment", "ip_allocation",
l2adj_apidef.ALIAS,
"standard-attr-segment",
"subnet-segmentid-writable",
'segments-peer-subnet-host-routes']
__native_pagination_support = True
__native_sorting_support = True
__filter_validation_support = True
def __init__(self):
self.nova_updater = NovaSegmentNotifier()
self.segment_host_routes = SegmentHostRoutes()
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_network_dict_binding(network_res, network_db):
if not directory.get_plugin('segments'):
return
# TODO(carl_baldwin) Make this work with service subnets when
# it's a thing.
is_adjacent = (not network_db.subnets or
not network_db.subnets[0].segment_id)
network_res[l2adj_apidef.L2_ADJACENCY] = is_adjacent
@staticmethod
@resource_extend.extends([subnet_def.COLLECTION_NAME])
def _extend_subnet_dict_binding(subnet_res, subnet_db):
subnet_res['segment_id'] = subnet_db.get('segment_id')
@staticmethod
@resource_extend.extends([port_def.COLLECTION_NAME])
def _extend_port_dict_binding(port_res, port_db):
if not directory.get_plugin('segments'):
return
value = ipalloc_apidef.IP_ALLOCATION_IMMEDIATE
if port_db.get('ip_allocation'):
value = port_db.get('ip_allocation')
port_res[ipalloc_apidef.IP_ALLOCATION] = value
@classmethod
def get_instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
@registry.receives(resources.SEGMENT, [events.BEFORE_DELETE])
def _prevent_segment_delete_with_subnet_associated(
self, resource, event, trigger, context, segment,
for_net_delete=False):
"""Raise exception if there are any subnets associated with segment."""
if for_net_delete:
# don't check if this is a part of a network delete operation
return
segment_id = segment['id']
subnets = subnet_obj.Subnet.get_objects(context,
segment_id=segment_id)
subnet_ids = [s.id for s in subnets]
if subnet_ids:
reason = _("The segment is still associated with subnet(s) "
"%s") % ", ".join(subnet_ids)
raise exceptions.SegmentInUse(segment_id=segment_id,
reason=reason)
class Event(object):
def __init__(self, method, segment_ids, total=None, reserved=None,
segment_host_mappings=None, host=None):
self.method = method
if isinstance(segment_ids, set):
self.segment_ids = segment_ids
else:
self.segment_id = segment_ids
self.total = total
self.reserved = reserved
self.segment_host_mappings = segment_host_mappings
self.host = host
@registry.has_registry_receivers
class NovaSegmentNotifier(object):
def __init__(self):
self.p_client, self.n_client = self._get_clients()
self.batch_notifier = batch_notifier.BatchNotifier(
cfg.CONF.send_events_interval, self._send_notifications)
def _get_clients(self):
p_client = placement_client.PlacementAPIClient()
n_auth = ks_loading.load_auth_from_conf_options(cfg.CONF, 'nova')
n_session = ks_loading.load_session_from_conf_options(
cfg.CONF,
'nova',
auth=n_auth)
extensions = [
ext for ext in nova_client.discover_extensions(NOVA_API_VERSION)
if ext.name == "server_external_events"]
n_client = nova_client.Client(
NOVA_API_VERSION,
session=n_session,
region_name=cfg.CONF.nova.region_name,
endpoint_type=cfg.CONF.nova.endpoint_type,
extensions=extensions)
return p_client, n_client
def _send_notifications(self, batched_events):
for event in batched_events:
try:
event.method(event)
except placement_exc.PlacementEndpointNotFound:
LOG.debug('Placement API was not found when trying to '
'update routed networks IPv4 inventories')
return
def _notify_subnet(self, context, subnet, segment_id):
total, reserved = self._calculate_inventory_total_and_reserved(subnet)
if total:
segment_host_mappings = net_obj.SegmentHostMapping.get_objects(
context, segment_id=segment_id)
self.batch_notifier.queue_event(Event(
self._create_or_update_nova_inventory, segment_id, total=total,
reserved=reserved,
segment_host_mappings=segment_host_mappings))
@registry.receives(resources.SUBNET, [events.AFTER_CREATE])
def _notify_subnet_created(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
self._notify_subnet(context, subnet, segment_id)
def _create_or_update_nova_inventory(self, event):
try:
self._update_nova_inventory(event)
except placement_exc.PlacementResourceProviderNotFound:
self._create_nova_inventory(event.segment_id, event.total,
event.reserved,
event.segment_host_mappings)
def _update_nova_inventory(self, event):
for count in range(MAX_INVENTORY_UPDATE_RETRIES):
ipv4_inventory = self.p_client.get_inventory(event.segment_id,
IPV4_RESOURCE_CLASS)
if event.total:
ipv4_inventory['total'] += event.total
if event.reserved:
ipv4_inventory['reserved'] += event.reserved
try:
self.p_client.update_inventory(event.segment_id,
ipv4_inventory,
IPV4_RESOURCE_CLASS)
return
except placement_exc.PlacementInventoryUpdateConflict:
LOG.debug('Re-trying to update Nova IPv4 inventory for '
'routed network segment: %s', event.segment_id)
LOG.error('Failed to update Nova IPv4 inventory for routed '
'network segment: %s', event.segment_id)
def _get_nova_aggregate_uuid(self, aggregate):
try:
return aggregate.uuid
except AttributeError:
with excutils.save_and_reraise_exception():
LOG.exception("uuid was not returned as part of the aggregate "
"object which indicates that the Nova API "
"backend does not support microversions. Ensure "
"that the compute endpoint in the service "
"catalog points to the v2.1 API.")
def _create_nova_inventory(self, segment_id, total, reserved,
segment_host_mappings):
name = SEGMENT_NAME_STUB % segment_id
resource_provider = {'name': name, 'uuid': segment_id}
self.p_client.create_resource_provider(resource_provider)
aggregate = self.n_client.aggregates.create(name, None)
aggregate_uuid = self._get_nova_aggregate_uuid(aggregate)
self.p_client.associate_aggregates(segment_id, [aggregate_uuid])
for mapping in segment_host_mappings:
self.n_client.aggregates.add_host(aggregate.id, mapping.host)
ipv4_inventory = {'total': total, 'reserved': reserved,
'min_unit': 1, 'max_unit': 1, 'step_size': 1,
'allocation_ratio': 1.0,
'resource_class': IPV4_RESOURCE_CLASS}
self.p_client.create_inventory(segment_id, ipv4_inventory)
def _calculate_inventory_total_and_reserved(self, subnet):
total = 0
reserved = 0
allocation_pools = subnet.get('allocation_pools') or []
for pool in allocation_pools:
total += int(netaddr.IPAddress(pool['end']) -
netaddr.IPAddress(pool['start'])) + 1
if total:
if subnet['gateway_ip']:
total += 1
reserved += 1
if subnet['enable_dhcp']:
reserved += 1
return total, reserved
@registry.receives(resources.SUBNET, [events.AFTER_UPDATE])
def _notify_subnet_updated(self, resource, event, trigger, context,
subnet, original_subnet, **kwargs):
segment_id = subnet.get('segment_id')
original_segment_id = original_subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
if original_segment_id != segment_id:
# Migration to routed network, treat as create
self._notify_subnet(context, subnet, segment_id)
return
filters = {'segment_id': [segment_id],
'ip_version': [constants.IP_VERSION_4]}
if not subnet['allocation_pools']:
plugin = directory.get_plugin()
alloc_pools = [s['allocation_pools'] for s in
plugin.get_subnets(context, filters=filters)]
if not any(alloc_pools):
self.batch_notifier.queue_event(Event(
self._delete_nova_inventory, segment_id))
return
original_total, original_reserved = (
self._calculate_inventory_total_and_reserved(original_subnet))
updated_total, updated_reserved = (
self._calculate_inventory_total_and_reserved(subnet))
total = updated_total - original_total
reserved = updated_reserved - original_reserved
if total or reserved:
segment_host_mappings = None
if not original_subnet['allocation_pools']:
segment_host_mappings = net_obj.SegmentHostMapping.get_objects(
context, segment_id=segment_id)
self.batch_notifier.queue_event(Event(
self._create_or_update_nova_inventory, segment_id, total=total,
reserved=reserved,
segment_host_mappings=segment_host_mappings))
@registry.receives(resources.SUBNET, [events.AFTER_DELETE])
def _notify_subnet_deleted(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
if not segment_id or subnet['ip_version'] != constants.IP_VERSION_4:
return
total, reserved = self._calculate_inventory_total_and_reserved(subnet)
if total:
filters = {'segment_id': [segment_id], 'ip_version': [4]}
plugin = directory.get_plugin()
if plugin.get_subnets_count(context, filters=filters) > 0:
self.batch_notifier.queue_event(Event(
self._update_nova_inventory, segment_id, total=-total,
reserved=-reserved))
else:
self.batch_notifier.queue_event(Event(
self._delete_nova_inventory, segment_id))
def _get_aggregate_id(self, segment_id):
aggregate_uuid = self.p_client.list_aggregates(
segment_id)['aggregates'][0]
aggregates = self.n_client.aggregates.list()
for aggregate in aggregates:
nc_aggregate_uuid = self._get_nova_aggregate_uuid(aggregate)
if nc_aggregate_uuid == aggregate_uuid:
return aggregate.id
def _delete_nova_inventory(self, event):
aggregate_id = self._get_aggregate_id(event.segment_id)
aggregate = self.n_client.aggregates.get_details(
aggregate_id)
for host in aggregate.hosts:
self.n_client.aggregates.remove_host(aggregate_id,
host)
self.n_client.aggregates.delete(aggregate_id)
self.p_client.delete_resource_provider(event.segment_id)
@registry.receives(resources.SEGMENT_HOST_MAPPING, [events.AFTER_CREATE])
def _notify_host_addition_to_aggregate(self, resource, event, trigger,
context, host, current_segment_ids,
**kwargs):
subnets = subnet_obj.Subnet.get_objects(context,
segment_id=current_segment_ids)
segment_ids = {s.segment_id for s in subnets}
self.batch_notifier.queue_event(Event(self._add_host_to_aggregate,
segment_ids, host=host))
def _add_host_to_aggregate(self, event):
for segment_id in event.segment_ids:
try:
aggregate_id = self._get_aggregate_id(segment_id)
except placement_exc.PlacementAggregateNotFound:
LOG.info('When adding host %(host)s, aggregate not found '
'for routed network segment %(segment_id)s',
{'host': event.host, 'segment_id': segment_id})
continue
try:
self.n_client.aggregates.add_host(aggregate_id, event.host)
except nova_exc.Conflict:
LOG.info('Host %(host)s already exists in aggregate for '
'routed network segment %(segment_id)s',
{'host': event.host, 'segment_id': segment_id})
@registry.receives(resources.PORT,
[events.AFTER_CREATE, events.AFTER_DELETE])
def _notify_port_created_or_deleted(self, resource, event, trigger,
context, port, **kwargs):
if not self._does_port_require_nova_inventory_update(port):
return
ipv4_subnets_number, segment_id = (
self._get_ipv4_subnets_number_and_segment_id(port, context))
if segment_id:
if event == events.AFTER_DELETE:
ipv4_subnets_number = -ipv4_subnets_number
self.batch_notifier.queue_event(Event(self._update_nova_inventory,
segment_id, reserved=ipv4_subnets_number))
@registry.receives(resources.PORT, [events.AFTER_UPDATE])
def _notify_port_updated(self, resource, event, trigger, context,
**kwargs):
port = kwargs.get('port')
original_port = kwargs.get('original_port')
does_original_port_require_nova_inventory_update = (
self._does_port_require_nova_inventory_update(original_port))
does_port_require_nova_inventory_update = (
self._does_port_require_nova_inventory_update(port))
if not (does_original_port_require_nova_inventory_update or
does_port_require_nova_inventory_update):
return
original_port_ipv4_subnets_number, segment_id = (
self._get_ipv4_subnets_number_and_segment_id(original_port,
context))
if not segment_id:
return
port_ipv4_subnets_number = len(self._get_ipv4_subnet_ids(port))
if not does_original_port_require_nova_inventory_update:
original_port_ipv4_subnets_number = 0
if not does_port_require_nova_inventory_update:
port_ipv4_subnets_number = 0
update = port_ipv4_subnets_number - original_port_ipv4_subnets_number
if update:
self.batch_notifier.queue_event(Event(self._update_nova_inventory,
segment_id, reserved=update))
def _get_ipv4_subnets_number_and_segment_id(self, port, context):
ipv4_subnet_ids = self._get_ipv4_subnet_ids(port)
if not ipv4_subnet_ids:
return 0, None
subnet = subnet_obj.Subnet.get_object(context, id=ipv4_subnet_ids[0])
if subnet and subnet.segment_id:
return len(ipv4_subnet_ids), subnet.segment_id
return 0, None
def _does_port_require_nova_inventory_update(self, port):
device_owner = port.get('device_owner')
if (device_owner.startswith(constants.DEVICE_OWNER_COMPUTE_PREFIX) or
device_owner == constants.DEVICE_OWNER_DHCP):
return False
return True
def _get_ipv4_subnet_ids(self, port):
ipv4_subnet_ids = []
for ip in port.get('fixed_ips', []):
if netaddr.IPAddress(
ip['ip_address']).version == constants.IP_VERSION_4:
ipv4_subnet_ids.append(ip['subnet_id'])
return ipv4_subnet_ids
@registry.has_registry_receivers
class SegmentHostRoutes(object):
def _get_subnets(self, context, network_id):
return subnet_obj.Subnet.get_objects(context, network_id=network_id)
def _calculate_routed_network_host_routes(self, context, ip_version,
network_id=None, subnet_id=None,
segment_id=None,
host_routes=None,
gateway_ip=None,
old_gateway_ip=None,
deleted_cidr=None):
"""Calculate host routes for routed network.
This method is used to calculate the host routes for routed networks
both when handling the user create or update request and when making
updates to subnets on the network in response to events: AFTER_CREATE
and AFTER_DELETE.
:param ip_version: IP version (4/6).
:param network_id: Network ID.
:param subnet_id: UUID of the subnet.
        :param segment_id: Segment ID associated with the subnet.
:param host_routes: Current host_routes of the subnet.
:param gateway_ip: The subnets gateway IP address.
:param old_gateway_ip: The old gateway IP address of the subnet when it
is changed on update.
:param deleted_cidr: The cidr of a deleted subnet.
        :returns: Host routes with routes for the other subnets on the routed
                  network appended, unless a route to the destination already
                  exists.
"""
if host_routes is None:
host_routes = []
dest_ip_nets = [netaddr.IPNetwork(route['destination']) for
route in host_routes]
# Drop routes to the deleted cidr, when the subnet was deleted.
if deleted_cidr:
delete_route = {'destination': deleted_cidr, 'nexthop': gateway_ip}
if delete_route in host_routes:
host_routes.remove(delete_route)
for subnet in self._get_subnets(context, network_id):
if (subnet.id == subnet_id or subnet.segment_id == segment_id or
subnet.ip_version != ip_version):
continue
subnet_ip_net = netaddr.IPNetwork(subnet.cidr)
if old_gateway_ip:
old_route = {'destination': str(subnet.cidr),
'nexthop': old_gateway_ip}
if old_route in host_routes:
host_routes.remove(old_route)
dest_ip_nets.remove(subnet_ip_net)
if gateway_ip:
# Use netaddr here in case the user provided a summary route
# (supernet route). I.e subnet.cidr = 10.0.1.0/24 and
# the user provided a host route for 10.0.0.0/16. We don't
# need to append a route in this case.
if not any(subnet_ip_net in ip_net for ip_net in dest_ip_nets):
host_routes.append({'destination': subnet.cidr,
'nexthop': gateway_ip})
return host_routes
def _host_routes_need_update(self, host_routes, calc_host_routes):
"""Compare host routes and calculated host routes
:param host_routes: Current host routes
:param calc_host_routes: Host routes + calculated host routes for
routed network
:returns True if host_routes and calc_host_routes are not equal
"""
return ((set((route['destination'],
route['nexthop']) for route in host_routes) !=
set((route['destination'],
route['nexthop']) for route in calc_host_routes)))
def _update_routed_network_host_routes(self, context, network_id,
deleted_cidr=None):
"""Update host routes on subnets on a routed network after event
Host routes on the subnets on a routed network may need updates after
any CREATE or DELETE event.
:param network_id: Network ID
:param deleted_cidr: The cidr of a deleted subnet.
"""
for subnet in self._get_subnets(context, network_id):
host_routes = [{'destination': str(route.destination),
'nexthop': route.nexthop}
for route in subnet.host_routes]
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=subnet.ip_version,
network_id=subnet.network_id,
subnet_id=subnet.id,
segment_id=subnet.segment_id,
host_routes=copy.deepcopy(host_routes),
gateway_ip=subnet.gateway_ip,
deleted_cidr=deleted_cidr)
if self._host_routes_need_update(host_routes, calc_host_routes):
LOG.debug(
"Updating host routes for subnet %s on routed network %s",
                    subnet.id, subnet.network_id)
plugin = directory.get_plugin()
plugin.update_subnet(context, subnet.id,
{'subnet': {
'host_routes': calc_host_routes}})
@registry.receives(resources.SUBNET, [events.BEFORE_CREATE])
def host_routes_before_create(self, resource, event, trigger, context,
subnet, **kwargs):
segment_id = subnet.get('segment_id')
gateway_ip = subnet.get('gateway_ip')
if validators.is_attr_set(subnet.get('host_routes')):
host_routes = subnet.get('host_routes')
else:
host_routes = []
if segment_id is not None and validators.is_attr_set(gateway_ip):
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=netaddr.IPNetwork(subnet['cidr']).version,
network_id=subnet['network_id'],
segment_id=subnet['segment_id'],
host_routes=copy.deepcopy(host_routes),
gateway_ip=gateway_ip)
if (not host_routes or
self._host_routes_need_update(host_routes,
calc_host_routes)):
subnet['host_routes'] = calc_host_routes
@registry.receives(resources.SUBNET, [events.BEFORE_UPDATE])
def host_routes_before_update(self, resource, event, trigger, **kwargs):
context = kwargs['context']
subnet, original_subnet = kwargs['request'], kwargs['original_subnet']
segment_id = subnet.get('segment_id', original_subnet['segment_id'])
gateway_ip = subnet.get('gateway_ip', original_subnet['gateway_ip'])
host_routes = subnet.get('host_routes', original_subnet['host_routes'])
if (segment_id and (host_routes != original_subnet['host_routes'] or
gateway_ip != original_subnet['gateway_ip'])):
calc_host_routes = self._calculate_routed_network_host_routes(
context=context,
ip_version=netaddr.IPNetwork(original_subnet['cidr']).version,
network_id=original_subnet['network_id'],
segment_id=segment_id,
host_routes=copy.deepcopy(host_routes),
gateway_ip=gateway_ip,
old_gateway_ip=original_subnet['gateway_ip'] if (
gateway_ip != original_subnet['gateway_ip']) else None)
if self._host_routes_need_update(host_routes, calc_host_routes):
subnet['host_routes'] = calc_host_routes
@registry.receives(resources.SUBNET, [events.AFTER_CREATE])
def host_routes_after_create(self, resource, event, trigger, **kwargs):
context = kwargs['context']
subnet = kwargs['subnet']
# If there are other subnets on the network and subnet has segment_id
# ensure host routes for all subnets are updated.
if (len(self._get_subnets(context, subnet['network_id'])) > 1 and
subnet.get('segment_id')):
self._update_routed_network_host_routes(context,
subnet['network_id'])
@registry.receives(resources.SUBNET, [events.AFTER_DELETE])
def host_routes_after_delete(self, resource, event, trigger, context,
subnet, **kwargs):
# If this is a routed network, remove any routes to this subnet on
# this networks remaining subnets.
if subnet.get('segment_id'):
self._update_routed_network_host_routes(
context, subnet['network_id'], deleted_cidr=subnet['cidr'])
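# --- Editor's note: hedged illustration, not part of the original plugin. ---
# A standalone sketch of the containment check documented in
# _calculate_routed_network_host_routes(): a host route towards another
# subnet is only appended when no existing destination (for example a
# user-provided supernet route) already covers it. The CIDRs and gateway
# below are invented for illustration.
if __name__ == '__main__':  # pragma: no cover
    local_gateway = '10.0.1.1'
    existing = [{'destination': '10.0.0.0/16', 'nexthop': local_gateway}]
    covered = [netaddr.IPNetwork(r['destination']) for r in existing]
    for cidr in ('10.0.2.0/24', '10.0.3.0/24'):
        if not any(netaddr.IPNetwork(cidr) in net for net in covered):
            existing.append({'destination': cidr, 'nexthop': local_gateway})
    # the 10.0.0.0/16 supernet already covers both /24 subnets, so no
    # additional routes were appended
    print(existing)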
| 46.271686
| 79
| 0.624186
|
79526e3a2cbb8312099bad294c645cd36ef0b1cb
| 2,819
|
py
|
Python
|
build/lib/anipick/char.py
|
pengode-handal/anipick
|
f711620d9c12581f13f951204151b60eac4e1736
|
[
"MIT"
] | 1
|
2022-03-02T07:59:15.000Z
|
2022-03-02T07:59:15.000Z
|
build/lib/anipick/char.py
|
pengode-handal/anipick
|
f711620d9c12581f13f951204151b60eac4e1736
|
[
"MIT"
] | null | null | null |
build/lib/anipick/char.py
|
pengode-handal/anipick
|
f711620d9c12581f13f951204151b60eac4e1736
|
[
"MIT"
] | null | null | null |
import requests
from googlesearch import search
from bs4 import BeautifulSoup
from .error_handling import SearchNotWork, NoResultFound
class Charapedia:
def __init__(self, char: str):
try:
mal_char_id = search('site:myanimelist.net {} character info inurl:/character/'.format(char), num_results=0)
except SearchNotWork:
raise SearchNotWork('Search Library Not Work')
try:
mal_char_id = ''.join(mal_char_id).split('/')[4]
except:
raise NoResultFound('Character Not Found')
self.mal_char_id = mal_char_id
base_api = 'https://api.jikan.moe/v3/character/{}/'.format(self.mal_char_id)
r = requests.get(base_api)
result = r.json()
self.result = result
        #Character Name
try:
name = result['name']
name = f'{name} ({result["name_kanji"]})'
except KeyError:
raise NoResultFound(f'{char} is not Anime characters or u typo')
self.name = name or None
#url name
url = result['url']
self.url = url
#image url
image_url = result['image_url']
self.image_url = image_url
#about
about = result['about']
if 'No biography written.' in about:
self.age = about
about = ''.join(about)
self.about = about
self.anu = self.about.split('\n')
#age
try:
age = self.anu[0].split('Age: ')[1]
except:
age = 'Age biography not written.'
self.age = age
#birthday
try:
try:
birthday = self.anu[1].split('Birthday: ')[1]
except:
birthday = self.anu[0].split('Birthday: ')[1]
except:
birthday = 'Birthday biography not written'
self.birthday = birthday
#height
try:
try:
height = self.anu[1].split('Height: ')[1]
except:
try:
height = self.anu[2].split('Height: ')[1]
except:
height = self.anu[3].split('Height:')[1]
except:
height = 'Height biography not written'
self.height = height
#weight
try:
try:
weight = self.anu[1].split('Weight: ')[1]
except:
try:
weight = self.anu[2].split('Weight: ')[1]
except:
weight = self.anu[3].split('Weight:')[1]
except:
weight = 'weight biography not written'
self.weight = weight
#nickname
nickname = result['nicknames']
nickname = ', '.join(nickname)
        if not nickname:  # keep a single nickname rather than discarding it
nickname = 'None'
self.nickname = nickname
#anime reference
@property
def anime(self) -> list:
anime = []
for nama in self.result['animeography']:
anime.append(nama['name'])
anime = ', '.join(anime)
return anime or None
#manga reference
@property
def manga(self) -> list:
manga = []
for nama in self.result['mangaography']:
manga.append(nama['name'])
manga = ', '.join(manga)
return manga or None
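# --- Editor's note: hedged usage sketch, not part of the original module. ---
# Charapedia resolves a character name to a MyAnimeList id through a Google
# search and then calls the Jikan v3 API, so this needs network access; the
# character name below is only an example.
if __name__ == '__main__':  # pragma: no cover
    char = Charapedia('Levi Ackerman')
    print(char.name)       # formatted as "<name> (<name_kanji>)"
    print(char.age, char.birthday, char.height, char.weight)
    print(char.anime)      # comma-separated titles the character appears in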
| 27.105769
| 114
| 0.60447
|
79526ed527e874489a5258e088040fb8d2ea6fc8
| 5,485
|
py
|
Python
|
tests/python/contrib/test_ethosu/test_replace_pooling.py
|
Xuyuanjia2014/tvm
|
892f8305e77ad506660b851f9ce4c81be0f95d9d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/python/contrib/test_ethosu/test_replace_pooling.py
|
Xuyuanjia2014/tvm
|
892f8305e77ad506660b851f9ce4c81be0f95d9d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
tests/python/contrib/test_ethosu/test_replace_pooling.py
|
Xuyuanjia2014/tvm
|
892f8305e77ad506660b851f9ce4c81be0f95d9d
|
[
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
pytest.importorskip("ethosu.vela")
import tvm
from tvm import relay
from tvm.relay.testing import run_opt_pass
from tvm.relay.backend.contrib.ethosu.tir import spec
from tvm.relay.backend.contrib.ethosu.tir.compiler import lower_to_tir
from .infra import make_ethosu_pooling, get_pooling_args
@pytest.mark.parametrize(
"ifm_shape, ofm_channels, ifm_layout, ofm_layout, rounding_mode",
[
((1, 5, 9, 3), 3, "NHWC", "NHWC", "TFL"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHCWB16", "NATURAL"),
((1, 8, 3, 9, 16), 40, "NHCWB16", "NHWC", "TRUNCATE"),
((1, 8, 9, 40), 40, "NHWC", "NHCWB16", "TFL"),
],
)
@pytest.mark.parametrize("pooling_type", ["AVG", "MAX"])
@pytest.mark.parametrize("activation", ["NONE", "CLIP"])
def test_pooling_single(
ifm_shape,
ofm_channels,
ifm_layout,
ofm_layout,
pooling_type,
activation,
rounding_mode,
):
pool_shape = (3, 2)
strides = (1, 2)
padding = (1, 1, 1, 0)
ifm = relay.var("ifm", shape=ifm_shape, dtype="int8")
pooling = make_ethosu_pooling(
ifm,
pooling_type,
pool_shape,
ofm_channels,
strides,
padding,
activation,
ifm_layout,
ofm_layout,
rounding_mode,
)
func = relay.Function(relay.analysis.free_vars(pooling), pooling)
func = run_opt_pass(func, relay.transform.InferType())
mod, _ = lower_to_tir(func)
data = []
def _visit(stmt):
if isinstance(stmt, tvm.tir.Call):
data.append(get_pooling_args(stmt))
tvm.tir.stmt_functor.post_order_visit(mod["main"].body, _visit)
if ifm_layout == "NHWC":
ifm_stride_c = 1
ifm_stride_w = ifm_shape[3]
ifm_stride_h = ifm_shape[2] * ifm_shape[3]
ofm_height = (ifm_shape[1] - pool_shape[0] + padding[0] + padding[0]) // strides[0] + 1
ofm_width = (ifm_shape[2] - pool_shape[1] + padding[1] + padding[1]) // strides[1] + 1
else:
ifm_stride_w = 16
ifm_stride_c = 16 * ifm_shape[3]
ifm_stride_h = 16 * ifm_shape[2] * ifm_shape[3]
ofm_height = (ifm_shape[1] - pool_shape[0] + padding[0] + padding[0]) // strides[0] + 1
ofm_width = (ifm_shape[3] - pool_shape[1] + padding[1] + padding[1]) // strides[1] + 1
if ofm_layout == "NHWC":
ofm_stride_c = 1
ofm_stride_w = ofm_channels if ofm_width > 1 else 1
ofm_stride_h = ofm_channels * ofm_width if ofm_height > 1 else 1
else:
ofm_stride_w = 16
ofm_stride_c = 16 * ofm_width
ofm_stride_h = 16 * ofm_width * ((ofm_channels - 1) // 16 + 1)
serial_pooling = spec.SerialPooling(
ifm=spec.SerialFeatureMap(
data_type="int8",
height=ifm_shape[1],
width=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
channels=ofm_channels,
tile_height_0=ifm_shape[1],
tile_height_1=0,
tile_width_0=ifm_shape[2] if ifm_layout == "NHWC" else ifm_shape[3],
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ifm_layout,
stride_h=ifm_stride_h,
stride_w=ifm_stride_w,
stride_c=ifm_stride_c,
),
ofm=spec.SerialFeatureMap(
data_type="int8",
height=ofm_height,
width=ofm_width,
channels=ofm_channels,
tile_height_0=ofm_height,
tile_height_1=0,
tile_width_0=ofm_width,
tile_address_0=0,
tile_address_1=0,
tile_address_2=0,
tile_address_3=0,
scale=1.0,
zero_point=0,
layout=ofm_layout,
stride_h=ofm_stride_h,
stride_w=ofm_stride_w,
stride_c=ofm_stride_c,
),
pooling_type=pooling_type,
pool_shape=spec.SerialKernel(
width=pool_shape[1],
height=pool_shape[0],
stride_w=strides[1],
stride_h=strides[0],
dilation_w=1,
dilation_h=1,
),
padding=spec.SerialPadding(
top=padding[0], left=padding[1], bottom=padding[2], right=padding[3]
),
activation=spec.SerialActivation(
op=activation,
clip_min=10 if activation == "CLIP" else 0,
clip_max=100 if activation == "CLIP" else 0,
),
rounding_mode=rounding_mode,
upscale="NONE",
)
assert data[0] == ["ethosu_pooling"] + list(serial_pooling)
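# --- Editor's note: hedged helper test, not part of the original file. ---
# It spells out the stride arithmetic used above for the plain NHWC layout
# (ignoring the size-1 special cases applied to the OFM strides): stride_c
# steps one element, stride_w one pixel, and stride_h one row of
# width * channels elements.
def test_nhwc_stride_arithmetic_sketch():
    width, channels = 9, 3  # from the first ifm_shape above, (1, 5, 9, 3)
    stride_c = 1
    stride_w = channels
    stride_h = width * channels
    assert (stride_h, stride_w, stride_c) == (27, 3, 1)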
if __name__ == "__main__":
pytest.main([__file__])
| 33.858025
| 95
| 0.60948
|
79526f539b86da455bb0ee580c6958bcb341cb8a
| 1,105
|
py
|
Python
|
setup.py
|
srimani-programmer/password-checker
|
8a069e7806d53ee2d274e3d2362253215d49446e
|
[
"MIT"
] | null | null | null |
setup.py
|
srimani-programmer/password-checker
|
8a069e7806d53ee2d274e3d2362253215d49446e
|
[
"MIT"
] | null | null | null |
setup.py
|
srimani-programmer/password-checker
|
8a069e7806d53ee2d274e3d2362253215d49446e
|
[
"MIT"
] | null | null | null |
from setuptools import setup
def readme():
with open('README.md') as f:
README = f.read()
return README
setup(
name="password-checker",
version="1.0.0",
description="A Python package to check vulnerability and strength of a password.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/srimani-programmer/password-checker",
author="Sri Manikanta Palakollu",
author_email="srimani.crypter@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.5",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
],
packages=["password_checker"],
include_package_data=True,
)
| 33.484848
| 86
| 0.644344
|
79526fb74a18df42297d4ef1cdf728ec17327d74
| 10,054
|
py
|
Python
|
SOURCES/ddstat/ddstat.py
|
dingqiangliu/vertica-minio
|
0defc75084743f872b659f1dd35a989a57939de3
|
[
"Apache-2.0"
] | null | null | null |
SOURCES/ddstat/ddstat.py
|
dingqiangliu/vertica-minio
|
0defc75084743f872b659f1dd35a989a57939de3
|
[
"Apache-2.0"
] | null | null | null |
SOURCES/ddstat/ddstat.py
|
dingqiangliu/vertica-minio
|
0defc75084743f872b659f1dd35a989a57939de3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python2
#encoding: utf-8
#
# Copyright (c) 2017 - 2020
# Description: cluster monitoring
# Author: DingQiang Liu
import os, sys
from optparse import OptionParser
import logging
import time
from cStringIO import StringIO
import re
from getpass import getuser
from cluster import getCluster
import util.reflection as reflection
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
import dstat as dstatmodule
import dstatproxy
sys.stdout = old_stdout
srcclusterdstatmodule = reflection.overridemodule(dstatmodule, dstatproxy)
logger = logging.getLogger("clustermon")
ansi = {
'reset': '\033[0;0m',
'bold': '\033[1m',
'reverse': '\033[2m',
'underline': '\033[4m',
'clear': '\033[2J',
'clearline': '\033[2K',
'save': '\033[s',
'restore': '\033[u',
'save_all': '\0337',
'restore_all': '\0338',
'linewrap': '\033[7h',
'nolinewrap': '\033[7l',
'up': '\033[1A',
'down': '\033[1B',
'right': '\033[1C',
'left': '\033[1D',
'default': '\033[0;0m',
}
def remotecall(src, args, nodeNamesPattern) :
""" remotely execute script on Vertica cluster.
Arguments:
- src: string, python scriptlet.
- args: dictionary of arguments for script.
  - nodeNamesPattern: regular expression pattern for selecting Vertica nodes.
  Returns: list of results, one from each selected node of the Vertica cluster.
"""
ret = {}
hosts = args.pop('hosts', ['localhost'])
user = args.pop('user', getuser())
vc = getCluster(hosts, user)
mch = vc.executors.remote_exec(src)
mch.send_each(args)
q = mch.make_receive_queue(endmarker=None)
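    # Note: this appears to be the execnet MultiChannel API, where the receive queue
    # yields (channel, result) pairs and the endmarker (None here) is queued once per
    # channel when that channel closes. The loop below counts endmarkers so it knows
    # when every node has finished sending its result.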
terminated = 0
while 1:
channel, result = q.get()
if result is None :
terminated += 1
if terminated == len(mch):
break
continue
else:
nodeName = channel.gateway.id
ret.update({nodeName: result})
return [ret[k] for k in [key for key in sorted(ret) if nodeNamesPattern.match(key) ]]
def initmonitor(hosts, user, nodeNamesPattern, output2csv, args) :
""" remotely execute script on Vertica cluster.
Arguments:
- hosts: list of host name or IP.
- args: list of arguments for dstat
- nodeNamesPattern: regular expression pattern for select Vertica nodes.
Returns: header list of ansi lines and csv lines
"""
headers = ['', '']
src = srcclusterdstatmodule + """
def myExit(code) :
    raise Exception('exitcode: ' + str(code))
if __name__ == '__channelexec__' or __name__ == '__main__' :
global totlist, nodeName
    nodeName = channel.gateway.id.split('-')[0] # remove the trailing '-slave'
remoteargs = channel.receive()
args = remoteargs["args"]
nodeName = nodeName.rjust(int(remoteargs["maxNodeNameLength"]))
output2csv = remoteargs["output2csv"]
old_stdout = sys.stdout
old_exit = sys.exit
sys.exit = myExit
from cStringIO import StringIO
sys.stdout = mystdout = StringIO()
try :
dstatmodule.initterm()
dstatmodule.op = dstatmodule.Options(args)
dstatmodule.theme = dstatmodule.set_theme()
dstatmodule.main()
dstatmodule.perform(0)
except :
channel.send([mystdout.getvalue(), ''])
else :
channel.send([header(totlist, totlist), csvheader(totlist) if output2csv else ''])
sys.stdout = old_stdout
sys.exit = old_exit
"""
for lines in remotecall(src, {'args': args, 'maxNodeNameLength': max([len(n) for n in hosts]), 'output2csv': output2csv, 'hosts': hosts, 'user': user}, nodeNamesPattern) :
#only get headers from 1st node
headers = lines
break
return headers
def monitoring(update, nodeNamesPattern, output2csv) :
""" remotely execute script on Vertica cluster.
Arguments:
- update: sequence number
- nodeNamesPattern: regular expression pattern for select Vertica nodes.
Returns: list of ansi monitoring lines and csv lines
"""
src = """
if __name__ == '__channelexec__' or __name__ == '__main__' :
global op, outputfile
remoteargs = channel.receive()
update = remoteargs["update"]
output2csv = remoteargs["output2csv"]
old_stdout = sys.stdout
from cStringIO import StringIO
sys.stdout = mystdout = StringIO()
old_exit = sys.exit
sys.exit = myExit
# DEBUG
if output2csv :
        op.output = '/tmp/unknown.csv'
outputfile = mycsvout = StringIO()
try :
dstatmodule.perform(update)
except :
pass
finally :
channel.send([mystdout.getvalue(), mycsvout.getvalue() if output2csv else ''])
sys.stdout = old_stdout
sys.exit = old_exit
"""
return remotecall(src, {"update": update, 'output2csv': output2csv}, nodeNamesPattern)
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def myExit(code) :
    raise Exception('exitcode: ' + str(code))
if __name__ == "__main__":
class MyOptionParser(OptionParser):
def error(self, msg):
pass
parser = MyOptionParser()
hostsEnv = ','.join([h for h in os.getenv('NODE_LIST', 'localhost').split(" ") if h != ''])
    parser.add_option('-H', '--hosts', type='string', dest='hostsCSV', default=hostsEnv, help='host names or IPs separated by comma, default is "%s"' % hostsEnv)
    parser.add_option('-S', '--select', type='string', dest='nodeNamesExpress', default='.*', help='regular expression for selecting hosts to show, default is ".*" for all hosts')
defaultUser = getuser()
parser.add_option('-U', '--user', type='string', dest='user', default=defaultUser, help='os user to access the cluster, default is current user: "%s"' % defaultUser)
parser.add_option('--output', type='string', dest="outputFile", default=None, help='write CSV output to file')
parser.usage = ""
helpArgs = ['-h', '--help', '--list']
if [ a for a in sys.argv[1:] if a in helpArgs] :
# hack for replace help info
old_stdout = sys.stdout
sys.stdout = mystdout = StringIO()
old_exit = sys.exit
try :
dstatmodule.initterm()
dstatmodule.Options([ a for a in sys.argv[1:] if a in helpArgs ])
except :
pass
if not '--list' in sys.argv[1:] :
parser.remove_option('--output')
parser.print_help()
sys.stdout = old_stdout
sys.exit = old_exit
print(mystdout.getvalue().replace('dstat', 'ddstat').replace('Options:', 'Additional options:'))
exit(0)
args = ["--time", "--nodename"]
needAll = True
noColor = False
noHeaders = False
skipnext = False
for arg in sys.argv[1:] :
if arg in ['-H', '--hosts', '-U', '--user', '-S', '--select', '--output', '-t', '--time', '--nodename', '--color', '--nocolor', '--noheaders', '--noupdate']:
if arg == '--color' :
noColor = False
if arg == '--nocolor' :
noColor = True
if arg == '--noheaders' :
noHeaders = True
if arg in ['-H', '--hosts', '-U', '--user', '-S', '--select', '--output']:
skipnext = True
continue
if skipnext :
skipnext = False
else :
args.append(arg)
            if arg[0] == '-' and len(arg) > 1 and arg not in ['--bits', '--float', '--integer', '--bw', '--black-on-white', '--output', '--profile']:
needAll = False
delay = 1
count = -1
if len(args) >= 1 :
last1 = args[-1]
last2 = ''
if len(args) >= 2 :
last2 = args[-2]
if RepresentsInt(last2) and RepresentsInt(last1):
delay = int(last2)
count = int(last1)
args = args[:-2]
elif RepresentsInt(last1):
delay = int(last1)
args = args[:-1]
if delay <= 0 :
delay = 1
if needAll :
args = args + ['--all']
if not noColor :
args = args + ['--color']
    args = args + ['--noheaders', '--noupdate', str(delay), '0'] # '--noupdate' should be the last parameter
(options, _) = parser.parse_args()
hosts = options.hostsCSV.split(',')
nodeNamesPattern = re.compile(options.nodeNamesExpress)
outputfile = None
try:
# init, get headers
headers = initmonitor(hosts, options.user, nodeNamesPattern, not options.outputFile is None, args)
if 'not recognized' in headers[0]:
print(headers[0].replace('dstat', 'ddstat'))
exit(1)
newFile = True
if options.outputFile :
if os.path.exists(options.outputFile) :
outputfile = open(options.outputFile, 'a', 0)
newFile = False
else :
outputfile = open(options.outputFile, 'w', 0)
# get counters
        UPDATE_BEGIN = 1000 # there seems to be a weird issue if update counts up from 1
update = UPDATE_BEGIN
sys.stdout.write(ansi['nolinewrap'])
while (count != 0) :
if not noHeaders :
sys.stdout.write(headers[0])
if outputfile and newFile and update == UPDATE_BEGIN :
outputfile.write(headers[1])
for lines in monitoring(update, nodeNamesPattern, not options.outputFile is None) :
sys.stdout.write(lines[0])
# TODO: average line
if outputfile :
outputfile.write(lines[1])
update += 1
if update < 0 :
update = UPDATE_BEGIN + 1
if (count > 0) and (update - UPDATE_BEGIN >= count) :
break
time.sleep(delay)
except KeyboardInterrupt :
pass
finally :
if outputfile :
outputfile.flush()
outputfile.close()
sys.stdout.write(ansi['reset'])
sys.stdout.write('\r')
sys.stdout.flush()
exit(0)
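# Example invocations (hypothetical host names; the options are the ones parsed above):
#   ./ddstat.py -H node1,node2,node3 -U dbadmin                  # monitor three hosts as user dbadmin
#   ./ddstat.py -S 'node000[12]' --output /tmp/ddstat.csv 5 10   # 10 samples, 5s apart, also write CSV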
| 30.466667
| 178
| 0.582455
|
7952719678e7657e79ff48d081e9d315ed60c5ff
| 757
|
py
|
Python
|
ckanext/scheming/tests/test_group_display.py
|
fjelltopp/ckanext-scheming
|
e7f863d5b94547d2362f93b0888a60b34ff6bae5
|
[
"MIT"
] | null | null | null |
ckanext/scheming/tests/test_group_display.py
|
fjelltopp/ckanext-scheming
|
e7f863d5b94547d2362f93b0888a60b34ff6bae5
|
[
"MIT"
] | 10
|
2020-03-17T19:21:12.000Z
|
2021-05-10T10:47:16.000Z
|
ckanext/scheming/tests/test_group_display.py
|
fjelltopp/ckanext-scheming
|
e7f863d5b94547d2362f93b0888a60b34ff6bae5
|
[
"MIT"
] | null | null | null |
import pytest
from ckantoolkit.tests.factories import Sysadmin, Organization, Group
@pytest.mark.usefixtures("clean_db")
class TestOrganizationDisplay(object):
def test_organization_displays_custom_fields(self, app):
user = Sysadmin()
Organization(user=user, name="org-one", department_id="3008")
response = app.get("/organization/about/org-one")
assert "Department ID" in response.body
@pytest.mark.usefixtures("clean_db")
class TestGroupDisplay(object):
def test_group_displays_custom_fields(self, app):
user = Sysadmin()
Group(user=user, name="group-one", bookface="theoneandonly", type='group')
response = app.get("/group/about/group-one")
assert "Bookface" in response.body
| 32.913043
| 82
| 0.712021
|
795271d979672aace5c79565e2cb6c238daa0259
| 5,788
|
py
|
Python
|
custom_components/sensor/folder.py
|
robmarkcole/HASS-folder-sensor
|
3d451415ef8e72270b37162b9d84eb746c068477
|
[
"MIT"
] | 3
|
2018-02-02T10:40:17.000Z
|
2018-02-23T00:38:58.000Z
|
custom_components/sensor/folder.py
|
robmarkcole/HASS-folder-sensor
|
3d451415ef8e72270b37162b9d84eb746c068477
|
[
"MIT"
] | 1
|
2018-02-01T09:17:16.000Z
|
2018-02-01T19:07:55.000Z
|
custom_components/sensor/folder.py
|
robmarkcole/HASS-folder-sensor
|
3d451415ef8e72270b37162b9d84eb746c068477
|
[
"MIT"
] | null | null | null |
"""
Sensor for monitoring the contents of a folder.
For more details about this platform, refer to the documentation at
https://home-assistant.io/components/sensor.folder/
"""
import datetime
from datetime import timedelta
import glob
import logging
import os
import voluptuous as vol
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.const import CONF_NAME
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
CONF_FOLDER_PATH = 'folder'
CONF_FILTER = 'filter'
CONF_RECURSIVE = 'recursive'
DEFAULT_FILTER = '*'
DEFAULT_NAME = ''
DEFAULT_RECURSIVE = False
FILE = 'file'
EVENT_FILE_ADDED = 'file_added'
EVENT_FILE_DELETED = 'file_deleted'
EVENT_FILE_MODIFIED = 'file_modified'
SCAN_INTERVAL = timedelta(seconds=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_FOLDER_PATH): cv.isdir,
vol.Optional(CONF_FILTER, default=DEFAULT_FILTER): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_RECURSIVE, default=DEFAULT_RECURSIVE): cv.boolean,
})
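# A minimal example configuration for this platform (hypothetical path), matching
# the schema above:
#
#   sensor:
#     - platform: folder
#       folder: /config/downloads/
#       filter: '*.jpg'
#       recursive: true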
def get_timestamp(file_path):
"""Return the timestamp of file."""
mtime = os.stat(file_path).st_mtime
return datetime.datetime.fromtimestamp(mtime).isoformat()
def get_files_dict(folder_path, filter_term, recursive):
"""Return the dict of file paths and mod times, applying filter."""
if recursive:
query = folder_path + '**/' + filter_term
files_list = glob.glob(query, recursive=True)
else:
query = folder_path + filter_term
files_list = glob.glob(query, recursive=False)
files_list = [f for f in files_list if os.path.isfile(f)]
files_dict = {f: get_timestamp(f) for f in files_list}
return files_dict
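# For example (hypothetical values): folder_path='/tmp/images/' with filter_term='*.jpg'
# builds the glob query '/tmp/images/*.jpg' (or '/tmp/images/**/*.jpg' when recursive)
# and returns a dict mapping each matching file path to its ISO-format modification time.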
def get_size(files_list):
"""Return the sum of the size in bytes of files in the list."""
size_list = [os.stat(f).st_size for f in files_list]
return sum(size_list)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the folder sensor."""
folder_path = config.get(CONF_FOLDER_PATH)
if not hass.config.is_allowed_path(folder_path):
_LOGGER.error("folder %s is not valid or allowed", folder_path)
else:
folder = Folder(
folder_path,
config.get(CONF_FILTER),
config.get(CONF_NAME),
config.get(CONF_RECURSIVE))
add_devices([folder], True)
class Folder(Entity):
"""Representation of a folder."""
ICON = 'mdi:folder'
def __init__(self, folder_path, filter_term, name, recursive):
"""Initialize the data object."""
folder_path = os.path.join(folder_path, '') # If no trailing / add it
        self._folder_path = folder_path # Need to check it's a valid path
self._filter_term = filter_term
if name == DEFAULT_NAME:
self._name = os.path.split(os.path.split(folder_path)[0])[1]
else:
self._name = name
self._recursive = recursive
self._files_record = get_files_dict(
folder_path, filter_term, recursive)
self._number_of_files = len(self._files_record)
self._size = get_size(list(self._files_record.keys()))
self._unit_of_measurement = 'MB'
self._last_added = None
self._last_deleted = None
self._last_modified = None
def update(self):
"""Update the sensor."""
current_files = get_files_dict(
self._folder_path, self._filter_term, self._recursive)
self._number_of_files = len(current_files)
self._size = get_size(list(current_files.keys()))
for file_path in set(
list(current_files.keys()) +
list(self._files_record.keys())):
if file_path not in self._files_record:
self.hass.bus.fire(
EVENT_FILE_ADDED, {FILE: file_path})
self._last_added = file_path
self._files_record[file_path] = current_files[file_path]
elif file_path not in current_files:
self.hass.bus.fire(
EVENT_FILE_DELETED, {FILE: file_path})
self._last_deleted = file_path
self._files_record.pop(file_path, None)
elif file_path in self._files_record and current_files:
if self._files_record[file_path] != current_files[file_path]:
self.hass.bus.fire(
EVENT_FILE_MODIFIED, {FILE: file_path})
self._last_modified = file_path
self._files_record[file_path] = current_files[file_path]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
decimals = 2
size_mb = round(self._size/1e6, decimals)
return size_mb
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
attr = {
'folder': self._folder_path,
'filter': self._filter_term,
'recursive': self._recursive,
'number_of_files': self._number_of_files,
'bytes': self._size,
'last_added': self._last_added,
'last_deleted': self._last_deleted,
'last_modified': self._last_modified
}
return attr
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
| 33.456647
| 78
| 0.646856
|
795272827e309bb50ffc78a45ab9a5f53ebaf313
| 1,798
|
py
|
Python
|
scss/tests/functions/compass/test_gradients.py
|
ojengwa/pyScss
|
fcd3b4604bc5cd34b0d36e845f89e5abaa7058ea
|
[
"MIT"
] | 1
|
2016-02-13T15:46:39.000Z
|
2016-02-13T15:46:39.000Z
|
scss/tests/functions/compass/test_gradients.py
|
ojengwa/pyScss
|
fcd3b4604bc5cd34b0d36e845f89e5abaa7058ea
|
[
"MIT"
] | null | null | null |
scss/tests/functions/compass/test_gradients.py
|
ojengwa/pyScss
|
fcd3b4604bc5cd34b0d36e845f89e5abaa7058ea
|
[
"MIT"
] | null | null | null |
"""Tests for Compass gradient generation."""
from scss.expression import Calculator
from scss.functions.compass.gradients import COMPASS_GRADIENTS_LIBRARY, linear_gradient
from scss.rule import Namespace
from scss.types import String, List, Number, Color
import pytest
@pytest.fixture
def calc():
ns = Namespace(functions=COMPASS_GRADIENTS_LIBRARY)
return Calculator(ns).evaluate_expression
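# The fixture returns an expression evaluator bound to a namespace containing only the
# Compass gradient functions, so calc('linear-gradient(...)') parses and evaluates a raw
# Sass expression string into scss.types values.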
def test_linear_gradient():
# Set up some values
to = String.unquoted('to')
bottom = String.unquoted('bottom')
left = String.unquoted('left')
angle = Number(45, 'deg')
red = Color.from_name('red')
blue = Color.from_name('blue')
start = Number(0, "%")
middle = Number(50, "%")
end = Number(100, "%")
assert (
linear_gradient(left, List((red, start)), List((blue, middle)))
== String('linear-gradient(left, red, blue 50%)')
)
assert (
linear_gradient(List((to, bottom)), blue, List((red, end)))
== String('linear-gradient(to bottom, blue, red)')
)
@pytest.mark.xfail('True', reason="rainbow still has intermediate values added")
def test_linear_gradient_idempotent(calc):
# linear-gradient should leave valid syntax alone.
# Examples graciously stolen from MDN:
# https://developer.mozilla.org/en-US/docs/Web/CSS/linear-gradient
trials = [
'linear-gradient(45deg, blue, red)',
'linear-gradient(to left top, blue, red)',
'linear-gradient(0deg, blue, green 40%, red)',
'linear-gradient(to right, red, orange, yellow, green, blue, indigo, violet)',
'linear-gradient(to bottom right, red, rgba(255,0,0,0))',
'linear-gradient(to bottom, hsl(0, 80%, 70%), #bada55)',
]
for trial in trials:
assert calc(trial) == String(trial)
| 31
| 87
| 0.660178
|
795272d39e4a6dacdb4d5a3ddb7b34a1a3d778e7
| 4,270
|
py
|
Python
|
lib/ssds.py
|
iSmida/DetectionYolo
|
b7e1eb26ca874da797cee02cb3e1639cf3546e0c
|
[
"MIT"
] | 3
|
2019-08-28T10:08:24.000Z
|
2020-08-10T08:58:42.000Z
|
lib/ssds.py
|
iSmida/DetectionYolo
|
b7e1eb26ca874da797cee02cb3e1639cf3546e0c
|
[
"MIT"
] | null | null | null |
lib/ssds.py
|
iSmida/DetectionYolo
|
b7e1eb26ca874da797cee02cb3e1639cf3546e0c
|
[
"MIT"
] | 1
|
2020-04-29T11:01:43.000Z
|
2020-04-29T11:01:43.000Z
|
from __future__ import print_function
import numpy as np
import torch
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import timeit
from lib.layers import *
from lib.utils.timer import Timer
from lib.utils.data_augment import preproc
from lib.modeling.model_builder import create_model
from lib.utils.config_parse import cfg
class ObjectDetector:
def __init__(self, viz_arch=False):
self.cfg = cfg
# Build model
print('===> Building model')
self.model, self.priorbox = create_model(cfg.MODEL)
self.priors = Variable(self.priorbox.forward(), volatile=True)
print(self.model)
# Print the model architecture and parameters
if viz_arch is True:
print('Model architectures:\n{}\n'.format(self.model))
# Utilize GPUs for computation
self.use_gpu = torch.cuda.is_available()
#self.use_gpu = False
self.half = False
if self.use_gpu:
print('Utilize GPUs for computation')
print('Number of GPU available', torch.cuda.device_count())
self.model.cuda()
            self.priors = self.priors.cuda()  # .cuda() returns a copy, so reassign
cudnn.benchmark = True
# self.model = torch.nn.DataParallel(self.model).module
# Utilize half precision
self.half = cfg.MODEL.HALF_PRECISION
if self.half:
self.model = self.model.half()
self.priors = self.priors.half()
# Build preprocessor and detector
self.preprocessor = preproc(cfg.MODEL.IMAGE_SIZE, cfg.DATASET.PIXEL_MEANS, -2)
self.detector = Detect(cfg.POST_PROCESS, self.priors)
# Load weight:
if cfg.RESUME_CHECKPOINT == '':
            raise AssertionError('RESUME_CHECKPOINT cannot be empty')
print('=> loading checkpoint {:s}'.format(cfg.RESUME_CHECKPOINT))
checkpoint = torch.load(cfg.RESUME_CHECKPOINT)
# checkpoint = torch.load(cfg.RESUME_CHECKPOINT, map_location='gpu' if self.use_gpu else 'cpu')
self.model.load_state_dict(checkpoint)
# test only
self.model.eval()
def predict(self, img, threshold=0.4, check_time=False):
# make sure the input channel is 3
assert img.shape[2] == 3
scale = torch.Tensor([img.shape[1::-1], img.shape[1::-1]])
_t = {'preprocess': Timer(), 'net_forward': Timer(), 'detect': Timer(), 'output': Timer()}
# preprocess image
_t['preprocess'].tic()
x = Variable(self.preprocessor(img)[0].unsqueeze(0),volatile=True)
if self.use_gpu:
x = x.cuda()
if self.half:
x = x.half()
preprocess_time = _t['preprocess'].toc()
t0 = timeit.default_timer()
# forward
_t['net_forward'].tic()
out = self.model(x) # forward pass
net_forward_time = _t['net_forward'].toc()
elapsed = timeit.default_timer() - t0
# detect
_t['detect'].tic()
detections = self.detector.forward(out)
detect_time = _t['detect'].toc()
# output
_t['output'].tic()
labels, scores, coords = [list() for _ in range(3)]
# for batch in range(detections.size(0)):
# print('Batch:', batch)
batch=0
for classes in range(detections.size(1)):
num = 0
while detections[batch,classes,num,0] >= threshold:
scores.append(detections[batch,classes,num,0])
labels.append(classes-1)
coords.append(detections[batch,classes,num,1:]*scale)
num+=1
output_time = _t['output'].toc()
total_time = preprocess_time + net_forward_time + detect_time + output_time
if check_time is True:
return labels, scores, coords, (total_time, preprocess_time, net_forward_time, detect_time, output_time)
# total_time = preprocess_time + net_forward_time + detect_time + output_time
# print('total time: {} \n preprocess: {} \n net_forward: {} \n detect: {} \n output: {}'.format(
# total_time, preprocess_time, net_forward_time, detect_time, output_time
# ))
return labels, scores, coords, elapsed
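# A minimal usage sketch (hypothetical image path; cfg.RESUME_CHECKPOINT must point to a
# trained checkpoint before ObjectDetector() is constructed):
#
#   import cv2
#   detector = ObjectDetector()
#   img = cv2.imread('example.jpg')   # BGR image of shape (H, W, 3)
#   labels, scores, coords, elapsed = detector.predict(img, threshold=0.4)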
| 39.174312
| 116
| 0.605152
|
7952734c32f89b44a128eeda5c79b42a2ebda219
| 545
|
py
|
Python
|
manage.py
|
panda1994zhang/earthquake
|
1dead943782a73a535e04929a5302ef1db2b4d84
|
[
"MIT"
] | 3
|
2018-09-19T10:57:27.000Z
|
2019-05-08T02:10:41.000Z
|
manage.py
|
aoii103/EarthquakeSpy
|
437228128dbe40bd7976013d4187c062109360bc
|
[
"MIT"
] | 6
|
2020-08-05T11:08:13.000Z
|
2021-06-10T19:57:20.000Z
|
manage.py
|
panda1994zhang/earthquake
|
1dead943782a73a535e04929a5302ef1db2b4d84
|
[
"MIT"
] | 1
|
2020-08-31T11:33:54.000Z
|
2020-08-31T11:33:54.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "EarthquakeSpy.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
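# Standard Django management entry point; typical invocations (the available commands
# depend on the project and installed apps):
#   python manage.py migrate
#   python manage.py runserver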
| 34.0625
| 77
| 0.689908
|