repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/plotting/test_groupby.py | """ Test cases for GroupBy.plot """
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, Index, Series
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Smoke and regression tests for ``.plot``/``.hist`` on groupby objects."""
    def test_series_groupby_plotting_nominally_works(self):
        """SeriesGroupBy.plot/.hist should run without raising when grouped
        by an external (non-index) key array."""
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        # seed only the group-label draw so group membership is reproducible
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        weight.groupby(gender).plot()
        tm.close()
        height.groupby(gender).hist()
        tm.close()
        # Regression test for GH8733
        height.groupby(gender).plot(alpha=0.5)
        tm.close()
    def test_plotting_with_float_index_works(self):
        """Plotting a grouped column with a float (non-unique) index must work."""
        # GH 7025
        df = DataFrame(
            {"def": [1, 1, 1, 2, 2, 2, 3, 3, 3], "val": np.random.randn(9)},
            index=[1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0],
        )
        df.groupby("def")["val"].plot()
        tm.close()
        df.groupby("def")["val"].apply(lambda x: x.plot())
        tm.close()
    def test_hist_single_row(self):
        """hist(by=...) must handle groups that contain a single row."""
        # GH10214
        bins = np.arange(80, 100 + 2, 1)
        df = DataFrame({"Name": ["AAA", "BBB"], "ByCol": [1, 2], "Mark": [85, 89]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
        df = DataFrame({"Name": ["AAA"], "ByCol": [1], "Mark": [85]})
        df["Mark"].hist(by=df["ByCol"], bins=bins)
    def test_plot_submethod_works(self):
        """The ``.plot.<kind>`` accessor methods on groupby objects should run."""
        df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
        df.groupby("z").plot.scatter("x", "y")
        tm.close()
        df.groupby("z")["x"].plot.line()
        tm.close()
    def test_plot_kwargs(self):
        """``plot(kind=...)`` and ``.plot.<kind>`` should draw the same artists."""
        df = DataFrame({"x": [1, 2, 3, 4, 5], "y": [1, 2, 3, 2, 1], "z": list("ababa")})
        res = df.groupby("z").plot(kind="scatter", x="x", y="y")
        # check that a scatter plot is effectively plotted: the axes should
        # contain a PathCollection from the scatter plot (GH11805)
        assert len(res["a"].collections) == 1
        res = df.groupby("z").plot.scatter(x="x", y="y")
        assert len(res["a"].collections) == 1
    @pytest.mark.parametrize("column, expected_axes_num", [(None, 2), ("b", 1)])
    def test_groupby_hist_frame_with_legend(self, column, expected_axes_num):
        # GH 6279 - DataFrameGroupBy histogram can have a legend
        expected_layout = (1, expected_axes_num)
        # single-column plots use that column name as the legend label
        expected_labels = column or [["a"], ["b"]]
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        # g.hist yields one axes array per group
        for axes in g.hist(legend=True, column=column):
            self._check_axes_shape(
                axes, axes_num=expected_axes_num, layout=expected_layout
            )
            for ax, expected_label in zip(axes[0], expected_labels):
                self._check_legend_labels(ax, expected_label)
    @pytest.mark.parametrize("column", [None, "b"])
    def test_groupby_hist_frame_with_legend_raises(self, column):
        # GH 6279 - DataFrameGroupBy histogram with legend and label raises
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        with pytest.raises(ValueError, match="Cannot use both legend and label"):
            g.hist(legend=True, column=column, label="d")
    def test_groupby_hist_series_with_legend(self):
        # GH 6279 - SeriesGroupBy histogram can have a legend
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        for ax in g["a"].hist(legend=True):
            self._check_axes_shape(ax, axes_num=1, layout=(1, 1))
            self._check_legend_labels(ax, ["1", "2"])
    def test_groupby_hist_series_with_legend_raises(self):
        # GH 6279 - SeriesGroupBy histogram with legend and label raises
        index = Index(15 * ["1"] + 15 * ["2"], name="c")
        df = DataFrame(np.random.randn(30, 2), index=index, columns=["a", "b"])
        g = df.groupby("c")
        with pytest.raises(ValueError, match="Cannot use both legend and label"):
            g.hist(legend=True, label="d")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_image.py | from plotly.graph_objs import Image
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_cone.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_cone.py
from plotly.graph_objs import Cone
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scatter.py | from plotly.graph_objs import Scatter
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pip/_internal/distributions/base.py | import abc
from pip._vendor.six import add_metaclass
@add_metaclass(abc.ABCMeta)
class AbstractDistribution(object):
    """A base class for handling installable artifacts.
    The requirements for anything installable are as follows:
    - we must be able to determine the requirement name
    (or we can't correctly handle the non-upgrade case).
    - for packages with setup requirements, we must also be able
    to determine their requirements without installing additional
    packages (for the same reason as run-time dependencies)
    - we must be able to create a Distribution object exposing the
    above metadata.
    """
    def __init__(self, req):
        # req: the requirement this distribution was built from — presumably
        # an InstallRequirement; confirm against callers outside this file.
        super(AbstractDistribution, self).__init__()
        self.req = req
    @abc.abstractmethod
    def get_pkg_resources_distribution(self):
        """Return a Distribution object exposing this artifact's metadata."""
        raise NotImplementedError()
    @abc.abstractmethod
    def prepare_distribution_metadata(self, finder, build_isolation):
        """Make the distribution's metadata available (may resolve/build setup requirements)."""
        raise NotImplementedError()
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/tests/test_compile_function.py | """See https://github.com/numpy/numpy/pull/11937.
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import uuid
from importlib import import_module
import pytest
import numpy.f2py
from numpy.testing import assert_equal
from . import util
def setup_module():
    """Skip this whole test module when build prerequisites are missing."""
    # MinGW64 Gfortran on Python 2 is known-broken for these builds.
    is_win_py2 = sys.platform == 'win32' and sys.version_info[0] < 3
    if is_win_py2:
        pytest.skip('Fails with MinGW64 Gfortran (Issue #9673)')
    # Both a C and a Fortran 77 compiler are required to exercise f2py.
    if not util.has_c_compiler():
        pytest.skip("Needs C compiler")
    if not util.has_f77_compiler():
        pytest.skip('Needs FORTRAN 77 compiler')
# extra_args can be a list (since gh-11937) or string.
# also test absence of extra_args
@pytest.mark.parametrize(
    "extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
    )
@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
    """End-to-end check of numpy.f2py.compile: build a module, import it, call it."""
    # flush through the f2py __init__ compile() function code path as a
    # crude test for input handling following migration from
    # exec_command() to subprocess.check_output() in gh-11937
    # the Fortran 77 syntax requires 6 spaces before any commands, but
    # more space may be added/
    fsource =  """
        integer function foo()
        foo = 10 + 5
        return
        end
    """
    # use various helper functions in util.py to enable robust build /
    # compile and reimport cycle in test suite
    moddir = util.get_module_dir()
    modname = util.get_temp_module_name()
    cwd = os.getcwd()
    target = os.path.join(moddir, str(uuid.uuid4()) + '.f')
    # try running compile() with and without a source_fn provided so
    # that the code path where a temporary file for writing Fortran
    # source is created is also explored
    for source_fn in [target, None]:
        # mimic the path changing behavior used by build_module() in
        # util.py, but don't actually use build_module() because it has
        # its own invocation of subprocess that circumvents the
        # f2py.compile code block under test
        try:
            os.chdir(moddir)
            ret_val = numpy.f2py.compile(
                fsource,
                modulename=modname,
                extra_args=extra_args,
                source_fn=source_fn
                )
        finally:
            os.chdir(cwd)
        # check for compile success return value
        assert_equal(ret_val, 0)
        # we are not currently able to import the Python-Fortran
        # interface module on Windows / Appveyor, even though we do get
        # successful compilation on that platform with Python 3.x
        if sys.platform != 'win32':
            # check for sensible result of Fortran function; that means
            # we can import the module name in Python and retrieve the
            # result of the sum operation
            return_check = import_module(modname)
            calc_result = return_check.foo()
            assert_equal(calc_result, 15)
            # Removal from sys.modules, is not as such necessary. Even with
            # removal, the module (dict) stays alive.
            del sys.modules[modname]
def test_f2py_init_compile_failure():
    """f2py.compile() must report failure (status 1) for invalid Fortran input."""
    status = numpy.f2py.compile(b"invalid")
    assert_equal(status, 1)
def test_f2py_init_compile_bad_cmd():
    """An unrunnable interpreter path must surface as status 127.

    Historic consistency with exec_command() error handling: the OSError
    raised for a missing executable should take precedence over the fact
    that the Fortran source itself is invalid.
    """
    # patch the sys Python exe path temporarily to induce an OSError
    # downstream NOTE: how bad of an idea is this patching?
    saved_executable = sys.executable
    try:
        sys.executable = 'does not exist'
        status = numpy.f2py.compile(b"invalid")
        assert_equal(status, 127)
    finally:
        # always restore the real interpreter path for later tests
        sys.executable = saved_executable
@pytest.mark.parametrize('fsource',
                         ['program test_f2py\nend program test_f2py',
                          b'program test_f2py\nend program test_f2py',])
def test_compile_from_strings(tmpdir, fsource):
    """gh-12796: f2py.compile must accept both str and bytes source."""
    original_cwd = os.getcwd()
    try:
        # build inside the pytest-provided temp dir to avoid littering cwd
        os.chdir(str(tmpdir))
        status = numpy.f2py.compile(
            fsource,
            modulename='test_compile_from_strings',
            extension='.f90')
        assert_equal(status, 0)
    finally:
        os.chdir(original_cwd)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/multi/test_get_set.py | <gh_stars>100-1000
import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, MultiIndex
import pandas._testing as tm
def assert_matching(actual, expected, check_dtype=False):
    """Assert two sequences of level/code array-likes are elementwise equal.

    Both inputs are coerced through np.asarray so the comparison avoids
    depending on the internal representation (Index vs ndarray vs list).
    """
    assert len(actual) == len(expected)
    for got, want in zip(actual, expected):
        tm.assert_numpy_array_equal(
            np.asarray(got), np.asarray(want), check_dtype=check_dtype
        )
def test_get_level_number_integer(idx):
    """Integer level *names* resolve by name, not by position."""
    # name level 0 as `1` and level 1 as `0` to force name-based lookup
    idx.names = [1, 0]
    assert idx._get_level_number(1) == 0
    assert idx._get_level_number(0) == 1
    too_many = "Too many levels: Index has only 2 levels, not 3"
    with pytest.raises(IndexError, match=too_many):
        idx._get_level_number(2)
    # unknown string names raise KeyError
    with pytest.raises(KeyError, match="Level fourth not found"):
        idx._get_level_number("fourth")
def test_set_name_methods(idx, index_names):
    """set_names/rename: out-of-place returns a renamed copy, inplace returns None."""
    # so long as these are synonyms, we don't need to test set_names
    assert idx.rename == idx.set_names
    new_names = [name + "SUFFIX" for name in index_names]
    ind = idx.set_names(new_names)
    # the original index must be left untouched by the out-of-place call
    assert idx.names == index_names
    assert ind.names == new_names
    msg = "Length of names must match number of levels in MultiIndex"
    with pytest.raises(ValueError, match=msg):
        ind.set_names(new_names + new_names)
    new_names2 = [name + "SUFFIX2" for name in new_names]
    res = ind.set_names(new_names2, inplace=True)
    assert res is None
    assert ind.names == new_names2
    # set names for specific level (# GH7792)
    ind = idx.set_names(new_names[0], level=0)
    assert idx.names == index_names
    assert ind.names == [new_names[0], index_names[1]]
    res = ind.set_names(new_names2[0], level=0, inplace=True)
    assert res is None
    assert ind.names == [new_names2[0], index_names[1]]
    # set names for multiple levels
    ind = idx.set_names(new_names, level=[0, 1])
    assert idx.names == index_names
    assert ind.names == new_names
    res = ind.set_names(new_names2, level=[0, 1], inplace=True)
    assert res is None
    assert ind.names == new_names2
def test_set_levels_codes_directly(idx):
    """Direct assignment to .levels / .codes must raise AttributeError."""
    # build plausible replacement codes (shifted within each level's range)
    shifted_major = [(x + 1) % 3 for x in idx.codes[0]]
    shifted_minor = [(x + 1) % 1 for x in idx.codes[1]]
    replacement_codes = [shifted_major, shifted_minor]
    # and plausible replacement levels (suffix every label)
    replacement_levels = [[lev + "a" for lev in level] for level in idx.levels]
    msg = "[Cc]an't set attribute"
    with pytest.raises(AttributeError, match=msg):
        idx.levels = replacement_levels
    with pytest.raises(AttributeError, match=msg):
        idx.codes = replacement_codes
def test_set_levels(idx):
    """set_levels: whole/partial replacement, out-of-place vs inplace, and
    rejection of invalid inputs without mutating the index."""
    # side note - you probably wouldn't want to use levels and codes
    # directly like this - but it is possible.
    levels = idx.levels
    new_levels = [[lev + "a" for lev in level] for level in levels]
    # level changing [w/o mutation]
    ind2 = idx.set_levels(new_levels)
    assert_matching(ind2.levels, new_levels)
    assert_matching(idx.levels, levels)
    # level changing [w/ mutation]
    ind2 = idx.copy()
    inplace_return = ind2.set_levels(new_levels, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.levels, new_levels)
    # level changing specific level [w/o mutation]
    ind2 = idx.set_levels(new_levels[0], level=0)
    assert_matching(ind2.levels, [new_levels[0], levels[1]])
    assert_matching(idx.levels, levels)
    ind2 = idx.set_levels(new_levels[1], level=1)
    assert_matching(ind2.levels, [levels[0], new_levels[1]])
    assert_matching(idx.levels, levels)
    # level changing multiple levels [w/o mutation]
    ind2 = idx.set_levels(new_levels, level=[0, 1])
    assert_matching(ind2.levels, new_levels)
    assert_matching(idx.levels, levels)
    # level changing specific level [w/ mutation]
    ind2 = idx.copy()
    inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.levels, [new_levels[0], levels[1]])
    assert_matching(idx.levels, levels)
    ind2 = idx.copy()
    inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.levels, [levels[0], new_levels[1]])
    assert_matching(idx.levels, levels)
    # level changing multiple levels [w/ mutation]
    ind2 = idx.copy()
    inplace_return = ind2.set_levels(new_levels, level=[0, 1], inplace=True)
    assert inplace_return is None
    assert_matching(ind2.levels, new_levels)
    assert_matching(idx.levels, levels)
    # illegal level changing should not change levels
    # GH 13754
    original_index = idx.copy()
    for inplace in [True, False]:
        with pytest.raises(ValueError, match="^On"):
            idx.set_levels(["c"], level=0, inplace=inplace)
        assert_matching(idx.levels, original_index.levels, check_dtype=True)
        with pytest.raises(ValueError, match="^On"):
            idx.set_codes([0, 1, 2, 3, 4, 5], level=0, inplace=inplace)
        assert_matching(idx.codes, original_index.codes, check_dtype=True)
        with pytest.raises(TypeError, match="^Levels"):
            idx.set_levels("c", level=0, inplace=inplace)
        assert_matching(idx.levels, original_index.levels, check_dtype=True)
        with pytest.raises(TypeError, match="^Codes"):
            idx.set_codes(1, level=0, inplace=inplace)
        assert_matching(idx.codes, original_index.codes, check_dtype=True)
def test_set_codes(idx):
    """set_codes: whole/partial replacement, out-of-place vs inplace, and
    codes supplied as a plain range (GH-style regression at the end)."""
    # side note - you probably wouldn't want to use levels and codes
    # directly like this - but it is possible.
    codes = idx.codes
    major_codes, minor_codes = codes
    major_codes = [(x + 1) % 3 for x in major_codes]
    minor_codes = [(x + 1) % 1 for x in minor_codes]
    new_codes = [major_codes, minor_codes]
    # changing codes w/o mutation
    ind2 = idx.set_codes(new_codes)
    assert_matching(ind2.codes, new_codes)
    assert_matching(idx.codes, codes)
    # changing label w/ mutation
    ind2 = idx.copy()
    inplace_return = ind2.set_codes(new_codes, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.codes, new_codes)
    # codes changing specific level w/o mutation
    ind2 = idx.set_codes(new_codes[0], level=0)
    assert_matching(ind2.codes, [new_codes[0], codes[1]])
    assert_matching(idx.codes, codes)
    ind2 = idx.set_codes(new_codes[1], level=1)
    assert_matching(ind2.codes, [codes[0], new_codes[1]])
    assert_matching(idx.codes, codes)
    # codes changing multiple levels w/o mutation
    ind2 = idx.set_codes(new_codes, level=[0, 1])
    assert_matching(ind2.codes, new_codes)
    assert_matching(idx.codes, codes)
    # label changing specific level w/ mutation
    ind2 = idx.copy()
    inplace_return = ind2.set_codes(new_codes[0], level=0, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.codes, [new_codes[0], codes[1]])
    assert_matching(idx.codes, codes)
    ind2 = idx.copy()
    inplace_return = ind2.set_codes(new_codes[1], level=1, inplace=True)
    assert inplace_return is None
    assert_matching(ind2.codes, [codes[0], new_codes[1]])
    assert_matching(idx.codes, codes)
    # codes changing multiple levels [w/ mutation]
    ind2 = idx.copy()
    inplace_return = ind2.set_codes(new_codes, level=[0, 1], inplace=True)
    assert inplace_return is None
    assert_matching(ind2.codes, new_codes)
    assert_matching(idx.codes, codes)
    # label changing for levels of different magnitude of categories
    ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
    new_codes = range(129, -1, -1)
    expected = pd.MultiIndex.from_tuples([(0, i) for i in new_codes])
    # [w/o mutation]
    result = ind.set_codes(codes=new_codes, level=1)
    assert result.equals(expected)
    # [w/ mutation]
    result = ind.copy()
    result.set_codes(codes=new_codes, level=1, inplace=True)
    assert result.equals(expected)
def test_set_levels_codes_names_bad_input(idx):
    """Length mismatches raise ValueError; scalars where list-likes are
    required raise TypeError (and vice versa for level-targeted calls)."""
    levels, codes = idx.levels, idx.codes
    names = idx.names
    with pytest.raises(ValueError, match="Length of levels"):
        idx.set_levels([levels[0]])
    with pytest.raises(ValueError, match="Length of codes"):
        idx.set_codes([codes[0]])
    with pytest.raises(ValueError, match="Length of names"):
        idx.set_names([names[0]])
    # shouldn't scalar data error, instead should demand list-like
    with pytest.raises(TypeError, match="list of lists-like"):
        idx.set_levels(levels[0])
    # shouldn't scalar data error, instead should demand list-like
    with pytest.raises(TypeError, match="list of lists-like"):
        idx.set_codes(codes[0])
    # shouldn't scalar data error, instead should demand list-like
    with pytest.raises(TypeError, match="list-like"):
        idx.set_names(names[0])
    # should have equal lengths
    with pytest.raises(TypeError, match="list of lists-like"):
        idx.set_levels(levels[0], level=[0, 1])
    with pytest.raises(TypeError, match="list-like"):
        idx.set_levels(levels, level=0)
    # should have equal lengths
    with pytest.raises(TypeError, match="list of lists-like"):
        idx.set_codes(codes[0], level=[0, 1])
    with pytest.raises(TypeError, match="list-like"):
        idx.set_codes(codes, level=0)
    # should have equal lengths
    with pytest.raises(ValueError, match="Length of names"):
        idx.set_names(names[0], level=[0, 1])
    with pytest.raises(TypeError, match="Names must be a"):
        idx.set_names(names, level=0)
@pytest.mark.parametrize("inplace", [True, False])
def test_set_names_with_nlevel_1(inplace):
    """GH 21149: set_names on a single-level MultiIndex must not raise."""
    expected = pd.MultiIndex(levels=[[0, 1]], codes=[[0, 1]], names=["first"])
    mi = pd.MultiIndex.from_product([[0, 1]])
    renamed = mi.set_names("first", level=0, inplace=inplace)
    # the inplace call returns None, so compare the mutated index instead
    result = mi if inplace else renamed
    tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_set_levels_categorical(ordered):
    """GH 13854: a CategoricalIndex level must survive set_levels intact."""
    base = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
    cat_level = CategoricalIndex(list("bac"), ordered=ordered)
    result = base.set_levels(cat_level, 0)
    expected = MultiIndex(levels=[cat_level, [0, 1, 2, 3]], codes=base.codes)
    tm.assert_index_equal(result, expected)
    # materialized level values keep the categorical dtype and ordering
    expected_lvl = CategoricalIndex(
        list("bacb"), categories=cat_level.categories, ordered=cat_level.ordered
    )
    tm.assert_index_equal(result.get_level_values(0), expected_lvl)
def test_set_value_keeps_names():
    """GH 3742: writing a value via .at must not clobber MultiIndex names."""
    name_level = ["hans"] * 3 + ["grethe"] * 3
    number_level = ["1", "2", "3"] * 2
    mi = pd.MultiIndex.from_arrays([name_level, number_level], names=["Name", "Number"])
    df = pd.DataFrame(
        np.random.randn(6, 4), columns=["one", "two", "three", "four"], index=mi
    ).sort_index()
    assert df._is_copy is None
    assert df.index.names == ("Name", "Number")
    # enlarging assignment through .at (new index entry)
    df.at[("grethe", "4"), "one"] = 99.34
    assert df._is_copy is None
    assert df.index.names == ("Name", "Number")
def test_set_levels_with_iterable():
    """GH 23273: set_levels must consume a lazy iterable (e.g. map object)."""
    sizes = [1, 2, 3]
    colors = ["black"] * 3
    index = pd.MultiIndex.from_arrays([sizes, colors], names=["size", "color"])
    # hand a one-shot iterator to set_levels, not a materialized list
    lazy_levels = map(int, ["3", "2", "1"])
    result = index.set_levels(lazy_levels, level="size")
    expected = pd.MultiIndex.from_arrays(
        [[3, 2, 1], colors], names=["size", "color"]
    )
    tm.assert_index_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/_surface.py | import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``volume.surface`` compound property (auto-generated style)."""
    def __init__(self, plotly_name="surface", parent_name="volume", **kwargs):
        # kwargs.pop lets subclasses/callers override the generated defaults
        super(SurfaceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Surface"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            count
                Sets the number of iso-surfaces between minimum
                and maximum iso-values. By default this value
                is 2 meaning that only minimum and maximum
                surfaces would be drawn.
            fill
                Sets the fill ratio of the iso-surface. The
                default fill value of the surface is 1 meaning
                that they are entirely shaded. On the other
                hand Applying a `fill` ratio less than one
                would allow the creation of openings parallel
                to the edges.
            pattern
                Sets the surface pattern of the iso-surface 3-D
                sections. The default pattern of the surface is
                `all` meaning that the rest of surface elements
                would be shaded. The check options (either 1 or
                2) could be used to draw half of the squares on
                the surface. Using various combinations of
                capital `A`, `B`, `C`, `D` and `E` may also be
                used to reduce the number of triangles on the
                iso-surfaces and creating other patterns of
                interest.
            show
                Hides/displays surfaces between minimum and
                maximum iso-values.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_value_counts.py | import numpy as np
import pandas as pd
import pandas._testing as tm
def test_data_frame_value_counts_unsorted():
    """With sort=False the counts keep observation order, not count order."""
    df = pd.DataFrame(
        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
        index=["falcon", "dog", "cat", "ant"],
    )
    result = df.value_counts(sort=False)
    # counts are keyed by a MultiIndex over the frame's columns
    expected = pd.Series(
        data=[1, 2, 1],
        index=pd.MultiIndex.from_arrays(
            [(2, 4, 6), (2, 0, 0)], names=["num_legs", "num_wings"]
        ),
    )
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_ascending():
    """ascending=True returns the least frequent row combinations first."""
    df = pd.DataFrame(
        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
        index=["falcon", "dog", "cat", "ant"],
    )
    result = df.value_counts(ascending=True)
    expected = pd.Series(
        data=[1, 1, 2],
        index=pd.MultiIndex.from_arrays(
            [(2, 6, 4), (2, 0, 0)], names=["num_legs", "num_wings"]
        ),
    )
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_default():
    """Default behaviour sorts by count, most frequent combination first."""
    df = pd.DataFrame(
        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
        index=["falcon", "dog", "cat", "ant"],
    )
    result = df.value_counts()
    expected = pd.Series(
        data=[2, 1, 1],
        index=pd.MultiIndex.from_arrays(
            [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
        ),
    )
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_normalize():
    """normalize=True returns relative frequencies that sum to 1."""
    df = pd.DataFrame(
        {"num_legs": [2, 4, 4, 6], "num_wings": [2, 0, 0, 0]},
        index=["falcon", "dog", "cat", "ant"],
    )
    result = df.value_counts(normalize=True)
    expected = pd.Series(
        data=[0.5, 0.25, 0.25],
        index=pd.MultiIndex.from_arrays(
            [(4, 6, 2), (0, 0, 2)], names=["num_legs", "num_wings"]
        ),
    )
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_single_col_default():
    """A single-column frame still produces a (one-level) MultiIndex result."""
    df = pd.DataFrame({"num_legs": [2, 4, 4, 6]})
    result = df.value_counts()
    expected = pd.Series(
        data=[2, 1, 1],
        index=pd.MultiIndex.from_arrays([[4, 6, 2]], names=["num_legs"]),
    )
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty():
    """An empty frame yields an empty int64 Series of counts."""
    df_no_cols = pd.DataFrame()
    result = df_no_cols.value_counts()
    expected = pd.Series([], dtype=np.int64)
    tm.assert_series_equal(result, expected)
def test_data_frame_value_counts_empty_normalize():
    """An empty frame with normalize=True yields an empty float64 Series."""
    df_no_cols = pd.DataFrame()
    result = df_no_cols.value_counts(normalize=True)
    expected = pd.Series([], dtype=np.float64)
    tm.assert_series_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/pointcloud/marker/_border.py | import _plotly_utils.basevalidators
class BorderValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``pointcloud.marker.border`` compound property (auto-generated style)."""
    def __init__(self, plotly_name="border", parent_name="pointcloud.marker", **kwargs):
        # kwargs.pop lets subclasses/callers override the generated defaults
        super(BorderValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Border"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            arearatio
                Specifies what fraction of the marker area is
                covered with the border.
            color
                Sets the stroke color. It accepts a specific
                color. If the color is not fully opaque and
                there are hundreds of thousands of points, it
                may cause slower zooming and panning.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/waitress/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
from waitress.server import create_server
import logging
def serve(app, **kw):
    """Create a waitress server for *app* and run it until shutdown.

    The underscore-prefixed keywords are test shims and are stripped from
    *kw* before the remaining options are handed to the server factory.
    """
    server_factory = kw.pop("_server", create_server)  # test shim
    quiet = kw.pop("_quiet", False)  # test shim
    profiling = kw.pop("_profile", False)  # test shim
    if not quiet:  # pragma: no cover
        # idempotent if logging has already been set up
        logging.basicConfig()
    server = server_factory(app, **kw)
    if not quiet:  # pragma: no cover
        server.print_listen("Serving on http://{}:{}")
    if profiling:  # pragma: no cover
        profile("server.run()", globals(), locals(), (), False)
    else:
        server.run()
def serve_paste(app, global_conf, **kw):
    """PasteDeploy-compatible entry point; blocks in serve() until shutdown."""
    # global_conf is required by the paste server_runner signature but unused
    serve(app, **kw)
    return 0
def profile(cmd, globals, locals, sort_order, callers):  # pragma: no cover
    """Run *cmd* under the profiler and print profiling output.

    :param cmd: source string executed via profile.runctx
    :param globals: globals mapping for the executed command
    :param locals: locals mapping for the executed command
    :param sort_order: pstats sort keys; falsy selects a sensible default
    :param callers: if true print caller stats instead of plain stats
    """
    import os
    import profile
    import pstats
    import tempfile
    fd, fn = tempfile.mkstemp()
    # mkstemp returns an *open* descriptor; runctx/pstats reopen the file
    # by name, so close fd immediately to avoid leaking it on every call.
    os.close(fd)
    try:
        profile.runctx(cmd, globals, locals, fn)
        stats = pstats.Stats(fn)
        stats.strip_dirs()
        # calls,time,cumulative and cumulative,calls,time are useful
        stats.sort_stats(*(sort_order or ("cumulative", "calls", "time")))
        if callers:
            stats.print_callers(0.3)
        else:
            stats.print_stats(0.3)
    finally:
        os.remove(fn)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/slider/currentvalue/__init__.py | import sys
if sys.version_info < (3, 7):
from ._xanchor import XanchorValidator
from ._visible import VisibleValidator
from ._suffix import SuffixValidator
from ._prefix import PrefixValidator
from ._offset import OffsetValidator
from ._font import FontValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._xanchor.XanchorValidator",
"._visible.VisibleValidator",
"._suffix.SuffixValidator",
"._prefix.PrefixValidator",
"._offset.OffsetValidator",
"._font.FontValidator",
],
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/table/_columnwidthsrc.py | <filename>env/lib/python3.8/site-packages/plotly/validators/table/_columnwidthsrc.py
import _plotly_utils.basevalidators
class ColumnwidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``table.columnwidthsrc`` property (auto-generated style)."""
    def __init__(self, plotly_name="columnwidthsrc", parent_name="table", **kwargs):
        # kwargs.pop lets subclasses/callers override the generated defaults;
        # edit_type/role semantics come from the plotly schema — presumably
        # "none" means no redraw is triggered on change; confirm upstream.
        super(ColumnwidthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/categorical/test_map.py | import numpy as np
import pytest
import pandas as pd
from pandas import CategoricalIndex, Index
import pandas._testing as tm
class TestMap:
    """Tests for CategoricalIndex.map / Categorical.map result dtype and metadata."""
    @pytest.mark.parametrize(
        "data, categories",
        [
            (list("abcbca"), list("cab")),
            (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),
        ],
        ids=["string", "interval"],
    )
    def test_map_str(self, data, categories, ordered):
        # GH 31202 - override base class since we want to maintain categorical/ordered
        index = CategoricalIndex(data, categories=categories, ordered=ordered)
        result = index.map(str)
        expected = CategoricalIndex(
            map(str, data), categories=map(str, categories), ordered=ordered
        )
        tm.assert_index_equal(result, expected)
    def test_map(self):
        """map keeps categorical dtype, category order, and the index name."""
        ci = pd.CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)
        tm.assert_index_equal(result, exp)
        ci = pd.CategoricalIndex(
            list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"
        )
        result = ci.map(lambda x: x.lower())
        exp = pd.CategoricalIndex(
            list("ababc"), categories=list("bac"), ordered=False, name="XXX"
        )
        tm.assert_index_equal(result, exp)
        # GH 12766: Return an index not an array
        tm.assert_index_equal(
            ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")
        )
        # change categories dtype
        ci = pd.CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)
        def f(x):
            return {"A": 10, "B": 20, "C": 30}.get(x)
        result = ci.map(f)
        exp = pd.CategoricalIndex(
            [10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False
        )
        tm.assert_index_equal(result, exp)
        # mapper may also be a Series or a dict — all three agree
        result = ci.map(pd.Series([10, 20, 30], index=["A", "B", "C"]))
        tm.assert_index_equal(result, exp)
        result = ci.map({"A": 10, "B": 20, "C": 30})
        tm.assert_index_equal(result, exp)
    def test_map_with_categorical_series(self):
        # GH 12756: the mapper's dtype decides whether the result is categorical
        a = pd.Index([1, 2, 3, 4])
        b = pd.Series(["even", "odd", "even", "odd"], dtype="category")
        c = pd.Series(["even", "odd", "even", "odd"])
        exp = CategoricalIndex(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(b), exp)
        exp = pd.Index(["odd", "even", "odd", np.nan])
        tm.assert_index_equal(a.map(c), exp)
    @pytest.mark.parametrize(
        ("data", "f"),
        (
            ([1, 1, np.nan], pd.isna),
            ([1, 2, np.nan], pd.isna),
            ([1, 1, np.nan], {1: False}),
            ([1, 2, np.nan], {1: False, 2: False}),
            ([1, 1, np.nan], pd.Series([False, False])),
            ([1, 2, np.nan], pd.Series([False, False, False])),
        ),
    )
    def test_map_with_nan(self, data, f):  # GH 24241
        values = pd.Categorical(data)
        result = values.map(f)
        if data[1] == 1:
            # a single distinct non-NaN value keeps the result categorical
            expected = pd.Categorical([False, False, np.nan])
            tm.assert_categorical_equal(result, expected)
        else:
            expected = pd.Index([False, False, np.nan])
            tm.assert_index_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/carpet/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
if sys.version_info < (3, 7):
from ._ysrc import YsrcValidator
from ._yaxis import YaxisValidator
from ._y import YValidator
from ._xsrc import XsrcValidator
from ._xaxis import XaxisValidator
from ._x import XValidator
from ._visible import VisibleValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._stream import StreamValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._font import FontValidator
from ._db import DbValidator
from ._da import DaValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._color import ColorValidator
from ._cheaterslope import CheaterslopeValidator
from ._carpet import CarpetValidator
from ._bsrc import BsrcValidator
from ._baxis import BaxisValidator
from ._b0 import B0Validator
from ._b import BValidator
from ._asrc import AsrcValidator
from ._aaxis import AaxisValidator
from ._a0 import A0Validator
from ._a import AValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._ysrc.YsrcValidator",
"._yaxis.YaxisValidator",
"._y.YValidator",
"._xsrc.XsrcValidator",
"._xaxis.XaxisValidator",
"._x.XValidator",
"._visible.VisibleValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._stream.StreamValidator",
"._opacity.OpacityValidator",
"._name.NameValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._font.FontValidator",
"._db.DbValidator",
"._da.DaValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
"._color.ColorValidator",
"._cheaterslope.CheaterslopeValidator",
"._carpet.CarpetValidator",
"._bsrc.BsrcValidator",
"._baxis.BaxisValidator",
"._b0.B0Validator",
"._b.BValidator",
"._asrc.AsrcValidator",
"._aaxis.AaxisValidator",
"._a0.A0Validator",
"._a.AValidator",
],
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_scalarprint.py | # -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
from __future__ import division, absolute_import, print_function
import code, sys
import platform
import pytest
from tempfile import TemporaryFile
import numpy as np
from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
    """Tests for str/repr/format of the real floating-point scalar types."""

    def test_str(self):
        """str() of simple values must agree across all float widths."""
        svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
        styps = [np.float16, np.float32, np.float64, np.longdouble]
        wanted = [
             ['0.0',  '0.0',  '0.0',  '0.0'],
             ['-0.0', '-0.0', '-0.0', '-0.0'],
             ['1.0',  '1.0',  '1.0',  '1.0'],
             ['-1.0', '-1.0', '-1.0', '-1.0'],
             ['inf',  'inf',  'inf',  'inf'],
             ['-inf', '-inf', '-inf', '-inf'],
             ['nan',  'nan',  'nan',  'nan']]

        for wants, val in zip(wanted, svals):
            for want, styp in zip(wants, styps):
                msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
                assert_equal(str(styp(val)), want, err_msg=msg)

    def test_scalar_cutoffs(self):
        """str/repr of np.float64 must match python3 float formatting."""
        # test that both the str and repr of np.float64 behaves
        # like python floats in python3. Note that in python2
        # the str has truncated digits, but we do not do this
        def check(v):
            # we compare str to repr, to avoid python2 truncation behavior
            assert_equal(str(np.float64(v)), repr(v))
            assert_equal(repr(np.float64(v)), repr(v))

        # check we use the same number of significant digits
        check(1.12345678901234567890)
        check(0.0112345678901234567890)

        # check switch from scientific output to positional and back
        check(1e-5)
        check(1e-4)
        check(1e15)
        check(1e16)

    def test_py2_float_print(self):
        """Printing a scalar to a real file must not use py2's tp_print."""
        # gh-10753
        # In python2, the python float type implements an obsolete method
        # tp_print, which overrides tp_repr and tp_str when using "print" to
        # output to a "real file" (ie, not a StringIO). Make sure we don't
        # inherit it.
        x = np.double(0.1999999999999)
        with TemporaryFile('r+t') as f:
            print(x, file=f)
            f.seek(0)
            output = f.read()
        assert_equal(output, str(x) + '\n')
        # In python2 the value float('0.1999999999999') prints with reduced
        # precision as '0.2', but we want numpy's np.double('0.1999999999999')
        # to print the unique value, '0.1999999999999'.

        # gh-11031
        # Only in the python2 interactive shell and when stdout is a "real"
        # file, the output of the last command is printed to stdout without
        # Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
        # x` are potentially different. Make sure they are the same. The only
        # way I found to get prompt-like output is using an actual prompt from
        # the 'code' module. Again, must use tempfile to get a "real" file.

        # dummy user-input which enters one line and then ctrl-Ds.
        def userinput():
            yield 'np.sqrt(2)'
            raise EOFError
        gen = userinput()
        input_func = lambda prompt="": next(gen)

        with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
            orig_stdout, orig_stderr = sys.stdout, sys.stderr
            sys.stdout, sys.stderr = fo, fe

            # py2 code.interact sends irrelevant internal DeprecationWarnings
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning)
                code.interact(local={'np': np}, readfunc=input_func, banner='')

            sys.stdout, sys.stderr = orig_stdout, orig_stderr

            fo.seek(0)
            capture = fo.read().strip()

        assert_equal(capture, repr(np.sqrt(2)))

    def test_dragon4(self):
        """Exercise the dragon4 float-printing algorithm directly."""
        # these tests are adapted from Ryan Juckett's dragon4 implementation,
        # see dragon4.c for details.

        fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
        fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
        fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
        fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)

        preckwd = lambda prec: {'unique': False, 'precision': prec}

        assert_equal(fpos32('1.0'), "1.")
        assert_equal(fsci32('1.0'), "1.e+00")
        assert_equal(fpos32('10.234'), "10.234")
        assert_equal(fpos32('-10.234'), "-10.234")
        assert_equal(fsci32('10.234'), "1.0234e+01")
        assert_equal(fsci32('-10.234'), "-1.0234e+01")
        assert_equal(fpos32('1000.0'), "1000.")
        assert_equal(fpos32('1.0', precision=0), "1.")
        assert_equal(fsci32('1.0', precision=0), "1.e+00")
        assert_equal(fpos32('10.234', precision=0), "10.")
        assert_equal(fpos32('-10.234', precision=0), "-10.")
        assert_equal(fsci32('10.234', precision=0), "1.e+01")
        assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
        assert_equal(fpos32('10.234', precision=2), "10.23")
        assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
        assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
                            '9.9999999999999995e-08')
        assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
                            '9.8813129168249309e-324')
        assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
                            '9.9999999999999694e-311')

        # test rounding
        # 3.1415927410 is closest float32 to np.pi
        assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410")
        assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
                            "3.1415927410e+00")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536")
        assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
                            "3.1415926536e+00")
        # 299792448 is closest float32 to 299792458
        assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
        assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
        assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
        assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")

        assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
                            "3.1415927410125732421875000")
        assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
                         "3.14159265358979311599796346854418516159057617187500")
        assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")

        # smallest numbers
        assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
                    "0.00000000000000000000000000000000000000000000140129846432"
                    "4817070923729583289916131280261941876515771757068283889791"
                    "08268586060148663818836212158203125")
        assert_equal(fpos64(0.5**(1022 + 52), unique=False, precision=1074),
                    "0.00000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000000000000000000000000000"
                    "0000000000000000000000000000000000049406564584124654417656"
                    "8792868221372365059802614324764425585682500675507270208751"
                    "8652998363616359923797965646954457177309266567103559397963"
                    "9877479601078187812630071319031140452784581716784898210368"
                    "8718636056998730723050006387409153564984387312473397273169"
                    "6151400317153853980741262385655911710266585566867681870395"
                    "6031062493194527159149245532930545654440112748012970999954"
                    "1931989409080416563324524757147869014726780159355238611550"
                    "1348035264934720193790268107107491703332226844753335720832"
                    "4319360923828934583680601060115061698097530783422773183292"
                    "4790498252473077637592724787465608477820373446969953364701"
                    "7972677717585125660551199131504891101451037862738167250955"
                    "8373897335989936648099411642057026370902792427675445652290"
                    "87538682506419718265533447265625")

        # largest numbers
        assert_equal(fpos32(np.finfo(np.float32).max, **preckwd(0)),
                    "340282346638528859811704183484516925440.")
        assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
                    "1797693134862315708145274237317043567980705675258449965989"
                    "1747680315726078002853876058955863276687817154045895351438"
                    "2464234321326889464182768467546703537516986049910576551282"
                    "0762454900903893289440758685084551339423045832369032229481"
                    "6580855933212334827479782620414472316873817718091929988125"
                    "0404026184124858368.")
        # Warning: In unique mode only the integer digits necessary for
        # uniqueness are computed, the rest are 0. Should we change this?
        assert_equal(fpos32(np.finfo(np.float32).max, precision=0),
                    "340282350000000000000000000000000000000.")

        # test trailing zeros
        assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
        assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
        assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
        assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
        assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
        assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
        assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
        # gh-10713
        assert_equal(fpos64('324', unique=False, precision=5,
                            fractional=False), "324.00")

    def test_dragon4_interface(self):
        """Check padding/exp_digits/trim options of format_float_*."""
        tps = [np.float16, np.float32, np.float64]
        if hasattr(np, 'float128'):
            tps.append(np.float128)

        fpos = np.format_float_positional
        fsci = np.format_float_scientific

        for tp in tps:
            # test padding
            assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), "   1.    ")
            assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), "  -1.    ")
            assert_equal(fpos(tp('-10.2'),
                         pad_left=4, pad_right=4), " -10.2   ")

            # test exp_digits
            assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")

            # test fixed (non-unique) mode
            assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
            assert_equal(fsci(tp('1.0'), unique=False, precision=4),
                         "1.0000e+00")

            # test trimming
            # trim of 'k' or '.' only affects non-unique mode, since unique
            # mode will not output trailing 0s.
            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
                         "1.0000")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
                         "1.")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
                         "1.2" if tp != np.float16 else "1.2002")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
                         "1.0")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
                         "1.2" if tp != np.float16 else "1.2002")
            assert_equal(fpos(tp('1.'), trim='0'), "1.0")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
                         "1")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
                         "1.2" if tp != np.float16 else "1.2002")
            assert_equal(fpos(tp('1.'), trim='-'), "1")

    @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
                        reason="only applies to ppc float128 values")
    def test_ppc64_ibm_double_double128(self):
        """float128 precision decays early in IBM double-double subnormals."""
        # check that the precision decreases once we get into the subnormal
        # range. Unlike float64, this starts around 1e-292 instead of 1e-308,
        # which happens when the first double is normal and the second is
        # subnormal.
        x = np.float128('2.123123123123123123123123123123123e-286')
        got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
        expected = [
            "1.06156156156156156156156156156157e-286",
            "1.06156156156156156156156156156158e-287",
            "1.06156156156156156156156156156159e-288",
            "1.0615615615615615615615615615616e-289",
            "1.06156156156156156156156156156157e-290",
            "1.06156156156156156156156156156156e-291",
            "1.0615615615615615615615615615616e-292",
            "1.0615615615615615615615615615615e-293",
            "1.061561561561561561561561561562e-294",
            "1.06156156156156156156156156155e-295",
            "1.0615615615615615615615615616e-296",
            "1.06156156156156156156156156e-297",
            "1.06156156156156156156156157e-298",
            "1.0615615615615615615615616e-299",
            "1.06156156156156156156156e-300",
            "1.06156156156156156156155e-301",
            "1.0615615615615615615616e-302",
            "1.061561561561561561562e-303",
            "1.06156156156156156156e-304",
            "1.0615615615615615618e-305",
            "1.06156156156156156e-306",
            "1.06156156156156157e-307",
            "1.0615615615615616e-308",
            "1.06156156156156e-309",
            "1.06156156156157e-310",
            "1.0615615615616e-311",
            "1.06156156156e-312",
            "1.06156156154e-313",
            "1.0615615616e-314",
            "1.06156156e-315",
            "1.06156155e-316",
            "1.061562e-317",
            "1.06156e-318",
            "1.06155e-319",
            "1.0617e-320",
            "1.06e-321",
            "1.04e-322",
            "1e-323",
            "0.0",
            "0.0"]
        assert_equal(got, expected)

        # Note: we follow glibc behavior, but it (or gcc) might not be right.
        # In particular we can get two values that print the same but are not
        # equal:
        a = np.float128('2')/np.float128('3')
        b = np.float128(str(a))
        assert_equal(str(a), str(b))
        assert_(a != b)

    # BUG FIX: these two methods lacked the ``test_`` prefix, so pytest
    # never collected them and the regression checks silently did not run.
    def test_float32_roundtrip(self):
        """repr of a float32 must round-trip uniquely (gh-9360)."""
        # gh-9360
        x = np.float32(1024 - 2**-14)
        y = np.float32(1024 - 2**-13)
        assert_(repr(x) != repr(y))
        assert_equal(np.float32(repr(x)), x)
        assert_equal(np.float32(repr(y)), y)

    def test_float64_vs_python(self):
        """float64 repr matches python float repr (gh-2643/6136/6908)."""
        # gh-2643, gh-6136, gh-6908
        assert_equal(repr(np.float64(0.1)), repr(0.1))
        assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/polynomial/_polybase.py | <gh_stars>1000+
"""
Abstract base class for the various polynomial Classes.
The ABCPolyBase class provides the methods needed to implement the common API
for the various polynomial classes. It operates as a mixin, but uses the
abc module from the stdlib.
"""
import abc
import numbers
import numpy as np
from . import polyutils as pu
__all__ = ['ABCPolyBase']
class ABCPolyBase(abc.ABC):
    """An abstract base class for immutable series classes.

    ABCPolyBase provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the
    methods listed below.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    coef : array_like
        Series coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where
        ``P_i`` is the basis polynomials of degree ``i``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is the derived class domain.
    window : (2,) array_like, optional
        Window, see domain for its use. The default value is the
        derived class window.

    Attributes
    ----------
    coef : (N,) ndarray
        Series coefficients in order of increasing degree.
    domain : (2,) ndarray
        Domain that is mapped to window.
    window : (2,) ndarray
        Window that domain is mapped to.

    Class Attributes
    ----------------
    maxpower : int
        Maximum power allowed, i.e., the largest number ``n`` such that
        ``p(x)**n`` is allowed. This is to limit runaway polynomial size.
    domain : (2,) ndarray
        Default domain of the class.
    window : (2,) ndarray
        Default window of the class.

    """
    # Not hashable
    __hash__ = None

    # Opt out of numpy ufuncs and Python ops with ndarray subclasses.
    __array_ufunc__ = None

    # Limit runaway size. T_n^m has degree n*m
    maxpower = 100

    # Abstract class attributes: concrete series classes must supply their
    # default domain/window intervals, a short display nickname (used by
    # __str__), and the LaTeX basis name (used by _repr_latex_term).
    @property
    @abc.abstractmethod
    def domain(self):
        pass

    @property
    @abc.abstractmethod
    def window(self):
        pass

    @property
    @abc.abstractmethod
    def nickname(self):
        pass

    @property
    @abc.abstractmethod
    def basis_name(self):
        pass

    # Abstract flat-coefficient primitives: each concrete class routes these
    # to its module-level functions (e.g. chebadd, chebmul, ...).  All of the
    # arithmetic dunders below are implemented in terms of them.
    @staticmethod
    @abc.abstractmethod
    def _add(c1, c2):
        pass

    @staticmethod
    @abc.abstractmethod
    def _sub(c1, c2):
        pass

    @staticmethod
    @abc.abstractmethod
    def _mul(c1, c2):
        pass

    @staticmethod
    @abc.abstractmethod
    def _div(c1, c2):
        pass

    @staticmethod
    @abc.abstractmethod
    def _pow(c, pow, maxpower=None):
        pass

    @staticmethod
    @abc.abstractmethod
    def _val(x, c):
        pass

    @staticmethod
    @abc.abstractmethod
    def _int(c, m, k, lbnd, scl):
        pass

    @staticmethod
    @abc.abstractmethod
    def _der(c, m, scl):
        pass

    @staticmethod
    @abc.abstractmethod
    def _fit(x, y, deg, rcond, full):
        pass

    @staticmethod
    @abc.abstractmethod
    def _line(off, scl):
        pass

    @staticmethod
    @abc.abstractmethod
    def _roots(c):
        pass

    @staticmethod
    @abc.abstractmethod
    def _fromroots(r):
        pass
def has_samecoef(self, other):
"""Check if coefficients match.
.. versionadded:: 1.6.0
Parameters
----------
other : class instance
The other class must have the ``coef`` attribute.
Returns
-------
bool : boolean
True if the coefficients are the same, False otherwise.
"""
if len(self.coef) != len(other.coef):
return False
elif not np.all(self.coef == other.coef):
return False
else:
return True
def has_samedomain(self, other):
    """Check if domains match.

    .. versionadded:: 1.6.0

    Parameters
    ----------
    other : class instance
        The other class must have the ``domain`` attribute.

    Returns
    -------
    bool : boolean
        True if the domains are the same, False otherwise.

    """
    # NOTE: np.all returns a numpy bool scalar, not a builtin bool.
    return np.all(self.domain == other.domain)

def has_samewindow(self, other):
    """Check if windows match.

    .. versionadded:: 1.6.0

    Parameters
    ----------
    other : class instance
        The other class must have the ``window`` attribute.

    Returns
    -------
    bool : boolean
        True if the windows are the same, False otherwise.

    """
    return np.all(self.window == other.window)

def has_sametype(self, other):
    """Check if types match.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    other : object
        Class instance.

    Returns
    -------
    bool : boolean
        True if other is same class as self

    """
    # Exact-class check is intentional: a Chebyshev is not "sametype"
    # as a Polynomial even though both derive from ABCPolyBase.
    return isinstance(other, self.__class__)
def _get_coefficients(self, other):
"""Interpret other as polynomial coefficients.
The `other` argument is checked to see if it is of the same
class as self with identical domain and window. If so,
return its coefficients, otherwise return `other`.
.. versionadded:: 1.9.0
Parameters
----------
other : anything
Object to be checked.
Returns
-------
coef
The coefficients of`other` if it is a compatible instance,
of ABCPolyBase, otherwise `other`.
Raises
------
TypeError
When `other` is an incompatible instance of ABCPolyBase.
"""
if isinstance(other, ABCPolyBase):
if not isinstance(other, self.__class__):
raise TypeError("Polynomial types differ")
elif not np.all(self.domain == other.domain):
raise TypeError("Domains differ")
elif not np.all(self.window == other.window):
raise TypeError("Windows differ")
return other.coef
return other
def __init__(self, coef, domain=None, window=None):
    """Initialize from a coefficient array and optional domain/window.

    When `domain` or `window` is omitted the class-level default
    (an abstract property supplied by the subclass) remains in effect.
    """
    # trim=False keeps trailing zero coefficients; the stored array is a
    # fresh copy made by as_series.
    [coef] = pu.as_series([coef], trim=False)
    self.coef = coef

    if domain is not None:
        [domain] = pu.as_series([domain], trim=False)
        if len(domain) != 2:
            raise ValueError("Domain has wrong number of elements.")
        self.domain = domain

    if window is not None:
        [window] = pu.as_series([window], trim=False)
        if len(window) != 2:
            raise ValueError("Window has wrong number of elements.")
        self.window = window
def __repr__(self):
    # Slicing [6:-1] strips the leading "array(" and trailing ")" of the
    # ndarray reprs so the result reads like a constructor call.
    coef = repr(self.coef)[6:-1]
    domain = repr(self.domain)[6:-1]
    window = repr(self.window)[6:-1]
    name = self.__class__.__name__
    return f"{name}({coef}, domain={domain}, window={window})"

def __str__(self):
    # Short human-readable form: subclass nickname plus coefficients.
    coef = str(self.coef)
    name = self.nickname
    return f"{name}({coef})"
@classmethod
def _repr_latex_term(cls, i, arg_str, needs_parens):
    # Render the i-th basis function applied to arg_str as LaTeX,
    # e.g. "{T}_{2}(x)" for Chebyshev.
    if cls.basis_name is None:
        raise NotImplementedError(
            "Subclasses must define either a basis name, or override "
            "_repr_latex_term(i, arg_str, needs_parens)")
    # since we always add parens, we don't care if the expression needs them
    return f"{{{cls.basis_name}}}_{{{i}}}({arg_str})"

@staticmethod
def _repr_latex_scalar(x):
    # TODO: we're stuck with disabling math formatting until we handle
    # exponents in this function
    return r'\text{{{}}}'.format(x)
def _repr_latex_(self):
    """Return a LaTeX rendering of the series for Jupyter display."""
    # get the scaled argument string to the basis functions
    off, scale = self.mapparms()
    if off == 0 and scale == 1:
        term = 'x'
        needs_parens = False
    elif scale == 1:
        term = f"{self._repr_latex_scalar(off)} + x"
        needs_parens = True
    elif off == 0:
        term = f"{self._repr_latex_scalar(scale)}x"
        needs_parens = True
    else:
        term = (
            f"{self._repr_latex_scalar(off)} + "
            f"{self._repr_latex_scalar(scale)}x"
        )
        needs_parens = True

    # Zero-coefficient terms are still shown, but greyed out.
    mute = r"\color{{LightGray}}{{{}}}".format

    parts = []
    for i, c in enumerate(self.coef):
        # prevent duplication of + and - signs
        if i == 0:
            coef_str = f"{self._repr_latex_scalar(c)}"
        elif not isinstance(c, numbers.Real):
            # complex (or other non-real) coefficients get parenthesized
            coef_str = f" + ({self._repr_latex_scalar(c)})"
        elif not np.signbit(c):
            coef_str = f" + {self._repr_latex_scalar(c)}"
        else:
            coef_str = f" - {self._repr_latex_scalar(-c)}"

        # produce the string for the term
        term_str = self._repr_latex_term(i, term, needs_parens)
        if term_str == '1':
            part = coef_str
        else:
            part = rf"{coef_str}\,{term_str}"

        if c == 0:
            part = mute(part)

        parts.append(part)

    if parts:
        body = ''.join(parts)
    else:
        # in case somehow there are no coefficients at all
        body = '0'

    return rf"$x \mapsto {body}$"
# Pickle and copy
def __getstate__(self):
    # Copy the arrays so the pickled/copied state cannot alias the
    # arrays of this (nominally immutable) instance.
    ret = self.__dict__.copy()
    ret['coef'] = self.coef.copy()
    ret['domain'] = self.domain.copy()
    ret['window'] = self.window.copy()
    return ret

def __setstate__(self, dict):
    # NOTE: the parameter name shadows the builtin ``dict``; kept as-is
    # for pickle compatibility.
    self.__dict__ = dict
# Call
def __call__(self, arg):
    # Map the argument from the domain onto the window before handing it
    # to the flat-coefficient evaluator.
    off, scl = pu.mapparms(self.domain, self.window)
    arg = off + scl*arg
    return self._val(arg, self.coef)

def __iter__(self):
    # Iterating a series iterates its coefficients.
    return iter(self.coef)

def __len__(self):
    # Length is the number of coefficients (degree + 1).
    return len(self.coef)
# Numeric properties.
def __neg__(self):
    # Negate coefficients; domain and window are unchanged.
    return self.__class__(-self.coef, self.domain, self.window)

def __pos__(self):
    # Unary plus is the identity; instances are immutable, so returning
    # self is safe.
    return self
def __add__(self, other):
    # Coerce `other` to coefficients, delegate to the subclass primitive,
    # and fall back to NotImplemented on any failure so Python can try
    # the reflected operation.  This pattern repeats for -, * and divmod.
    othercoef = self._get_coefficients(other)
    try:
        coef = self._add(self.coef, othercoef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __sub__(self, other):
    othercoef = self._get_coefficients(other)
    try:
        coef = self._sub(self.coef, othercoef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __mul__(self, other):
    othercoef = self._get_coefficients(other)
    try:
        coef = self._mul(self.coef, othercoef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __truediv__(self, other):
    # there is no true divide if the rhs is not a Number, although it
    # could return the first n elements of an infinite series.
    # It is hard to see where n would come from, though.
    if not isinstance(other, numbers.Number) or isinstance(other, bool):
        raise TypeError(
            f"unsupported types for true division: "
            f"'{type(self)}', '{type(other)}'"
        )
    return self.__floordiv__(other)

def __floordiv__(self, other):
    # Quotient part of divmod.
    res = self.__divmod__(other)
    if res is NotImplemented:
        return res
    return res[0]

def __mod__(self, other):
    # Remainder part of divmod.
    res = self.__divmod__(other)
    if res is NotImplemented:
        return res
    return res[1]

def __divmod__(self, other):
    othercoef = self._get_coefficients(other)
    try:
        quo, rem = self._div(self.coef, othercoef)
    except ZeroDivisionError as e:
        # Division by zero is a genuine user error; do not swallow it
        # into NotImplemented.
        raise e
    except Exception:
        return NotImplemented
    quo = self.__class__(quo, self.domain, self.window)
    rem = self.__class__(rem, self.domain, self.window)
    return quo, rem

def __pow__(self, other):
    # maxpower caps the degree of the result to limit runaway growth.
    coef = self._pow(self.coef, other, maxpower=self.maxpower)
    res = self.__class__(coef, self.domain, self.window)
    return res
# Reflected numeric operations: `other` is the left operand, so it is
# passed first to the flat-coefficient primitives.
def __radd__(self, other):
    try:
        coef = self._add(other, self.coef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __rsub__(self, other):
    try:
        coef = self._sub(other, self.coef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __rmul__(self, other):
    try:
        coef = self._mul(other, self.coef)
    except Exception:
        return NotImplemented
    return self.__class__(coef, self.domain, self.window)

def __rdiv__(self, other):
    # set to __floordiv__,  /, is no longer semantically meaningful
    # (py2-era hook; py3 never calls it).
    return self.__rfloordiv__(other)

def __rtruediv__(self, other):
    # An instance of ABCPolyBase is not considered a
    # Number.
    return NotImplemented

def __rfloordiv__(self, other):
    res = self.__rdivmod__(other)
    if res is NotImplemented:
        return res
    return res[0]

def __rmod__(self, other):
    res = self.__rdivmod__(other)
    if res is NotImplemented:
        return res
    return res[1]

def __rdivmod__(self, other):
    try:
        quo, rem = self._div(other, self.coef)
    except ZeroDivisionError as e:
        # Division by zero is a genuine user error; re-raise unchanged.
        raise e
    except Exception:
        return NotImplemented
    quo = self.__class__(quo, self.domain, self.window)
    rem = self.__class__(rem, self.domain, self.window)
    return quo, rem
def __eq__(self, other):
    # Equal iff exact class, domain, window, coefficient shape and
    # coefficient values all match.  NOTE: the result may be a numpy
    # bool scalar rather than a builtin bool.
    res = (isinstance(other, self.__class__) and
           np.all(self.domain == other.domain) and
           np.all(self.window == other.window) and
           (self.coef.shape == other.coef.shape) and
           np.all(self.coef == other.coef))
    return res

def __ne__(self, other):
    return not self.__eq__(other)
#
# Extra methods.
#

def copy(self):
    """Return a copy.

    Returns
    -------
    new_series : series
        Copy of self.

    """
    # The constructor (via as_series) copies the coefficient array, so
    # the result shares no mutable state with self.
    return self.__class__(self.coef, self.domain, self.window)

def degree(self):
    """The degree of the series.

    .. versionadded:: 1.5.0

    Returns
    -------
    degree : int
        Degree of the series, one less than the number of coefficients.

    """
    return len(self) - 1

def cutdeg(self, deg):
    """Truncate series to the given degree.

    Reduce the degree of the series to `deg` by discarding the
    high order terms. If `deg` is greater than the current degree a
    copy of the current series is returned. This can be useful in least
    squares where the coefficients of the high degree terms may be very
    small.

    .. versionadded:: 1.5.0

    Parameters
    ----------
    deg : non-negative int
        The series is reduced to degree `deg` by discarding the high
        order terms. The value of `deg` must be a non-negative integer.

    Returns
    -------
    new_series : series
        New instance of series with reduced degree.

    """
    # Degree d corresponds to d + 1 coefficients.
    return self.truncate(deg + 1)
def trim(self, tol=0):
    """Remove trailing coefficients

    Remove trailing coefficients until a coefficient is reached whose
    absolute value greater than `tol` or the beginning of the series is
    reached. If all the coefficients would be removed the series is set
    to ``[0]``. A new series instance is returned with the new
    coefficients.  The current instance remains unchanged.

    Parameters
    ----------
    tol : non-negative number.
        All trailing coefficients less than `tol` will be removed.

    Returns
    -------
    new_series : series
        Contains the new set of coefficients.

    """
    # The actual trimming logic lives in polyutils.trimcoef.
    coef = pu.trimcoef(self.coef, tol)
    return self.__class__(coef, self.domain, self.window)
def truncate(self, size):
"""Truncate series to length `size`.
Reduce the series to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer. This
can be useful in least squares where the coefficients of the
high degree terms may be very small.
Parameters
----------
size : positive int
The series is reduced to length `size` by discarding the high
degree terms. The value of `size` must be a positive integer.
Returns
-------
new_series : series
New instance of series with truncated coefficients.
"""
isize = int(size)
if isize != size or isize < 1:
raise ValueError("size must be a positive integer")
if isize >= len(self.coef):
coef = self.coef
else:
coef = self.coef[:isize]
return self.__class__(coef, self.domain, self.window)
def convert(self, domain=None, kind=None, window=None):
    """Convert series to a different kind and/or domain and/or window.

    Parameters
    ----------
    domain : array_like, optional
        The domain of the converted series. If the value is None,
        the default domain of `kind` is used.
    kind : class, optional
        The polynomial series type class to which the current instance
        should be converted. If kind is None, then the class of the
        current instance is used.
    window : array_like, optional
        The window of the converted series. If the value is None,
        the default window of `kind` is used.

    Returns
    -------
    new_series : series
        The returned class can be of different type than the current
        instance and/or have a different domain and/or different
        window.

    Notes
    -----
    Conversion between domains and class types can result in
    numerically ill defined series.

    """
    if kind is None:
        kind = self.__class__
    if domain is None:
        domain = kind.domain
    if window is None:
        window = kind.window
    # Evaluating self at the identity series of the target kind performs
    # the basis/domain conversion via polynomial composition.
    return self(kind.identity(domain, window=window))
def mapparms(self):
    """Return the mapping parameters.

    The returned values define a linear map ``off + scl*x`` that is
    applied to the input arguments before the series is evaluated. The
    map depends on the ``domain`` and ``window``; if the current
    ``domain`` is equal to the ``window`` the resulting map is the
    identity. If the coefficients of the series instance are to be
    used by themselves outside this class, then the linear function
    must be substituted for the ``x`` in the standard representation of
    the base polynomials.

    Returns
    -------
    off, scl : float or complex
        The mapping function is defined by ``off + scl*x``.

    Notes
    -----
    If the current domain is the interval ``[l1, r1]`` and the window
    is ``[l2, r2]``, then the linear mapping function ``L`` is
    defined by the equations::

        L(l1) = l2
        L(r1) = r2

    """
    return pu.mapparms(self.domain, self.window)
def integ(self, m=1, k=[], lbnd=None):
"""Integrate.
Return a series instance that is the definite integral of the
current series.
Parameters
----------
m : non-negative int
The number of integrations to perform.
k : array_like
Integration constants. The first constant is applied to the
first integration, the second to the second, and so on. The
list of values must less than or equal to `m` in length and any
missing values are set to zero.
lbnd : Scalar
The lower bound of the definite integral.
Returns
-------
new_series : series
A new series representing the integral. The domain is the same
as the domain of the integrated series.
"""
off, scl = self.mapparms()
if lbnd is None:
lbnd = 0
else:
lbnd = off + scl*lbnd
coef = self._int(self.coef, m, k, lbnd, 1./scl)
return self.__class__(coef, self.domain, self.window)
def deriv(self, m=1):
    """Differentiate.

    Return a series instance of that is the derivative of the current
    series.

    Parameters
    ----------
    m : non-negative int
        Find the derivative of order `m`.

    Returns
    -------
    new_series : series
        A new series representing the derivative. The domain is the same
        as the domain of the differentiated series.

    """
    off, scl = self.mapparms()
    # scl is the chain-rule factor from the domain -> window map.
    coef = self._der(self.coef, m, scl)
    return self.__class__(coef, self.domain, self.window)
def roots(self):
    """Return the roots of the series polynomial.

    Compute the roots for the series. Note that the accuracy of the
    roots decrease the further outside the domain they lie.

    Returns
    -------
    roots : ndarray
        Array containing the roots of the series.

    """
    # _roots works in window coordinates; map the results back onto the
    # domain before returning them.
    roots = self._roots(self.coef)
    return pu.mapdomain(roots, self.window, self.domain)
def linspace(self, n=100, domain=None):
"""Return x, y values at equally spaced points in domain.
Returns the x, y values at `n` linearly spaced points across the
domain. Here y is the value of the polynomial at the points x. By
default the domain is the same as that of the series instance.
This method is intended mostly as a plotting aid.
.. versionadded:: 1.5.0
Parameters
----------
n : int, optional
Number of point pairs to return. The default value is 100.
domain : {None, array_like}, optional
If not None, the specified domain is used instead of that of
the calling instance. It should be of the form ``[beg,end]``.
The default is None which case the class domain is used.
Returns
-------
x, y : ndarray
x is equal to linspace(self.domain[0], self.domain[1], n) and
y is the series evaluated at element of x.
"""
if domain is None:
domain = self.domain
x = np.linspace(domain[0], domain[1], n)
y = self(x)
return x, y
@classmethod
def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
    window=None):
    """Least squares fit to data.

    Return a series instance that is the least squares fit to the data
    `y` sampled at `x`. The domain of the returned instance can be
    specified and this will often result in a superior fit with less
    chance of ill conditioning.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. For NumPy versions >= 1.11.0 a list of integers specifying the
        degrees of the terms to include may be used instead.
    domain : {None, [beg, end], []}, optional
        Domain to use for the returned series. If ``None``,
        then a minimal domain that covers the points `x` is chosen. If
        ``[]`` the class domain is used. The default value was the
        class domain in NumPy 1.4 and ``None`` in later versions.
        The ``[]`` option was added in numpy 1.5.0.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller
        than this relative to the largest singular value will be
        ignored. The default value is len(x)*eps, where eps is the
        relative precision of the float type, about 2e-16 in most
        cases.
    full : bool, optional
        Switch determining nature of return value. When it is False
        (the default) just the coefficients are returned, when True
        diagnostic information from the singular value decomposition is
        also returned.
    w : array_like, shape (M,), optional
        Weights. If not None the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products
        ``w[i]*y[i]`` all have the same variance. The default value is
        None.

        .. versionadded:: 1.5.0
    window : {[beg, end]}, optional
        Window to use for the returned series. The default
        value is the default class domain

        .. versionadded:: 1.6.0

    Returns
    -------
    new_series : series
        A series that represents the least squares fit to the data and
        has the domain and window specified in the call. If the
        coefficients for the unscaled and unshifted basis polynomials are
        of interest, do ``new_series.convert().coef``.
    [resid, rank, sv, rcond] : list
        These values are only returned if `full` = True

        resid -- sum of squared residuals of the least squares fit
        rank -- the numerical rank of the scaled Vandermonde matrix
        sv -- singular values of the scaled Vandermonde matrix
        rcond -- value of `rcond`.

        For more details, see `linalg.lstsq`.

    """
    if domain is None:
        # Smallest interval covering the sample points.
        domain = pu.getdomain(x)
    elif type(domain) is list and len(domain) == 0:
        # The literal ``[]`` sentinel (deliberately list-only, per the
        # documented API) selects the class default domain.
        domain = cls.domain

    if window is None:
        window = cls.window

    # The fit is performed in window coordinates; the returned series
    # carries the domain -> window map that undoes the transformation.
    xnew = pu.mapdomain(x, domain, window)
    res = cls._fit(xnew, y, deg, w=w, rcond=rcond, full=full)
    if full:
        [coef, status] = res
        return cls(coef, domain=domain, window=window), status
    else:
        coef = res
        return cls(coef, domain=domain, window=window)
@classmethod
def fromroots(cls, roots, domain=[], window=None):
"""Return series instance that has the specified roots.
Returns a series representing the product
``(x - r[0])*(x - r[1])*...*(x - r[n-1])``, where ``r`` is a
list of roots.
Parameters
----------
roots : array_like
List of roots.
domain : {[], None, array_like}, optional
Domain for the resulting series. If None the domain is the
interval from the smallest root to the largest. If [] the
domain is the class domain. The default is [].
window : {None, array_like}, optional
Window for the returned series. If None the class window is
used. The default is None.
Returns
-------
new_series : series
Series with the specified roots.
"""
[roots] = pu.as_series([roots], trim=False)
if domain is None:
domain = pu.getdomain(roots)
elif type(domain) is list and len(domain) == 0:
domain = cls.domain
if window is None:
window = cls.window
deg = len(roots)
off, scl = pu.mapparms(domain, window)
rnew = off + scl*roots
coef = cls._fromroots(rnew) / scl**deg
return cls(coef, domain=domain, window=window)
@classmethod
def identity(cls, domain=None, window=None):
"""Identity function.
If ``p`` is the returned series, then ``p(x) == x`` for all
values of x.
Parameters
----------
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
Series of representing the identity.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
off, scl = pu.mapparms(window, domain)
coef = cls._line(off, scl)
return cls(coef, domain, window)
@classmethod
def basis(cls, deg, domain=None, window=None):
"""Series basis polynomial of degree `deg`.
Returns the series representing the basis polynomial of degree `deg`.
.. versionadded:: 1.7.0
Parameters
----------
deg : int
Degree of the basis polynomial for the series. Must be >= 0.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series with the coefficient of the `deg` term set to one and
all others zero.
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
ideg = int(deg)
if ideg != deg or ideg < 0:
raise ValueError("deg must be non-negative integer")
return cls([0]*ideg + [1], domain, window)
@classmethod
def cast(cls, series, domain=None, window=None):
"""Convert series to series of this class.
The `series` is expected to be an instance of some polynomial
series of one of the types supported by by the numpy.polynomial
module, but could be some other class that supports the convert
method.
.. versionadded:: 1.7.0
Parameters
----------
series : series
The series instance to be converted.
domain : {None, array_like}, optional
If given, the array must be of the form ``[beg, end]``, where
``beg`` and ``end`` are the endpoints of the domain. If None is
given then the class domain is used. The default is None.
window : {None, array_like}, optional
If given, the resulting array must be if the form
``[beg, end]``, where ``beg`` and ``end`` are the endpoints of
the window. If None is given then the class window is used. The
default is None.
Returns
-------
new_series : series
A series of the same kind as the calling class and equal to
`series` when evaluated.
See Also
--------
convert : similar instance method
"""
if domain is None:
domain = cls.domain
if window is None:
window = cls.window
return series.convert(domain, cls, window)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/frame/_group.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class GroupValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the frame ``group`` property."""

    def __init__(self, plotly_name="group", parent_name="frame", **kwargs):
        # Allow callers to override the role; default to "info".
        role = kwargs.pop("role", "info")
        super(GroupValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, role=role, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/compat/chainmap.py | from typing import ChainMap, MutableMapping, TypeVar, cast
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")


class DeepChainMap(ChainMap[_KT, _VT]):
    """ChainMap variant that updates the inner scope already holding a key.

    Unlike ``ChainMap``, writes and deletes are applied to the first map
    in ``self.maps`` that contains the key, instead of always touching
    the outermost map.  Only works when all passed mappings are mutable.
    """

    def __setitem__(self, key: _KT, value: _VT) -> None:
        # Update in place wherever the key already lives ...
        for mapping in self.maps:
            target = cast(MutableMapping[_KT, _VT], mapping)
            if key in target:
                target[key] = value
                return
        # ... otherwise fall back to the outermost map.
        cast(MutableMapping[_KT, _VT], self.maps[0])[key] = value

    def __delitem__(self, key: _KT) -> None:
        """Delete `key` from the first map that contains it.

        Raises
        ------
        KeyError
            If `key` doesn't exist in any map.
        """
        for mapping in self.maps:
            if key in mapping:
                del cast(MutableMapping[_KT, _VT], mapping)[key]
                return
        raise KeyError(key)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/waitress/receiver.py | <gh_stars>1-10
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Data Chunk Receiver
"""
from waitress.utilities import BadRequest, find_double_newline
class FixedStreamReceiver(object):
    # See IStreamConsumer
    completed = False
    error = None

    def __init__(self, cl, buf):
        # ``cl`` is the fixed content length still expected; ``buf`` is
        # any object supporting append()/getfile() (a waitress buffer).
        self.remain = cl
        self.buf = buf

    def __len__(self):
        return self.buf.__len__()

    def received(self, data):
        "See IStreamConsumer"
        remaining = self.remain
        if remaining < 1:
            # Nothing more expected; mark complete to avoid spinning.
            self.completed = True
            return 0
        datalen = len(data)
        if datalen < remaining:
            # Partial body: keep everything, stay incomplete.
            self.buf.append(data)
            self.remain = remaining - datalen
            return datalen
        # Final piece: keep only the bytes we still need.
        self.buf.append(data[:remaining])
        self.remain = 0
        self.completed = True
        return remaining

    def getfile(self):
        return self.buf.getfile()

    def getbuf(self):
        return self.buf
class ChunkedReceiver(object):
    """Consumer for a ``Transfer-Encoding: chunked`` request body.

    Parses the chunked framing out of the byte stream and appends only
    the chunk payloads to ``buf``.  State machine per iteration of
    ``received``: consume chunk payload -> validate the CRLF chunk
    terminator -> read the next size/control line -> after the zero-size
    chunk, absorb the trailer.
    """

    # Bytes of the current chunk payload still expected.
    chunk_remainder = 0
    # True when the next bytes must be the CRLF that ends a chunk.
    validate_chunk_end = False
    # Partial chunk-size line carried over between received() calls.
    control_line = b""
    # Partial chunk-terminator bytes carried over between calls.
    chunk_end = b""
    # True once the zero-length ("last") chunk has been seen.
    all_chunks_received = False
    # Accumulated (possibly partial) trailer section.
    trailer = b""
    completed = False
    error = None
    # max_control_line = 1024
    # max_trailer = 65536

    def __init__(self, buf):
        # ``buf`` is any object with append()/getfile() (a waitress buffer).
        self.buf = buf

    def __len__(self):
        return self.buf.__len__()

    def received(self, s):
        # Returns the number of bytes consumed.
        if self.completed:
            return 0
        orig_size = len(s)
        while s:
            rm = self.chunk_remainder
            if rm > 0:
                # Receive the remainder of a chunk.
                to_write = s[:rm]
                self.buf.append(to_write)
                written = len(to_write)
                s = s[written:]
                self.chunk_remainder -= written
                if self.chunk_remainder == 0:
                    # Payload done; the CRLF terminator must follow.
                    self.validate_chunk_end = True
            elif self.validate_chunk_end:
                # Re-prepend any terminator bytes seen on a prior call.
                s = self.chunk_end + s
                pos = s.find(b"\r\n")
                if pos < 0 and len(s) < 2:
                    # Not enough bytes to decide yet; stash and wait.
                    self.chunk_end = s
                    s = b""
                else:
                    self.chunk_end = b""
                    if pos == 0:
                        # Chop off the terminating CR LF from the chunk
                        s = s[2:]
                    else:
                        # Terminator missing where required: framing error.
                        self.error = BadRequest("Chunk not properly terminated")
                        self.all_chunks_received = True
                    # Always exit this loop
                    self.validate_chunk_end = False
            elif not self.all_chunks_received:
                # Receive a control line.
                s = self.control_line + s
                pos = s.find(b"\r\n")
                if pos < 0:
                    # Control line not finished.
                    self.control_line = s
                    s = b""
                else:
                    # Control line finished.
                    line = s[:pos]
                    s = s[pos + 2 :]
                    self.control_line = b""
                    line = line.strip()
                    if line:
                        # Begin a new chunk.
                        semi = line.find(b";")
                        if semi >= 0:
                            # discard extension info.
                            line = line[:semi]
                        try:
                            sz = int(line.strip(), 16)  # hexadecimal
                        except ValueError:  # garbage in input
                            self.error = BadRequest("garbage in chunked encoding input")
                            sz = 0
                        if sz > 0:
                            # Start a new chunk.
                            self.chunk_remainder = sz
                        else:
                            # Finished chunks.
                            self.all_chunks_received = True
                    # else expect a control line.
            else:
                # Receive the trailer.
                trailer = self.trailer + s
                if trailer.startswith(b"\r\n"):
                    # No trailer.
                    self.completed = True
                    return orig_size - (len(trailer) - 2)
                pos = find_double_newline(trailer)
                if pos < 0:
                    # Trailer not finished.
                    self.trailer = trailer
                    s = b""
                else:
                    # Finished the trailer.
                    self.completed = True
                    self.trailer = trailer[:pos]
                    return orig_size - (len(trailer) - pos)
        return orig_size

    def getfile(self):
        return self.buf.getfile()

    def getbuf(self):
        return self.buf
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/choropleth/_locationmode.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/validators/choropleth/_locationmode.py
import _plotly_utils.basevalidators
class LocationmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the choropleth ``locationmode`` property."""

    def __init__(self, plotly_name="locationmode", parent_name="choropleth", **kwargs):
        # Defaults can all be overridden through **kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop(
            "values", ["ISO-3", "USA-states", "country names", "geojson-id"]
        )
        super(LocationmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_equals.py | import numpy as np
import pytest
from pandas import MultiIndex, Series
@pytest.mark.parametrize(
    "arr, idx",
    [
        ([1, 2, 3, 4], [0, 2, 1, 3]),
        ([1, np.nan, 3, np.nan], [0, 2, 1, 3]),
        (
            [1, np.nan, 3, np.nan],
            MultiIndex.from_tuples([(0, "a"), (1, "b"), (2, "c"), (3, "c")]),
        ),
    ],
)
def test_equals(arr, idx):
    """Series.equals is True for an untouched copy and False after mutation."""
    original = Series(arr, index=idx)
    clone = original.copy()
    assert original.equals(clone)
    # Mutating a single element must break equality with the copy.
    original[1] = 9
    assert not original.equals(clone)
def test_equals_list_array():
    """GH20676: equals works for object Series holding numpy arrays."""
    values = np.array([1, 2])
    left = Series([values, values])
    right = left.copy()
    assert left.equals(right)
    # TODO: Series equals should also work between single value and list
    # s1[1] = 9
    # assert not s1.equals(s2)
def test_equals_false_negative():
    """GH8437: object-dtype Series with NaN must not compare as unequal."""
    left = Series([False, np.nan])
    left_copy = left.copy()
    # Several object Series built from an all-NaN base, with leading
    # elements overwritten by False in different ways.
    base = Series(index=range(2), dtype=object)
    via_slice = base.copy()
    via_index = base.copy()
    via_index2 = base.copy()
    base[:-1] = False
    via_slice[:-1] = False
    via_index[0] = False
    via_index2[0] = False
    assert left.equals(left)
    assert left.equals(left_copy)
    assert left.equals(base)
    assert left.equals(via_slice)
    assert left.equals(via_index)
    assert via_index.equals(via_index2)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/matplotlylib/mplexporter/renderers/vincent_renderer.py | <gh_stars>1000+
import warnings
from .base import Renderer
from ..exporter import Exporter
class VincentRenderer(Renderer):
    """mplexporter Renderer that accumulates a single vincent chart."""

    def open_figure(self, fig, props):
        self.chart = None
        # Figure size in pixels, derived from inches * dpi.
        self.figwidth = int(props['figwidth'] * props['dpi'])
        self.figheight = int(props['figheight'] * props['dpi'])

    def draw_line(self, data, coordinates, style, label, mplobj=None):
        import vincent  # only import if VincentRenderer is used
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
        line_source = {'x': data[:, 0], 'y': data[:, 1]}
        line = vincent.Line(line_source, iter_idx='x',
                            width=self.figwidth, height=self.figheight)
        # TODO: respect the other style settings
        line.scales['color'].range = [style['color']]
        if self.chart is not None:
            # Only one element per chart is supported so far.
            warnings.warn("Multiple plot elements not yet supported")
        else:
            self.chart = line

    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        import vincent  # only import if VincentRenderer is used
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
        marker_source = {'x': data[:, 0], 'y': data[:, 1]}
        markers = vincent.Scatter(marker_source, iter_idx='x',
                                  width=self.figwidth, height=self.figheight)
        # TODO: respect the other style settings
        markers.scales['color'].range = [style['facecolor']]
        if self.chart is not None:
            warnings.warn("Multiple plot elements not yet supported")
        else:
            self.chart = markers
def fig_to_vincent(fig):
    """Convert a matplotlib figure to a vincent object"""
    renderer = VincentRenderer()
    # Walk the figure with the exporter; the renderer collects the chart.
    Exporter(renderer).run(fig)
    return renderer.chart
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_volume.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_volume.py<gh_stars>1000+
from plotly.graph_objs import Volume
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/tests/test_semicolon_split.py | from __future__ import division, absolute_import, print_function
import platform
import pytest
from . import util
from numpy.testing import assert_equal
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation")
class TestMultiline(util.F2PyTest):
    # Regression test: the ';' inside the C char literal of a multiline
    # usercode block must not be treated as a statement separator by f2py.
    suffix = ".pyf"
    module_name = "multiline"
    code = """
python module {module}
    usercode '''
void foo(int* x) {{
  char dummy = ';';
  *x = 42;
}}
'''
    interface
      subroutine foo(x)
        intent(c) foo
        integer intent(out) :: x
      end subroutine foo
    end interface
end python module {module}
""".format(module=module_name)

    def test_multiline(self):
        # foo() is generated from the usercode above and must return 42.
        assert_equal(self.module.foo(), 42)
@pytest.mark.skipif(
    platform.system() == 'Darwin',
    reason="Prone to error when run with numpy/f2py/tests on mac os, "
    "but not when run in isolation")
class TestCallstatement(util.F2PyTest):
    # Regression test: semicolons inside a multiline callstatement block
    # must survive f2py's .pyf parsing.
    suffix = ".pyf"
    module_name = "callstatement"
    code = """
python module {module}
    usercode '''
void foo(int* x) {{
}}
'''
    interface
      subroutine foo(x)
        intent(c) foo
        integer intent(out) :: x
        callprotoargument int*
        callstatement {{ &
          ; &
          x = 42; &
        }}
      end subroutine foo
    end interface
end python module {module}
""".format(module=module_name)

    def test_callstatement(self):
        # The callstatement assigns 42 to the output argument.
        assert_equal(self.module.foo(), 42)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/indexing/test_xs.py | import numpy as np
import pandas as pd
def test_xs_datetimelike_wrapping():
    # GH#31630 a case where we shouldn't wrap datetime64 in Timestamp
    # NOTE(review): relies on the private ``._data._data`` accessor to
    # obtain the raw datetime64 ndarray from a DatetimeIndex — verify
    # this still holds for the pandas version in use.
    arr = pd.date_range("2016-01-01", periods=3)._data._data
    ser = pd.Series(arr, dtype=object)
    for i in range(len(ser)):
        # Re-assign the raw values so they stay np.datetime64 scalars
        # instead of being boxed as Timestamp.
        ser.iloc[i] = arr[i]
    assert ser.dtype == object
    assert isinstance(ser[0], np.datetime64)
    # xs on the object Series must hand back the unboxed datetime64.
    result = ser.xs(0)
    assert isinstance(result, np.datetime64)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_choroplethmapbox.py | import _plotly_utils.basevalidators
class ChoroplethmapboxValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Compound-array validator for ``layout.template.data.choroplethmapbox``."""

    def __init__(
        self,
        plotly_name="choroplethmapbox",
        parent_name="layout.template.data",
        **kwargs
    ):
        # Defaults may be overridden by callers via **kwargs.
        data_class_str = kwargs.pop("data_class_str", "Choroplethmapbox")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(ChoroplethmapboxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scattercarpet.py | from plotly.graph_objs import Scattercarpet
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/pointcloud/marker/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/pointcloud/marker/__init__.py
import sys

# Python < 3.7 lacks module-level __getattr__ (PEP 562), so import the
# validator classes eagerly; newer interpreters defer them lazily via
# relative_import to keep import time down.
if sys.version_info < (3, 7):
    from ._sizemin import SizeminValidator
    from ._sizemax import SizemaxValidator
    from ._opacity import OpacityValidator
    from ._color import ColorValidator
    from ._border import BorderValidator
    from ._blend import BlendValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sizemin.SizeminValidator",
            "._sizemax.SizemaxValidator",
            "._opacity.OpacityValidator",
            "._color.ColorValidator",
            "._border.BorderValidator",
            "._blend.BlendValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/splom/dimension/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/splom/dimension/__init__.py<gh_stars>1000+
import sys

# Eager import on Python < 3.7; lazy PEP 562 import machinery otherwise.
if sys.version_info < (3, 7):
    from ._axis import Axis
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._axis.Axis"])
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_bar.py | from plotly.graph_objs import Bar
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys

# Eager import on Python < 3.7 (no module __getattr__); otherwise defer
# the validator imports lazily via relative_import (PEP 562).
if sys.version_info < (3, 7):
    from ._zaxis import ZaxisValidator
    from ._yaxis import YaxisValidator
    from ._xaxis import XaxisValidator
    from ._uirevision import UirevisionValidator
    from ._hovermode import HovermodeValidator
    from ._dragmode import DragmodeValidator
    from ._domain import DomainValidator
    from ._camera import CameraValidator
    from ._bgcolor import BgcolorValidator
    from ._aspectratio import AspectratioValidator
    from ._aspectmode import AspectmodeValidator
    from ._annotationdefaults import AnnotationdefaultsValidator
    from ._annotations import AnnotationsValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._zaxis.ZaxisValidator",
            "._yaxis.YaxisValidator",
            "._xaxis.XaxisValidator",
            "._uirevision.UirevisionValidator",
            "._hovermode.HovermodeValidator",
            "._dragmode.DragmodeValidator",
            "._domain.DomainValidator",
            "._camera.CameraValidator",
            "._bgcolor.BgcolorValidator",
            "._aspectratio.AspectratioValidator",
            "._aspectmode.AspectmodeValidator",
            "._annotationdefaults.AnnotationdefaultsValidator",
            "._annotations.AnnotationsValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/rangeselector/__init__.py | import sys
# Eager import on Python < 3.7 (no module __getattr__); otherwise defer
# the validator imports lazily via relative_import (PEP 562).
if sys.version_info < (3, 7):
    from ._yanchor import YanchorValidator
    from ._y import YValidator
    from ._xanchor import XanchorValidator
    from ._x import XValidator
    from ._visible import VisibleValidator
    from ._font import FontValidator
    from ._buttondefaults import ButtondefaultsValidator
    from ._buttons import ButtonsValidator
    from ._borderwidth import BorderwidthValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolor import BgcolorValidator
    from ._activecolor import ActivecolorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yanchor.YanchorValidator",
            "._y.YValidator",
            "._xanchor.XanchorValidator",
            "._x.XValidator",
            "._visible.VisibleValidator",
            "._font.FontValidator",
            "._buttondefaults.ButtondefaultsValidator",
            "._buttons.ButtonsValidator",
            "._borderwidth.BorderwidthValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolor.BgcolorValidator",
            "._activecolor.ActivecolorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_sankey.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_sankey.py
from plotly.graph_objs import Sankey
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scattergl.py | from plotly.graph_objs import Scattergl
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/numpy_distribution.py | <reponame>acrucetta/Chicago_COVI_WebApp
# XXX: Handle setuptools ?
from __future__ import division, absolute_import, print_function
from distutils.core import Distribution
# This class is used because we add new files (sconscripts, and so on) with the
# scons command
class NumpyDistribution(Distribution):
    """distutils Distribution that also tracks scons-related build data.

    Used because numpy's scons command registers extra artifacts
    (sconscripts, installable libraries, pkg-config templates) beyond
    what plain distutils knows about.
    """

    def __init__(self, attrs=None):
        # Each entry: (sconscripts, pre_hook, post_hook, src, parent_names)
        self.scons_data = []
        # Libraries to install alongside the package.
        self.installed_libraries = []
        # Mapping of pkg_config files to generate/install.
        self.installed_pkg_config = {}
        Distribution.__init__(self, attrs)

    def has_scons_scripts(self):
        """Return True when any scons script has been registered."""
        return len(self.scons_data) > 0
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/arrays/categorical/test_operators.py | <reponame>acrucetta/Chicago_COVI_WebApp
import operator
import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import Categorical, DataFrame, Series, date_range
import pandas._testing as tm
from pandas.tests.arrays.categorical.common import TestCategorical
class TestCategoricalOpsWithFactor(TestCategorical):
    """Comparison-operator tests against the shared ``self.factor`` fixture.

    ``self.factor`` is provided by the ``TestCategorical`` base class.
    """

    def test_categories_none_comparisons(self):
        """Building the same ordered Categorical reproduces the fixture."""
        factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)
        tm.assert_categorical_equal(factor, self.factor)

    def test_comparisons(self):
        """Categorical comparisons: masks, category order, and type errors."""
        # Boolean-mask selection must match comparing the raw ndarray.
        result = self.factor[self.factor == "a"]
        expected = self.factor[np.asarray(self.factor) == "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor != "a"]
        expected = self.factor[np.asarray(self.factor) != "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor < "c"]
        expected = self.factor[np.asarray(self.factor) < "c"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor > "a"]
        expected = self.factor[np.asarray(self.factor) > "a"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor >= "b"]
        expected = self.factor[np.asarray(self.factor) >= "b"]
        tm.assert_categorical_equal(result, expected)
        result = self.factor[self.factor <= "b"]
        expected = self.factor[np.asarray(self.factor) <= "b"]
        tm.assert_categorical_equal(result, expected)
        # Element-wise equality with a permuted copy of itself.
        n = len(self.factor)
        other = self.factor[np.random.permutation(n)]
        result = self.factor == other
        expected = np.asarray(self.factor) == np.asarray(other)
        tm.assert_numpy_array_equal(result, expected)
        # Equality against a value absent from the categories is all-False.
        result = self.factor == "d"
        expected = np.zeros(len(self.factor), dtype=bool)
        tm.assert_numpy_array_equal(result, expected)
        # comparisons with categoricals
        cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)
        cat_rev_base = Categorical(
            ["b", "b", "b"], categories=["c", "b", "a"], ordered=True
        )
        cat = Categorical(["a", "b", "c"], ordered=True)
        cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)
        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = np.array([True, False, False])
        tm.assert_numpy_array_equal(res_rev, exp_rev)
        res_rev = cat_rev < cat_rev_base
        exp_rev = np.array([False, False, True])
        tm.assert_numpy_array_equal(res_rev, exp_rev)
        res = cat > cat_base
        exp = np.array([False, False, True])
        tm.assert_numpy_array_equal(res, exp)
        # Only categories with same categories can be compared
        msg = "Categoricals can only be compared if 'categories' are the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_rev
        cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])
        msg = (
            "Categoricals can only be compared if 'categories' are the same. "
            "Categories are different lengths"
        )
        with pytest.raises(TypeError, match=msg):
            cat_rev > cat_rev_base2
        # Only categories with same ordering information can be compared
        cat_unorderd = cat.set_ordered(False)
        assert not (cat > cat).any()
        msg = "Categoricals can only be compared if 'ordered' is the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_unorderd
        # comparison (in both directions) with Series will raise
        s = Series(["b", "b", "b"])
        msg = (
            "Cannot compare a Categorical for op __gt__ with type"
            r" <class 'numpy\.ndarray'>"
        )
        with pytest.raises(TypeError, match=msg):
            cat > s
        with pytest.raises(TypeError, match=msg):
            cat_rev > s
        with pytest.raises(TypeError, match=msg):
            s < cat
        with pytest.raises(TypeError, match=msg):
            s < cat_rev
        # comparison with numpy.array will raise in both direction, but only on
        # newer numpy versions
        a = np.array(["b", "b", "b"])
        with pytest.raises(TypeError, match=msg):
            cat > a
        with pytest.raises(TypeError, match=msg):
            cat_rev > a
        # Make sure that unequal comparison take the categories order in
        # account
        cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)
        exp = np.array([True, False, False])
        res = cat_rev > "b"
        tm.assert_numpy_array_equal(res, exp)
        # check that zero-dim array gets unboxed
        res = cat_rev > np.array("b")
        tm.assert_numpy_array_equal(res, exp)
class TestCategoricalOps:
def test_compare_frame(self):
# GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame
data = ["a", "b", 2, "a"]
cat = Categorical(data)
df = DataFrame(cat)
result = cat == df.T
expected = DataFrame([[True, True, True, True]])
tm.assert_frame_equal(result, expected)
result = cat[::-1] != df.T
expected = DataFrame([[False, True, True, False]])
tm.assert_frame_equal(result, expected)
def test_compare_frame_raises(self, all_compare_operators):
# alignment raises unless we transpose
op = getattr(operator, all_compare_operators)
cat = Categorical(["a", "b", 2, "a"])
df = DataFrame(cat)
msg = "Unable to coerce to Series, length must be 1: given 4"
with pytest.raises(ValueError, match=msg):
op(cat, df)
def test_datetime_categorical_comparison(self):
dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)
tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))
def test_reflected_comparison_with_scalars(self):
# GH8658
cat = Categorical([1, 2, 3], ordered=True)
tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))
    def test_comparison_with_unknown_scalars(self):
        """Scalars outside the categories: ordering raises, (in)equality doesn't."""
        # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
        # and following comparisons with scalars not in categories should raise
        # for unequal comps, but not for equal/not equal
        cat = Categorical([1, 2, 3], ordered=True)
        msg = (
            "Cannot compare a Categorical for op __{}__ with a scalar, "
            "which is not a category"
        )
        with pytest.raises(TypeError, match=msg.format("lt")):
            cat < 4
        with pytest.raises(TypeError, match=msg.format("gt")):
            cat > 4
        # Reflected comparisons report the op seen from the Categorical side.
        with pytest.raises(TypeError, match=msg.format("gt")):
            4 < cat
        with pytest.raises(TypeError, match=msg.format("lt")):
            4 > cat
        tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))
        tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))
    def test_comparison_of_ordered_categorical_with_nan_to_scalar(
        self, compare_operators_no_eq_ne
    ):
        """Missing values in an ordered Categorical compare False to scalars."""
        # https://github.com/pandas-dev/pandas/issues/26504
        # BUG: fix ordered categorical comparison with missing values (#26504 )
        # and following comparisons with scalars in categories with missing
        # values should be evaluated as False
        cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
        scalar = 2
        with warnings.catch_warnings():
            # NaN comparisons emit RuntimeWarning on the ndarray side.
            warnings.simplefilter("ignore", RuntimeWarning)
            expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)
        actual = getattr(cat, compare_operators_no_eq_ne)(scalar)
        tm.assert_numpy_array_equal(actual, expected)
    def test_comparison_of_ordered_categorical_with_nan_to_listlike(
        self, compare_operators_no_eq_ne
    ):
        """Missing values in an ordered Categorical compare False to listlikes."""
        # https://github.com/pandas-dev/pandas/issues/26504
        # and following comparisons of missing values in ordered Categorical
        # with listlike should be evaluated as False
        cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)
        other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)
        with warnings.catch_warnings():
            # NaN comparisons emit RuntimeWarning on the ndarray side.
            warnings.simplefilter("ignore", RuntimeWarning)
            expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)
        actual = getattr(cat, compare_operators_no_eq_ne)(other)
        tm.assert_numpy_array_equal(actual, expected)
    @pytest.mark.parametrize(
        "data,reverse,base",
        [(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],
    )
    def test_comparisons(self, data, reverse, base):
        """Series-of-Categorical comparisons respect category order and reject
        comparisons with plain Series/ndarray."""
        cat_rev = Series(Categorical(data, categories=reverse, ordered=True))
        cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))
        cat = Series(Categorical(data, ordered=True))
        cat_base = Series(
            Categorical(base, categories=cat.cat.categories, ordered=True)
        )
        s = Series(base)
        a = np.array(base)
        # comparisons need to take categories ordering into account
        res_rev = cat_rev > cat_rev_base
        exp_rev = Series([True, False, False])
        tm.assert_series_equal(res_rev, exp_rev)
        res_rev = cat_rev < cat_rev_base
        exp_rev = Series([False, False, True])
        tm.assert_series_equal(res_rev, exp_rev)
        res = cat > cat_base
        exp = Series([False, False, True])
        tm.assert_series_equal(res, exp)
        # Scalar comparisons agree between the Series and raw Categorical.
        scalar = base[1]
        res = cat > scalar
        exp = Series([False, False, True])
        exp2 = cat.values > scalar
        tm.assert_series_equal(res, exp)
        tm.assert_numpy_array_equal(res.values, exp2)
        res_rev = cat_rev > scalar
        exp_rev = Series([True, False, False])
        exp_rev2 = cat_rev.values > scalar
        tm.assert_series_equal(res_rev, exp_rev)
        tm.assert_numpy_array_equal(res_rev.values, exp_rev2)
        # Only categories with same categories can be compared
        msg = "Categoricals can only be compared if 'categories' are the same"
        with pytest.raises(TypeError, match=msg):
            cat > cat_rev
        # categorical cannot be compared to Series or numpy array, and also
        # not the other way around
        msg = (
            "Cannot compare a Categorical for op __gt__ with type"
            r" <class 'numpy\.ndarray'>"
        )
        with pytest.raises(TypeError, match=msg):
            cat > s
        with pytest.raises(TypeError, match=msg):
            cat_rev > s
        with pytest.raises(TypeError, match=msg):
            cat > a
        with pytest.raises(TypeError, match=msg):
            cat_rev > a
        with pytest.raises(TypeError, match=msg):
            s < cat
        with pytest.raises(TypeError, match=msg):
            s < cat_rev
        with pytest.raises(TypeError, match=msg):
            a < cat
        with pytest.raises(TypeError, match=msg):
            a < cat_rev
    @pytest.mark.parametrize(
        "ctor",
        [
            lambda *args, **kwargs: Categorical(*args, **kwargs),
            lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),
        ],
    )
    def test_unordered_different_order_equal(self, ctor):
        """Unordered categoricals compare by value regardless of the order in
        which their (identical) categories were declared.

        https://github.com/pandas-dev/pandas/issues/16014
        """
        # Same values, category order differs -> elementwise equal.
        c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
        c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
        assert (c1 == c2).all()
        # Values reversed -> elementwise unequal.
        c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)
        c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)
        assert (c1 != c2).all()
        c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
        c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)
        assert (c1 != c2).all()
        # Mixed case: only the first position matches.
        c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)
        c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)
        result = c1 == c2
        tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))
def test_unordered_different_categories_raises(self):
c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)
c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)
with pytest.raises(TypeError, match=("Categoricals can only be compared")):
c1 == c2
def test_compare_different_lengths(self):
c1 = Categorical([], categories=["a", "b"])
c2 = Categorical([], categories=["a"])
msg = "Categories are different lengths"
with pytest.raises(TypeError, match=msg):
c1 == c2
def test_compare_unordered_different_order(self):
# https://github.com/pandas-dev/pandas/issues/16603#issuecomment-
# 349290078
a = pd.Categorical(["a"], categories=["a", "b"])
b = pd.Categorical(["b"], categories=["b", "a"])
assert not a.equals(b)
    def test_numeric_like_ops(self):
        """Arithmetic, numeric reductions, and numeric ufuncs on categorical
        data raise TypeError rather than silently operating on the codes.
        """
        df = DataFrame({"value": np.random.randint(0, 10000, 100)})
        labels = ["{0} - {1}".format(i, i + 499) for i in range(0, 10000, 500)]
        cat_labels = Categorical(labels, labels)
        df = df.sort_values(by=["value"], ascending=True)
        df["value_group"] = pd.cut(
            df.value, range(0, 10500, 500), right=False, labels=cat_labels
        )
        # numeric ops should not succeed
        for op, str_rep in [
            ("__add__", r"\+"),
            ("__sub__", "-"),
            ("__mul__", r"\*"),
            ("__truediv__", "/"),
        ]:
            msg = r"Series cannot perform the operation {}|unsupported operand".format(
                str_rep
            )
            with pytest.raises(TypeError, match=msg):
                getattr(df, op)(df)
        # reduction ops should not succeed (unless specifically defined, e.g.
        # min/max)
        s = df["value_group"]
        for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:
            msg = "Categorical cannot perform the operation {}".format(op)
            with pytest.raises(TypeError, match=msg):
                getattr(s, op)(numeric_only=False)
        # mad technically works because it takes always the numeric data
        # numpy ops
        s = Series(Categorical([1, 2, 3, 4]))
        with pytest.raises(
            TypeError, match="Categorical cannot perform the operation sum"
        ):
            np.sum(s)
        # numeric ops on a Series
        for op, str_rep in [
            ("__add__", r"\+"),
            ("__sub__", "-"),
            ("__mul__", r"\*"),
            ("__truediv__", "/"),
        ]:
            msg = r"Series cannot perform the operation {}|unsupported operand".format(
                str_rep
            )
            with pytest.raises(TypeError, match=msg):
                getattr(s, op)(2)
        # invalid ufunc
        msg = "Object with dtype category cannot perform the numpy op log"
        with pytest.raises(TypeError, match=msg):
            np.log(s)
    def test_contains(self):
        """``in`` tests membership against the categorical's values, never
        against its integer codes (GH21508).
        """
        c = pd.Categorical(list("aabbca"), categories=list("cab"))
        assert "b" in c
        assert "z" not in c
        assert np.nan not in c
        # Unhashable probes must raise, not return False.
        with pytest.raises(TypeError, match="unhashable type: 'list'"):
            assert [1] in c
        # assert codes NOT in index
        assert 0 not in c
        assert 1 not in c
        # NaN is "in" only when actually present as a missing value.
        c = pd.Categorical(list("aabbca") + [np.nan], categories=list("cab"))
        assert np.nan in c
    @pytest.mark.parametrize(
        "item, expected",
        [
            (pd.Interval(0, 1), True),
            (1.5, True),
            (pd.Interval(0.5, 1.5), False),
            ("a", False),
            (pd.Timestamp(1), False),
            (pd.Timedelta(1), False),
        ],
        ids=str,
    )
    def test_contains_interval(self, item, expected):
        """``in`` on an interval-dtype categorical matches exact member
        intervals, or scalar points falling inside one (GH 23705).
        """
        cat = Categorical(pd.IntervalIndex.from_breaks(range(3)))
        result = item in cat
        assert result is expected
    def test_contains_list(self):
        """Unhashable (list) probes raise TypeError instead of returning
        False (GH#21729).
        """
        cat = Categorical([1, 2, 3])
        assert "a" not in cat
        with pytest.raises(TypeError, match="unhashable type"):
            ["a"] in cat
        with pytest.raises(TypeError, match="unhashable type"):
            ["a", "b"] in cat
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/treemap/_tiling.py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tiling(_BaseTraceHierarchyType):
    """Tiling options for treemap traces.

    NOTE(review): this module looks auto-generated by the plotly.py
    codegen; hand edits are normally overwritten on regeneration.
    """

    # class properties
    # --------------------
    _parent_path_str = "treemap"
    _path_str = "treemap.tiling"
    # property names accepted by this node in the figure schema
    _valid_props = {"flip", "packing", "pad", "squarifyratio"}
    # flip
    # ----
    @property
    def flip(self):
        """
        Determines if the positions obtained from solver are flipped on
        each axis.
        The 'flip' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['x', 'y'] joined with '+' characters
            (e.g. 'x+y')
        Returns
        -------
        Any
        """
        return self["flip"]

    @flip.setter
    def flip(self, val):
        self["flip"] = val

    # packing
    # -------
    @property
    def packing(self):
        """
        Determines d3 treemap solver. For more info please refer to
        https://github.com/d3/d3-hierarchy#treemap-tiling
        The 'packing' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['squarify', 'binary', 'dice', 'slice', 'slice-dice',
                'dice-slice']
        Returns
        -------
        Any
        """
        return self["packing"]

    @packing.setter
    def packing(self, val):
        self["packing"] = val

    # pad
    # ---
    @property
    def pad(self):
        """
        Sets the inner padding (in px).
        The 'pad' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["pad"]

    @pad.setter
    def pad(self, val):
        self["pad"] = val

    # squarifyratio
    # -------------
    @property
    def squarifyratio(self):
        """
        When using "squarify" `packing` algorithm, according to https:/
        /github.com/d3/d3-hierarchy/blob/master/README.md#squarify_rati
        o this option specifies the desired aspect ratio of the
        generated rectangles. The ratio must be specified as a number
        greater than or equal to one. Note that the orientation of the
        generated rectangles (tall or wide) is not implied by the
        ratio; for example, a ratio of two will attempt to produce a
        mixture of rectangles whose width:height ratio is either 2:1 or
        1:2. When using "squarify", unlike d3 which uses the Golden
        Ratio i.e. 1.618034, Plotly applies 1 to increase squares in
        treemap layouts.
        The 'squarifyratio' property is a number and may be specified as:
          - An int or float in the interval [1, inf]
        Returns
        -------
        int|float
        """
        return self["squarifyratio"]

    @squarifyratio.setter
    def squarifyratio(self, val):
        self["squarifyratio"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        flip
            Determines if the positions obtained from solver are
            flipped on each axis.
        packing
            Determines d3 treemap solver. For more info please
            refer to https://github.com/d3/d3-hierarchy#treemap-
            tiling
        pad
            Sets the inner padding (in px).
        squarifyratio
            When using "squarify" `packing` algorithm, according to
            https://github.com/d3/d3-hierarchy/blob/master/README.m
            d#squarify_ratio this option specifies the desired
            aspect ratio of the generated rectangles. The ratio
            must be specified as a number greater than or equal to
            one. Note that the orientation of the generated
            rectangles (tall or wide) is not implied by the ratio;
            for example, a ratio of two will attempt to produce a
            mixture of rectangles whose width:height ratio is
            either 2:1 or 1:2. When using "squarify", unlike d3
            which uses the Golden Ratio i.e. 1.618034, Plotly
            applies 1 to increase squares in treemap layouts.
        """

    def __init__(
        self, arg=None, flip=None, packing=None, pad=None, squarifyratio=None, **kwargs
    ):
        """
        Construct a new Tiling object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.Tiling`
        flip
            Determines if the positions obtained from solver are
            flipped on each axis.
        packing
            Determines d3 treemap solver. For more info please
            refer to https://github.com/d3/d3-hierarchy#treemap-
            tiling
        pad
            Sets the inner padding (in px).
        squarifyratio
            When using "squarify" `packing` algorithm, according to
            https://github.com/d3/d3-hierarchy/blob/master/README.m
            d#squarify_ratio this option specifies the desired
            aspect ratio of the generated rectangles. The ratio
            must be specified as a number greater than or equal to
            one. Note that the orientation of the generated
            rectangles (tall or wide) is not implied by the ratio;
            for example, a ratio of two will attempt to produce a
            mixture of rectangles whose width:height ratio is
            either 2:1 or 1:2. When using "squarify", unlike d3
            which uses the Golden Ratio i.e. 1.618034, Plotly
            applies 1 to increase squares in treemap layouts.
        Returns
        -------
        Tiling
        """
        super(Tiling, self).__init__("tiling")
        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Copy so popping keys below does not mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.Tiling 
constructor must be a dict or 
an instance of :class:`plotly.graph_objs.treemap.Tiling`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over keys in `arg`.
        _v = arg.pop("flip", None)
        _v = flip if flip is not None else _v
        if _v is not None:
            self["flip"] = _v
        _v = arg.pop("packing", None)
        _v = packing if packing is not None else _v
        if _v is not None:
            self["packing"] = _v
        _v = arg.pop("pad", None)
        _v = pad if pad is not None else _v
        if _v is not None:
            self["pad"] = _v
        _v = arg.pop("squarifyratio", None)
        _v = squarifyratio if squarifyratio is not None else _v
        if _v is not None:
            self["squarifyratio"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/frame/test_sort_values_level_as_str.py | <reponame>acrucetta/Chicago_COVI_WebApp
import numpy as np
import pytest
from pandas.errors import PerformanceWarning
from pandas import DataFrame
import pandas._testing as tm
@pytest.fixture
def df_none():
    """Unindexed frame: two future index levels plus two data columns
    (one with a tuple label)."""
    data = {
        "outer": ["a", "a", "a", "b", "b", "b"],
        "inner": [1, 2, 2, 2, 1, 1],
        "A": np.arange(6, 0, -1),
        ("B", 5): ["one", "one", "two", "two", "one", "one"],
    }
    return DataFrame(data)
@pytest.fixture(params=[["outer"], ["outer", "inner"]])
def df_idx(request, df_none):
    """df_none re-indexed by one or two of its label columns."""
    keys = request.param
    return df_none.set_index(keys)
@pytest.fixture(
    params=[
        "inner",  # index level
        ["outer"],  # list of index level
        "A",  # column
        [("B", 5)],  # list of column
        ["inner", "outer"],  # two index levels
        [("B", 5), "outer"],  # index level and column
        ["A", ("B", 5)],  # Two columns
        # BUG FIX: this case previously duplicated ["inner", "outer"] and
        # never exercised the mix its comment promised; add the column "A"
        # (matches the upstream pandas test for GH 14353).
        ["inner", "outer", "A"],  # two index levels and column
    ]
)
def sort_names(request):
    """`by` arguments for sort_values: index levels, columns, and mixes."""
    return request.param
@pytest.fixture(params=[True, False])
def ascending(request):
    # Exercise both sort directions.
    return request.param
def test_sort_index_level_and_column_label(df_none, df_idx, sort_names, ascending):
    """sort_values(by=...) accepts any mix of index-level names and column
    labels (GH 14353).
    """
    # Get index levels from df_idx
    levels = df_idx.index.names
    # Expected: sort the flat frame on the same keys, then restore the index.
    expected = df_none.sort_values(
        by=sort_names, ascending=ascending, axis=0
    ).set_index(levels)
    # Compute result sorting on mix on columns and index levels
    result = df_idx.sort_values(by=sort_names, ascending=ascending, axis=0)
    tm.assert_frame_equal(result, expected)
def test_sort_column_level_and_index_label(df_none, df_idx, sort_names, ascending):
    """Transposed variant of the test above: sort_values(axis=1) accepts a
    mix of column-level names and index labels (GH 14353).
    """
    # Get levels from df_idx
    levels = df_idx.index.names
    # Compute expected by sorting on axis=0, setting index levels, and then
    # transposing. For some cases this will result in a frame with
    # multiple column levels
    expected = (
        df_none.sort_values(by=sort_names, ascending=ascending, axis=0)
        .set_index(levels)
        .T
    )
    # Compute result by transposing and sorting on axis=1.
    result = df_idx.T.sort_values(by=sort_names, ascending=ascending, axis=1)
    if len(levels) > 1:
        # Accessing multi-level columns that are not lexsorted raises a
        # performance warning
        with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):
            tm.assert_frame_equal(result, expected)
    else:
        tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/sankey/node/__init__.py | <gh_stars>1000+
import sys
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so validators are
# imported eagerly there; newer interpreters get lazy relative imports to
# keep `import plotly` fast. (Auto-generated module.)
if sys.version_info < (3, 7):
    from ._ysrc import YsrcValidator
    from ._y import YValidator
    from ._xsrc import XsrcValidator
    from ._x import XValidator
    from ._thickness import ThicknessValidator
    from ._pad import PadValidator
    from ._line import LineValidator
    from ._labelsrc import LabelsrcValidator
    from ._label import LabelValidator
    from ._hovertemplatesrc import HovertemplatesrcValidator
    from ._hovertemplate import HovertemplateValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfo import HoverinfoValidator
    from ._groups import GroupsValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._ysrc.YsrcValidator",
            "._y.YValidator",
            "._xsrc.XsrcValidator",
            "._x.XValidator",
            "._thickness.ThicknessValidator",
            "._pad.PadValidator",
            "._line.LineValidator",
            "._labelsrc.LabelsrcValidator",
            "._label.LabelValidator",
            "._hovertemplatesrc.HovertemplatesrcValidator",
            "._hovertemplate.HovertemplateValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfo.HoverinfoValidator",
            "._groups.GroupsValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/window/common.py | <reponame>acrucetta/Chicago_COVI_WebApp
from datetime import datetime
import numpy as np
from numpy.random import randn
from pandas import DataFrame, Series, bdate_range, notna
import pandas._testing as tm
# Default length (rows) and width (columns) of the generated test data.
N, K = 100, 10
class Base:
    """Mixin that builds random series/frame inputs (with a NaN stretch)
    shared by the rolling/expanding/ewm test classes.
    """

    # positions forced to NaN / inf in the generated array
    _nan_locs = np.arange(20, 40)
    _inf_locs = np.array([])

    def _create_data(self):
        # N random normal samples with NaNs at _nan_locs, indexed by
        # business days starting 2009-01-01; frame gets fresh random data.
        arr = randn(N)
        arr[self._nan_locs] = np.NaN
        self.arr = arr
        self.rng = bdate_range(datetime(2009, 1, 1), periods=N)
        self.series = Series(arr.copy(), index=self.rng)
        self.frame = DataFrame(randn(N, K), index=self.rng, columns=np.arange(K))
# create the data only once as we are not setting it
def _create_consistency_data():
    """Build the shared moment-consistency fixtures.

    Returns a list of ``(obj, is_constant, no_nans)`` triples where ``obj``
    is a Series or DataFrame covering empty, all-NaN, constant, and mixed
    NaN/value layouts.
    """

    def create_series():
        # Hand-picked Series: empty, NaN-only, singletons, constant pairs,
        # and longer mixed layouts with leading/embedded NaN runs.
        return [
            Series(dtype=object),
            Series([np.nan]),
            Series([np.nan, np.nan]),
            Series([3.0]),
            Series([np.nan, 3.0]),
            Series([3.0, np.nan]),
            Series([1.0, 3.0]),
            Series([2.0, 2.0]),
            Series([3.0, 1.0]),
            Series(
                [5.0, 5.0, 5.0, 5.0, np.nan, np.nan, np.nan, 5.0, 5.0, np.nan, np.nan]
            ),
            Series(
                [
                    np.nan,
                    5.0,
                    5.0,
                    5.0,
                    np.nan,
                    np.nan,
                    np.nan,
                    5.0,
                    5.0,
                    np.nan,
                    np.nan,
                ]
            ),
            Series(
                [
                    np.nan,
                    np.nan,
                    5.0,
                    5.0,
                    np.nan,
                    np.nan,
                    np.nan,
                    5.0,
                    5.0,
                    np.nan,
                    np.nan,
                ]
            ),
            Series(
                [
                    np.nan,
                    3.0,
                    np.nan,
                    3.0,
                    4.0,
                    5.0,
                    6.0,
                    np.nan,
                    np.nan,
                    7.0,
                    12.0,
                    13.0,
                    14.0,
                    15.0,
                ]
            ),
            Series(
                [
                    np.nan,
                    5.0,
                    np.nan,
                    2.0,
                    4.0,
                    0.0,
                    9.0,
                    np.nan,
                    np.nan,
                    3.0,
                    12.0,
                    13.0,
                    14.0,
                    15.0,
                ]
            ),
            Series(
                [
                    2.0,
                    3.0,
                    np.nan,
                    3.0,
                    4.0,
                    5.0,
                    6.0,
                    np.nan,
                    np.nan,
                    7.0,
                    12.0,
                    13.0,
                    14.0,
                    15.0,
                ]
            ),
            Series(
                [
                    2.0,
                    5.0,
                    np.nan,
                    2.0,
                    4.0,
                    0.0,
                    9.0,
                    np.nan,
                    np.nan,
                    3.0,
                    12.0,
                    13.0,
                    14.0,
                    15.0,
                ]
            ),
            Series(range(10)),
            Series(range(20, 0, -2)),
        ]

    def create_dataframes():
        # Frames with empty/duplicate column labels, plus one-column frames
        # derived from every Series fixture above.
        return [
            DataFrame(),
            DataFrame(columns=["a"]),
            DataFrame(columns=["a", "a"]),
            DataFrame(columns=["a", "b"]),
            DataFrame(np.arange(10).reshape((5, 2))),
            DataFrame(np.arange(25).reshape((5, 5))),
            DataFrame(np.arange(25).reshape((5, 5)), columns=["a", "b", 99, "d", "d"]),
        ] + [DataFrame(s) for s in create_series()]

    def is_constant(x):
        # True when every non-NaN value (flattened) is identical.
        values = x.values.ravel()
        return len(set(values[notna(values)])) == 1

    def no_nans(x):
        return x.notna().all().all()

    # data is a tuple(object, is_constant, no_nans)
    data = create_series() + create_dataframes()
    return [(x, is_constant(x), no_nans(x)) for x in data]
# Built once at import time since the tests never mutate the fixtures.
_consistency_data = _create_consistency_data()
class ConsistencyBase(Base):
    """Checks that windowed moment statistics (count/mean/var/std/cov/corr)
    satisfy their defining mathematical identities on the shared fixtures.
    """

    # (elementwise function, minimum required periods, stat name) triples.
    base_functions = [
        (lambda v: Series(v).count(), None, "count"),
        (lambda v: Series(v).max(), None, "max"),
        (lambda v: Series(v).min(), None, "min"),
        (lambda v: Series(v).sum(), None, "sum"),
        (lambda v: Series(v).mean(), None, "mean"),
        (lambda v: Series(v).std(), 1, "std"),
        (lambda v: Series(v).cov(Series(v)), None, "cov"),
        (lambda v: Series(v).corr(Series(v)), None, "corr"),
        (lambda v: Series(v).var(), 1, "var"),
        # restore once GH 8086 is fixed
        # lambda v: Series(v).skew(), 3, 'skew'),
        # (lambda v: Series(v).kurt(), 4, 'kurt'),
        # restore once GH 8084 is fixed
        # lambda v: Series(v).quantile(0.3), None, 'quantile'),
        (lambda v: Series(v).median(), None, "median"),
        (np.nanmax, 1, "max"),
        (np.nanmin, 1, "min"),
        (np.nansum, 1, "sum"),
        (np.nanmean, 1, "mean"),
        (lambda v: np.nanstd(v, ddof=1), 1, "std"),
        (lambda v: np.nanvar(v, ddof=1), 1, "var"),
        (np.nanmedian, 1, "median"),
    ]
    # variants safe only on data that contains no NaNs
    no_nan_functions = [
        (np.max, None, "max"),
        (np.min, None, "min"),
        (np.sum, None, "sum"),
        (np.mean, None, "mean"),
        (lambda v: np.std(v, ddof=1), 1, "std"),
        (lambda v: np.var(v, ddof=1), 1, "var"),
        (np.median, None, "median"),
    ]

    def _create_data(self):
        # Extend Base's random inputs with the shared consistency fixtures.
        super()._create_data()
        self.data = _consistency_data

    def _test_moments_consistency_mock_mean(self, mean, mock_mean):
        # mean(x) must agree with an independent "mock" implementation.
        for (x, is_constant, no_nans) in self.data:
            mean_x = mean(x)
            if mock_mean:
                # check that mean equals mock_mean
                expected = mock_mean(x)
                tm.assert_equal(mean_x, expected.astype("float64"))

    def _test_moments_consistency_is_constant(self, min_periods, count, mean, corr):
        for (x, is_constant, no_nans) in self.data:
            count_x = count(x)
            mean_x = mean(x)
            # check that correlation of a series with itself is either 1 or NaN
            corr_x_x = corr(x, x)
            if is_constant:
                exp = x.max() if isinstance(x, Series) else x.max().max()
                # check mean of constant series
                expected = x * np.nan
                expected[count_x >= max(min_periods, 1)] = exp
                tm.assert_equal(mean_x, expected)
                # check correlation of constant series with itself is NaN
                expected[:] = np.nan
                tm.assert_equal(corr_x_x, expected)

    def _test_moments_consistency_var_debiasing_factors(
        self, var_biased=None, var_unbiased=None, var_debiasing_factors=None
    ):
        # unbiased variance == biased variance * debiasing factor, pointwise.
        for (x, is_constant, no_nans) in self.data:
            if var_unbiased and var_biased and var_debiasing_factors:
                # check variance debiasing factors
                var_unbiased_x = var_unbiased(x)
                var_biased_x = var_biased(x)
                var_debiasing_factors_x = var_debiasing_factors(x)
                tm.assert_equal(var_unbiased_x, var_biased_x * var_debiasing_factors_x)

    def _test_moments_consistency(
        self,
        min_periods,
        count,
        mean,
        corr,
        var_unbiased=None,
        std_unbiased=None,
        cov_unbiased=None,
        var_biased=None,
        std_biased=None,
        cov_biased=None,
    ):
        # Verify the standard identities: var >= 0, var == cov(x, x),
        # var == std^2, biased var == E[x^2] - E[x]^2, symmetry of cov/corr,
        # cov(x, y) == (var(x+y) - var(x) - var(y)) / 2, and
        # corr == cov / (std_x * std_y).
        for (x, is_constant, no_nans) in self.data:
            count_x = count(x)
            mean_x = mean(x)
            for (std, var, cov) in [
                (std_biased, var_biased, cov_biased),
                (std_unbiased, var_unbiased, cov_unbiased),
            ]:
                # check that var(x), std(x), and cov(x) are all >= 0
                var_x = var(x)
                std_x = std(x)
                assert not (var_x < 0).any().any()
                assert not (std_x < 0).any().any()
                if cov:
                    cov_x_x = cov(x, x)
                    assert not (cov_x_x < 0).any().any()
                    # check that var(x) == cov(x, x)
                    tm.assert_equal(var_x, cov_x_x)
                # check that var(x) == std(x)^2
                tm.assert_equal(var_x, std_x * std_x)
                if var is var_biased:
                    # check that biased var(x) == mean(x^2) - mean(x)^2
                    mean_x2 = mean(x * x)
                    tm.assert_equal(var_x, mean_x2 - (mean_x * mean_x))
                if is_constant:
                    # check that variance of constant series is identically 0
                    assert not (var_x > 0).any().any()
                    expected = x * np.nan
                    expected[count_x >= max(min_periods, 1)] = 0.0
                    if var is var_unbiased:
                        expected[count_x < 2] = np.nan
                    tm.assert_equal(var_x, expected)
                if isinstance(x, Series):
                    for (y, is_constant, no_nans) in self.data:
                        if not x.isna().equals(y.isna()):
                            # can only easily test two Series with similar
                            # structure
                            continue
                        # check that cor(x, y) is symmetric
                        corr_x_y = corr(x, y)
                        corr_y_x = corr(y, x)
                        tm.assert_equal(corr_x_y, corr_y_x)
                        if cov:
                            # check that cov(x, y) is symmetric
                            cov_x_y = cov(x, y)
                            cov_y_x = cov(y, x)
                            tm.assert_equal(cov_x_y, cov_y_x)
                            # check that cov(x, y) == (var(x+y) - var(x) -
                            # var(y)) / 2
                            var_x_plus_y = var(x + y)
                            var_y = var(y)
                            tm.assert_equal(
                                cov_x_y, 0.5 * (var_x_plus_y - var_x - var_y)
                            )
                            # check that corr(x, y) == cov(x, y) / (std(x) *
                            # std(y))
                            std_y = std(y)
                            tm.assert_equal(corr_x_y, cov_x_y / (std_x * std_y))
                            if cov is cov_biased:
                                # check that biased cov(x, y) == mean(x*y) -
                                # mean(x)*mean(y)
                                mean_y = mean(y)
                                mean_x_times_y = mean(x * y)
                                tm.assert_equal(
                                    cov_x_y, mean_x_times_y - (mean_x * mean_y)
                                )

    def _check_pairwise_moment(self, dispatch, name, **kwargs):
        # Pairwise result sliced for one column pair must equal the direct
        # two-series computation.
        def get_result(obj, obj2=None):
            return getattr(getattr(obj, dispatch)(**kwargs), name)(obj2)

        result = get_result(self.frame)
        result = result.loc[(slice(None), 1), 5]
        result.index = result.index.droplevel(1)
        expected = get_result(self.frame[1], self.frame[5])
        tm.assert_series_equal(result, expected, check_names=False)
def ew_func(A, B, com, name, **kwargs):
    """Dispatch an EWM statistic by name: ``A.ewm(com, **kwargs).<name>(B)``."""
    window = A.ewm(com, **kwargs)
    stat = getattr(window, name)
    return stat(B)
def check_binary_ew(name, A, B):
    """For com=20 / min_periods=5 on these fixtures, the binary EWM stat is
    NaN through index 13 and defined from index 14 onward."""
    result = ew_func(A=A, B=B, com=20, name=name, min_periods=5)
    leading = result.values[:14]
    trailing = result.values[14:]
    assert np.isnan(leading).all()
    assert not np.isnan(trailing).any()
def check_binary_ew_min_periods(name, min_periods, A, B):
    """Binary EWM stats (ewmcov/ewmcorr with bias=False) require at least
    two paired observations before producing a value (GH 7898)."""
    result = ew_func(A, B, 20, name=name, min_periods=min_periods)
    # One extra leading NaN appears because two values are needed.
    assert np.isnan(result.values[:11]).all()
    assert not np.isnan(result.values[11:]).any()
    # Length-0 input round-trips to an identical empty series.
    empty = Series([], dtype=np.float64)
    tm.assert_series_equal(
        ew_func(empty, empty, 50, name=name, min_periods=min_periods), empty
    )
    # A single observation is not enough: result is a lone NaN.
    result = ew_func(
        Series([1.0]), Series([1.0]), 50, name=name, min_periods=min_periods
    )
    tm.assert_series_equal(result, Series([np.NaN]))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/base/test_conversion.py | import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_timedelta64_dtype
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import CategoricalIndex, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
IntervalArray,
PandasArray,
PeriodArray,
SparseArray,
TimedeltaArray,
)
class TestToIterable:
    """Iteration over Index/Series must yield boxed Python/pandas scalars
    (int/float/Timestamp/Timedelta/Period), never raw numpy scalars.
    """

    # test that we convert an iterable to python types
    # (numpy dtype string, expected boxed scalar type)
    dtypes = [
        ("int8", int),
        ("int16", int),
        ("int32", int),
        ("int64", int),
        ("uint8", int),
        ("uint16", int),
        ("uint32", int),
        ("uint64", int),
        ("float16", float),
        ("float32", float),
        ("float64", float),
        ("datetime64[ns]", Timestamp),
        ("datetime64[ns, US/Eastern]", Timestamp),
        ("timedelta64[ns]", Timedelta),
    ]

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    @pytest.mark.filterwarnings("ignore:\\n    Passing:FutureWarning")
    # TODO(GH-24559): Remove the filterwarnings
    def test_iterable(self, index_or_series, method, dtype, rdtype):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([1], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype, obj",
        [
            ("object", object, "a"),
            ("object", int, 1),
            ("category", object, "a"),
            ("category", int, 1),
        ],
    )
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_iterable_object_and_category(
        self, index_or_series, method, dtype, rdtype, obj
    ):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([obj], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    def test_iterable_items(self, dtype, rdtype):
        # gh-13258
        # test if items yields the correct boxed scalars
        # this only applies to series
        s = Series([1], dtype=dtype)
        _, result = list(s.items())[0]
        assert isinstance(result, rdtype)
        # second iteration must box identically (no state carried over)
        _, result = list(s.items())[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype", dtypes + [("object", int), ("category", int)]
    )
    @pytest.mark.filterwarnings("ignore:\\n    Passing:FutureWarning")
    # TODO(GH-24559): Remove the filterwarnings
    def test_iterable_map(self, index_or_series, dtype, rdtype):
        # gh-13236
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([1], dtype=dtype)
        result = s.map(type)[0]
        if not isinstance(rdtype, tuple):
            rdtype = tuple([rdtype])
        assert result in rdtype

    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_categorial_datetimelike(self, method):
        # datetime-like categoricals also box to Timestamp
        i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])
        result = method(i)[0]
        assert isinstance(result, Timestamp)

    def test_iter_box(self):
        """Iterating a Series yields the boxed scalar for each datetime-like
        dtype, preserving tz and Period freq."""
        vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
        s = Series(vals)
        assert s.dtype == "datetime64[ns]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz is None
            assert res == exp
        vals = [
            Timestamp("2011-01-01", tz="US/Eastern"),
            Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        s = Series(vals)
        assert s.dtype == "datetime64[ns, US/Eastern]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timestamp)
            assert res.tz == exp.tz
            assert res == exp
        # timedelta
        vals = [Timedelta("1 days"), Timedelta("2 days")]
        s = Series(vals)
        assert s.dtype == "timedelta64[ns]"
        for res, exp in zip(s, vals):
            assert isinstance(res, Timedelta)
            assert res == exp
        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = Series(vals)
        assert s.dtype == "Period[M]"
        for res, exp in zip(s, vals):
            assert isinstance(res, pd.Period)
            assert res.freq == "M"
            assert res == exp
@pytest.mark.parametrize(
    "array, expected_type, dtype",
    [
        (np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
        (np.array(["a", "b"]), np.ndarray, "object"),
        (pd.Categorical(["a", "b"]), pd.Categorical, "category"),
        (
            pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
            DatetimeArray,
            "datetime64[ns, US/Central]",
        ),
        (
            pd.PeriodIndex([2018, 2019], freq="A"),
            PeriodArray,
            pd.core.dtypes.dtypes.PeriodDtype("A-DEC"),
        ),
        (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval",),
        # This test is currently failing for datetime64[ns] and timedelta64[ns].
        # The NumPy type system is sufficient for representing these types, so
        # we just use NumPy for Series / DataFrame columns of these types (so
        # we get consolidation and so on).
        # However, DatetimeIndex and TimedeltaIndex use the DateLikeArray
        # abstraction to for code reuse.
        # At the moment, we've judged that allowing this test to fail is more
        # practical that overriding Series._values to special case
        # Series[M8[ns]] and Series[m8[ns]] to return a DateLikeArray.
        pytest.param(
            pd.DatetimeIndex(["2017", "2018"]),
            np.ndarray,
            "datetime64[ns]",
            marks=[pytest.mark.xfail(reason="datetime _values", strict=True)],
        ),
        pytest.param(
            pd.TimedeltaIndex([10 ** 10]),
            np.ndarray,
            "m8[ns]",
            marks=[pytest.mark.xfail(reason="timedelta _values", strict=True)],
        ),
    ],
)
def test_values_consistent(array, expected_type, dtype):
    """Series._values and Index._values agree in type and content for the
    same backing data."""
    l_values = pd.Series(array)._values
    r_values = pd.Index(array)._values
    assert type(l_values) is expected_type
    assert type(l_values) is type(r_values)
    tm.assert_equal(l_values, r_values)
@pytest.mark.parametrize(
    "array, expected",
    [
        (np.array([0, 1], dtype=np.int64), np.array([0, 1], dtype=np.int64)),
        (np.array(["0", "1"]), np.array(["0", "1"], dtype=object)),
        # categoricals expose their integer codes
        (pd.Categorical(["a", "a"]), np.array([0, 0], dtype="int8")),
        (
            pd.DatetimeIndex(["2017-01-01T00:00:00"]),
            np.array(["2017-01-01T00:00:00"], dtype="M8[ns]"),
        ),
        # tz-aware values come back as naive UTC wall times
        (
            pd.DatetimeIndex(["2017-01-01T00:00:00"], tz="US/Eastern"),
            np.array(["2017-01-01T05:00:00"], dtype="M8[ns]"),
        ),
        (pd.TimedeltaIndex([10 ** 10]), np.array([10 ** 10], dtype="m8[ns]")),
        # periods expose their integer ordinals
        (
            pd.PeriodIndex(["2017", "2018"], freq="D"),
            np.array([17167, 17532], dtype=np.int64),
        ),
    ],
)
def test_ndarray_values(array, expected):
    """Series._ndarray_values and Index._ndarray_values agree with each
    other and with the expected raw representation."""
    l_values = pd.Series(array)._ndarray_values
    r_values = pd.Index(array)._ndarray_values
    tm.assert_numpy_array_equal(l_values, r_values)
    tm.assert_numpy_array_equal(l_values, expected)
@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
    """Series.array wraps a plain ndarray in a PandasArray."""
    series = pd.Series(arr)
    tm.assert_extension_array_equal(series.array, PandasArray(arr))
def test_numpy_array_all_dtypes(any_numpy_dtype):
ser = pd.Series(dtype=any_numpy_dtype)
result = ser.array
if is_datetime64_dtype(any_numpy_dtype):
assert isinstance(result, DatetimeArray)
elif is_timedelta64_dtype(any_numpy_dtype):
assert isinstance(result, TimedeltaArray)
else:
assert isinstance(result, PandasArray)
@pytest.mark.parametrize(
    "array, attr",
    [
        (pd.Categorical(["a", "b"]), "_codes"),
        (pd.core.arrays.period_array(["2000", "2001"], freq="D"), "_data"),
        (pd.core.arrays.integer_array([0, np.nan]), "_data"),
        (IntervalArray.from_breaks([0, 1]), "_left"),
        (SparseArray([0, 1]), "_sparse_values"),
        (DatetimeArray(np.array([1, 2], dtype="datetime64[ns]")), "_data"),
        # tz-aware Datetime
        (
            DatetimeArray(
                np.array(
                    ["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
                ),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            "_data",
        ),
    ],
)
def test_array(array, attr, index_or_series):
    """With copy=False, .array is backed by the very same underlying buffer
    (identity, not just equality) for every extension type."""
    box = index_or_series
    if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
        pytest.skip(f"No index type for {array.dtype}")
    result = box(array, copy=False).array
    if attr:
        # Compare the named private backing attribute on both sides.
        array = getattr(array, attr)
        result = getattr(result, attr)
    assert result is array
def test_array_multiindex_raises():
    """.array has no single-array representation for a MultiIndex and
    must raise ValueError."""
    mi = pd.MultiIndex.from_product([["A"], ["a", "b"]])
    with pytest.raises(ValueError, match="MultiIndex"):
        mi.array
@pytest.mark.parametrize(
    "array, expected",
    [
        (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
        (pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
        (
            pd.core.arrays.period_array(["2000", "2001"], freq="D"),
            np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
        ),
        (
            pd.core.arrays.integer_array([0, np.nan]),
            np.array([0, pd.NA], dtype=object),
        ),
        (
            IntervalArray.from_breaks([0, 1, 2]),
            np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
        ),
        (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
        # tz-naive datetime
        (
            DatetimeArray(np.array(["2000", "2001"], dtype="M8[ns]")),
            np.array(["2000", "2001"], dtype="M8[ns]"),
        ),
        # tz-aware stays tz-aware
        (
            DatetimeArray(
                np.array(
                    ["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]"
                ),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            np.array(
                [
                    pd.Timestamp("2000-01-01", tz="US/Central"),
                    pd.Timestamp("2000-01-02", tz="US/Central"),
                ]
            ),
        ),
        # Timedelta
        (
            TimedeltaArray(np.array([0, 3600000000000], dtype="i8"), freq="H"),
            np.array([0, 3600000000000], dtype="m8[ns]"),
        ),
    ],
)
def test_to_numpy(array, expected, index_or_series):
    """to_numpy() produces the expected ndarray representation for each
    extension-array-backed Index/Series."""
    box = index_or_series
    thing = box(array)
    if array.dtype.name in ("Int64", "Sparse[int64, 0]") and box is pd.Index:
        pytest.skip(f"No index type for {array.dtype}")
    result = thing.to_numpy()
    tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
    "arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series):
    """to_numpy shares memory with the source by default and detaches only
    when copy=True is requested."""
    obj = pd.Index(arr, copy=False)
    if as_series:
        obj = pd.Series(obj.values, copy=False)
    # Default and copy=False both return a view onto the original buffer.
    assert np.shares_memory(arr, obj.to_numpy()) is True
    assert np.shares_memory(arr, obj.to_numpy(copy=False)) is True
    # Only an explicit copy=True produces an independent array.
    assert np.shares_memory(arr, obj.to_numpy(copy=True)) is False
@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series):
    tz = "US/Eastern"
    obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
    if as_series:
        obj = pd.Series(obj)
    # tz is preserved by default and with an explicit object dtype
    expected = np.array(
        [pd.Timestamp("2000", tz=tz), pd.Timestamp("2001", tz=tz)], dtype=object
    )
    tm.assert_numpy_array_equal(obj.to_numpy(), expected)
    tm.assert_numpy_array_equal(obj.to_numpy(dtype="object"), expected)
    # asking for M8[ns] converts to naive UTC wall times
    expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]")
    tm.assert_numpy_array_equal(obj.to_numpy(dtype="M8[ns]"), expected)
@pytest.mark.parametrize(
    "values, dtype, na_value, expected",
    [
        ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]),
        (
            [pd.Timestamp("2000"), pd.Timestamp("2000"), pd.NaT],
            None,
            pd.Timestamp("2000"),
            [np.datetime64("2000-01-01T00:00:00.000000000")] * 3,
        ),
    ],
)
@pytest.mark.parametrize("container", [pd.Series, pd.Index])  # type: ignore
def test_to_numpy_na_value_numpy_dtype(container, values, dtype, na_value, expected):
    # na_value should replace missing entries in the resulting ndarray
    obj = container(values)
    result = obj.to_numpy(dtype=dtype, na_value=na_value)
    tm.assert_numpy_array_equal(result, np.array(expected))
def test_to_numpy_kwargs_raises():
    # Unknown keyword arguments must raise for both numpy-backed and
    # extension-array-backed Series.
    match = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
    ser = pd.Series([1, 2, 3])
    with pytest.raises(TypeError, match=match):
        ser.to_numpy(foo=True)
    ser = pd.Series([1, 2, 3], dtype="Int64")
    with pytest.raises(TypeError, match=match):
        ser.to_numpy(foo=True)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_shapedefaults.py | import _plotly_utils.basevalidators
class ShapedefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for layout.shapedefaults.
    def __init__(self, plotly_name="shapedefaults", parent_name="layout", **kwargs):
        # Defaults apply only when the caller did not override them.
        data_class_str = kwargs.pop("data_class_str", "Shape")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(ShapedefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scatter/marker/colorbar/_title.py | <filename>env/lib/python3.8/site-packages/plotly/validators/scatter/marker/colorbar/_title.py<gh_stars>1000+
import _plotly_utils.basevalidators
# Auto-generated validator wiring scatter.marker.colorbar.title to the shared
# compound Title machinery; the data_docs string is the generated user-facing
# documentation and is passed through untouched unless overridden.
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    def __init__(
        self, plotly_name="title", parent_name="scatter.marker.colorbar", **kwargs
    ):
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Title"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            font
                Sets this color bar's title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            side
                Determines the location of color bar's title
                with respect to the color bar. Note that the
                title's location used to be set by the now
                deprecated `titleside` attribute.
            text
                Sets the title of the color bar. Note that
                before the existence of `title.text`, the
                title's contents used to be defined as the
                `title` attribute itself. This behavior has
                been deprecated.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/_asarray.py | <gh_stars>1000+
"""
Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
from __future__ import division, absolute_import, print_function
from .overrides import set_module
from .multiarray import array
__all__ = [
"asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
]
@set_module('numpy')
def asarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray.

    Parameters
    ----------
    a : array_like
        Input data in any array-convertible form: lists, tuples, nested
        sequences, or ndarrays.
    dtype : data-type, optional
        Target dtype; inferred from the input when omitted.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory layout.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`.  No copy is made when `a` is already
        an ndarray with matching dtype and order.  ndarray subclasses are
        converted down to base-class ndarrays (contrast `asanyarray`).

    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.

    Examples
    --------
    >>> np.asarray([1, 2])
    array([1, 2])
    """
    # copy=False lets ``array`` hand back the input unchanged when possible
    return array(a, dtype=dtype, copy=False, order=order)
@set_module('numpy')
def asanyarray(a, dtype=None, order=None):
    """
    Convert the input to an ndarray, passing ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data in any array-convertible form, including scalars and
        nested sequences.
    dtype : data-type, optional
        Target dtype; inferred from the input when omitted.
    order : {'C', 'F'}, optional
        Row-major ('C', the default) or column-major ('F') memory layout.

    Returns
    -------
    out : ndarray or an ndarray subclass
        `a` itself when it is already an ndarray (or subclass); otherwise
        a newly constructed array.

    See Also
    --------
    asarray : Similar function which always returns base-class ndarrays.

    Examples
    --------
    >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
    >>> np.asanyarray(a) is a
    True
    """
    # subok=True is the only difference from ``asarray``
    return array(a, dtype=dtype, copy=False, order=order, subok=True)
@set_module('numpy')
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array (ndim >= 1) in C (row-major) memory order.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of the returned array.

    Returns
    -------
    out : ndarray
        C-contiguous array with the shape and contents of `a` (and
        `dtype`, if given).

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major order.
    require : Return an ndarray that satisfies requirements.

    Notes
    -----
    Because of ``ndmin=1``, 0-d inputs come back as 1-d arrays, so 0-d
    arrays are not preserved.
    """
    return array(a, dtype=dtype, copy=False, order='C', ndmin=1)
@set_module('numpy')
def asfortranarray(a, dtype=None):
    """
    Return an array (ndim >= 1) laid out in Fortran (column-major) order.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of the returned array; inferred when omitted.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, i.e. column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    require : Return an ndarray that satisfies requirements.

    Notes
    -----
    Because of ``ndmin=1``, 0-d inputs come back as 1-d arrays, so 0-d
    arrays are not preserved.
    """
    return array(a, dtype=dtype, copy=False, order='F', ndmin=1)
@set_module('numpy')
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    Useful to guarantee flags (contiguity, alignment, writeability,
    ownership) before handing an array to compiled code, e.g. via ctypes.

    Parameters
    ----------
    a : array_like
        The object to be converted.
    dtype : data-type
        The required data-type; the input's dtype is kept when None.
    requirements : str or list of str
        Any of:

        * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
        * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
        * 'ALIGNED' ('A')      - ensure a data-type aligned array
        * 'WRITEABLE' ('W')    - ensure a writable array
        * 'OWNDATA' ('O')      - ensure an array that owns its own data
        * 'ENSUREARRAY' ('E')  - ensure a base array, instead of a subclass

    Returns
    -------
    out : ndarray
        Array with the specified requirements and type (if given); a copy
        is made only when needed.

    Raises
    ------
    ValueError
        If both 'C' and 'F' order are requested.

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfortranarray
    ndarray.flags : Information about the memory layout of the array.
    """
    possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
                      'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
                      'A': 'A', 'ALIGNED': 'A',
                      'W': 'W', 'WRITEABLE': 'W',
                      'O': 'O', 'OWNDATA': 'O',
                      'E': 'E', 'ENSUREARRAY': 'E'}
    if not requirements:
        return asanyarray(a, dtype=dtype)

    # Normalize aliases down to single-letter flags (unknown names raise
    # KeyError, as before).
    requirements = {possible_flags[x.upper()] for x in requirements}

    # 'E' demands a base-class ndarray rather than a subclass.
    subok = 'E' not in requirements
    requirements.discard('E')

    if 'C' in requirements and 'F' in requirements:
        raise ValueError('Cannot specify both "C" and "F" order')
    if 'F' in requirements:
        order = 'F'
        requirements.discard('F')
    elif 'C' in requirements:
        order = 'C'
        requirements.discard('C')
    else:
        order = 'A'

    arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)

    # A single copy fixes every remaining flag deficiency at once.
    if any(not arr.flags[prop] for prop in requirements):
        arr = arr.copy(order)
    return arr
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/splom/_dimensions.py | import _plotly_utils.basevalidators
# Auto-generated compound-array validator for splom.dimensions; the data_docs
# string is the generated user-facing documentation and is passed through
# untouched unless overridden by the caller.
class DimensionsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    def __init__(self, plotly_name="dimensions", parent_name="splom", **kwargs):
        super(DimensionsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Dimension"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            axis
                :class:`plotly.graph_objects.splom.dimension.Ax
                is` instance or dict with compatible properties
            label
                Sets the label corresponding to this splom
                dimension.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            values
                Sets the dimension values to be plotted.
            valuessrc
                Sets the source reference on Chart Studio Cloud
                for  values .
            visible
                Determines whether or not this dimension is
                shown on the graph. Note that even visible
                false dimension contribute to the default grid
                generate by this splom trace.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_sankey.py | import _plotly_utils.basevalidators
class SankeyValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    # Auto-generated compound-array validator for layout.template.data.sankey.
    def __init__(
        self, plotly_name="sankey", parent_name="layout.template.data", **kwargs
    ):
        # setdefault keeps any caller-supplied overrides intact.
        kwargs.setdefault("data_class_str", "Sankey")
        kwargs.setdefault(
            "data_docs",
            """
            """,
        )
        super(SankeyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/legend/_traceorder.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>10-100
import _plotly_utils.basevalidators
class TraceorderValidator(_plotly_utils.basevalidators.FlaglistValidator):
    # Auto-generated flaglist validator for layout.legend.traceorder.
    def __init__(self, plotly_name="traceorder", parent_name="layout.legend", **kwargs):
        # Fill in generated defaults without clobbering caller overrides.
        defaults = dict(
            edit_type="legend",
            extras=["normal"],
            flags=["reversed", "grouped"],
            role="style",
        )
        for key, value in defaults.items():
            kwargs.setdefault(key, value)
        super(TraceorderValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/sankey/link/__init__.py | import sys
# Version-gated import strategy: eager submodule imports on Python < 3.7,
# lazy PEP 562 (__getattr__-based) loading otherwise to keep import cheap.
if sys.version_info < (3, 7):
    from ._valuesrc import ValuesrcValidator
    from ._value import ValueValidator
    from ._targetsrc import TargetsrcValidator
    from ._target import TargetValidator
    from ._sourcesrc import SourcesrcValidator
    from ._source import SourceValidator
    from ._line import LineValidator
    from ._labelsrc import LabelsrcValidator
    from ._label import LabelValidator
    from ._hovertemplatesrc import HovertemplatesrcValidator
    from ._hovertemplate import HovertemplateValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfo import HoverinfoValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._colorsrc import ColorsrcValidator
    from ._colorscaledefaults import ColorscaledefaultsValidator
    from ._colorscales import ColorscalesValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._valuesrc.ValuesrcValidator",
            "._value.ValueValidator",
            "._targetsrc.TargetsrcValidator",
            "._target.TargetValidator",
            "._sourcesrc.SourcesrcValidator",
            "._source.SourceValidator",
            "._line.LineValidator",
            "._labelsrc.LabelsrcValidator",
            "._label.LabelValidator",
            "._hovertemplatesrc.HovertemplatesrcValidator",
            "._hovertemplate.HovertemplateValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfo.HoverinfoValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._colorsrc.ColorsrcValidator",
            "._colorscaledefaults.ColorscaledefaultsValidator",
            "._colorscales.ColorscalesValidator",
            "._color.ColorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_utils/colors/sequential.py | """
Sequential color scales are appropriate for most continuous data, but in some cases it \
can be helpful to use a `plotly.colors.diverging` or \
`plotly.colors.cyclical` scale instead. The color scales in this module are \
mostly meant to be passed in as the `color_continuous_scale` argument to various functions.
"""
from ._swatches import _swatches
def swatches(template=None):
    # Thin wrapper so this module exposes its own swatches() helper;
    # delegates to the shared _swatches implementation.
    return _swatches(__name__, globals(), template)


# Reuse the shared implementation's docstring verbatim.
swatches.__doc__ = _swatches.__doc__
# Each scale below is a list of hex colors ordered low -> high; reversed
# twins (e.g. Viridis_r) are generated automatically at the bottom of the
# module.
Plotly3 = [
    "#0508b8",
    "#1910d8",
    "#3c19f0",
    "#6b1cfb",
    "#981cfd",
    "#bf1cfd",
    "#dd2bfd",
    "#f246fe",
    "#fc67fd",
    "#fe88fc",
    "#fea5fd",
    "#febefe",
    "#fec3fe",
]
Viridis = [
    "#440154",
    "#482878",
    "#3e4989",
    "#31688e",
    "#26828e",
    "#1f9e89",
    "#35b779",
    "#6ece58",
    "#b5de2b",
    "#fde725",
]
Cividis = [
    "#00224e",
    "#123570",
    "#3b496c",
    "#575d6d",
    "#707173",
    "#8a8678",
    "#a59c74",
    "#c3b369",
    "#e1cc55",
    "#fee838",
]
Inferno = [
    "#000004",
    "#1b0c41",
    "#4a0c6b",
    "#781c6d",
    "#a52c60",
    "#cf4446",
    "#ed6925",
    "#fb9b06",
    "#f7d13d",
    "#fcffa4",
]
Magma = [
    "#000004",
    "#180f3d",
    "#440f76",
    "#721f81",
    "#9e2f7f",
    "#cd4071",
    "#f1605d",
    "#fd9668",
    "#feca8d",
    "#fcfdbf",
]
Plasma = [
    "#0d0887",
    "#46039f",
    "#7201a8",
    "#9c179e",
    "#bd3786",
    "#d8576b",
    "#ed7953",
    "#fb9f3a",
    "#fdca26",
    "#f0f921",
]
from .plotlyjs import Blackbody, Bluered, Electric, Hot, Jet, Rainbow # noqa: F401
from .colorbrewer import ( # noqa: F401
Blues,
BuGn,
BuPu,
GnBu,
Greens,
Greys,
OrRd,
Oranges,
PuBu,
PuBuGn,
PuRd,
Purples,
RdBu,
RdPu,
Reds,
YlGn,
YlGnBu,
YlOrBr,
YlOrRd,
)
from .cmocean import ( # noqa: F401
turbid,
thermal,
haline,
solar,
ice,
gray,
deep,
dense,
algae,
matter,
speed,
amp,
tempo,
)
from .carto import ( # noqa: F401
Burg,
Burgyl,
Redor,
Oryel,
Peach,
Pinkyl,
Mint,
Blugrn,
Darkmint,
Emrld,
Aggrnyl,
Bluyl,
Teal,
Tealgrn,
Purp,
Purpor,
Sunset,
Magenta,
Sunsetdark,
Agsunset,
Brwnyl,
)
# Prefix variable names with _ so that they will not be added to the swatches
_contents = dict(globals())
for _k, _cols in _contents.items():
    # Derive a reversed twin (e.g. Viridis_r) for every public scale.
    if not _k.startswith("_") and _k != "swatches" and not _k.endswith("_r"):
        globals()[_k + "_r"] = _cols[::-1]
__all__ = ["swatches"]
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/layer/symbol/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/layer/symbol/__init__.py
import sys
# Version-gated import strategy: eager submodule imports on Python < 3.7,
# lazy PEP 562 (__getattr__-based) loading otherwise to keep import cheap.
if sys.version_info < (3, 7):
    from ._textposition import TextpositionValidator
    from ._textfont import TextfontValidator
    from ._text import TextValidator
    from ._placement import PlacementValidator
    from ._iconsize import IconsizeValidator
    from ._icon import IconValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._textposition.TextpositionValidator",
            "._textfont.TextfontValidator",
            "._text.TextValidator",
            "._placement.PlacementValidator",
            "._iconsize.IconsizeValidator",
            "._icon.IconValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/yaxis/_rangebreakdefaults.py | import _plotly_utils.basevalidators
class RangebreakdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for layout.yaxis.rangebreakdefaults.
    def __init__(
        self, plotly_name="rangebreakdefaults", parent_name="layout.yaxis", **kwargs
    ):
        # Defaults apply only when the caller did not override them.
        data_class_str = kwargs.pop("data_class_str", "Rangebreak")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(RangebreakdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/config.py | from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
# plotly.config moved to the chart_studio package; importing this module
# raises the standard redirection error pointing users there.
_chart_studio_error("config")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pip/_vendor/certifi/__init__.py | from .core import where
# certifi release version (CalVer: YYYY.MM.DD of the CA bundle snapshot)
__version__ = "2019.06.16"
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/streamtube/starts/__init__.py | import sys
# Version-gated import strategy: eager submodule imports on Python < 3.7,
# lazy PEP 562 (__getattr__-based) loading otherwise to keep import cheap.
if sys.version_info < (3, 7):
    from ._zsrc import ZsrcValidator
    from ._z import ZValidator
    from ._ysrc import YsrcValidator
    from ._y import YValidator
    from ._xsrc import XsrcValidator
    from ._x import XValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._zsrc.ZsrcValidator",
            "._z.ZValidator",
            "._ysrc.YsrcValidator",
            "._y.YValidator",
            "._xsrc.XsrcValidator",
            "._x.XValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/groupby/categorical.py | import numpy as np
from pandas.core.algorithms import unique1d
from pandas.core.arrays.categorical import (
Categorical,
CategoricalDtype,
recode_for_categories,
)
def recode_for_groupby(c: Categorical, sort: bool, observed: bool):
    """
    Code the categories to ensure we can groupby for categoricals.
    If observed=True, we return a new Categorical with the observed
    categories only.
    If sort=False, return a copy of self, coded with categories as
    returned by .unique(), followed by any categories not appearing in
    the data. If sort=True, return self.
    This method is needed solely to ensure the categorical index of the
    GroupBy result has categories in the order of appearance in the data
    (GH-8868).
    Parameters
    ----------
    c : Categorical
    sort : boolean
        The value of the sort parameter groupby was called with.
    observed : boolean
        Account only for the observed values
    Returns
    -------
    New Categorical
        If sort=False, the new categories are set to the order of
        appearance in codes (unless ordered=True, in which case the
        original order is preserved), followed by any unrepresented
        categories in the original order.
    Categorical or None
        If we are observed, return the original categorical, otherwise None
    """
    # we only care about observed values
    if observed:
        # -1 marks NaN codes, which are not real categories
        unique_codes = unique1d(c.codes)
        take_codes = unique_codes[unique_codes != -1]
        if c.ordered:
            # ordered categoricals keep their original category order
            take_codes = np.sort(take_codes)
        # we recode according to the uniques
        categories = c.categories.take(take_codes)
        codes = recode_for_categories(c.codes, c.categories, categories)
        # return a new categorical that maps our new codes
        # and categories
        dtype = CategoricalDtype(categories, ordered=c.ordered)
        # fastpath=True: codes/dtype were just built consistently above
        return Categorical(codes, dtype=dtype, fastpath=True), c
    # Already sorted according to c.categories; all is fine
    if sort:
        return c, None
    # sort=False should order groups in as-encountered order (GH-8868)
    cat = c.unique()
    # But for groupby to work, all categories should be present,
    # including those missing from the data (GH-13179), which .unique()
    # above dropped
    cat = cat.add_categories(c.categories[~c.categories.isin(cat.categories)])
    return c.reorder_categories(cat.categories), None
def recode_from_groupby(c: Categorical, sort: bool, ci):
    """
    Reverse the groupby recoding to account for sort / observed.

    Parameters
    ----------
    c : Categorical
    sort : boolean
        The value of the sort parameter groupby was called with.
    ci : CategoricalIndex
        The codes / categories to recode

    Returns
    -------
    CategoricalIndex
    """
    if not sort:
        # not sorting: keep encounter order and append unobserved
        # categories at the end
        unobserved = c.categories[~c.categories.isin(ci.categories)]
        return ci.add_categories(unobserved)
    # sorting: restore the original category ordering
    return ci.set_categories(c.categories)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pip/_internal/utils/setuptools_build.py | import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List
# Shim used to invoke setup.py through setuptools.
#
# sys.argv[0] is rewritten to the real setup.py path so that setuptools /
# distutils do not mistake "-c" for the script location when invoked via the
# shim. This avoids e.g. the manifest_maker warning:
# "warning: manifest_maker: standard file '-c' not found".
_SETUPTOOLS_SHIM = (
    "import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};"
    "f=getattr(tokenize, 'open', open)(__file__);"
    "code=f.read().replace('\\r\\n', '\\n');"
    "f.close();"
    "exec(compile(code, __file__, 'exec'))"
)


def make_setuptools_shim_args(setup_py_path, unbuffered_output=False):
    # type: (str, bool) -> List[str]
    """
    Build the interpreter argument list for a shim-wrapped setup.py call.

    :param setup_py_path: The path to setup.py to be wrapped.
    :param unbuffered_output: If True, adds the unbuffered switch to the
        argument list.
    """
    interpreter_args = [sys.executable]
    if unbuffered_output:
        interpreter_args.append('-u')
    interpreter_args += ['-c', _SETUPTOOLS_SHIM.format(setup_py_path)]
    return interpreter_args
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_streamtube.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
from plotly.graph_objs import Streamtube
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/pie/marker/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
# Version-gated import strategy: eager submodule imports on Python < 3.7,
# lazy PEP 562 (__getattr__-based) loading otherwise to keep import cheap.
if sys.version_info < (3, 7):
    from ._line import LineValidator
    from ._colorssrc import ColorssrcValidator
    from ._colors import ColorsValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._line.LineValidator",
            "._colorssrc.ColorssrcValidator",
            "._colors.ColorsValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/polynomial/polyutils.py | """
Utility classes and functions for the polynomial modules.
This module provides: error and warning objects; a polynomial base class;
and some routines used in both the `polynomial` and `chebyshev` modules.
Error objects
-------------
.. autosummary::
:toctree: generated/
PolyError base class for this sub-package's errors.
PolyDomainError raised when domains are mismatched.
Warning objects
---------------
.. autosummary::
:toctree: generated/
RankWarning raised in least-squares fit for rank-deficient matrix.
Base class
----------
.. autosummary::
:toctree: generated/
PolyBase Obsolete base class for the polynomial classes. Do not use.
Functions
---------
.. autosummary::
:toctree: generated/
as_series convert list of array_likes into 1-D arrays of common type.
trimseq remove trailing zeros.
trimcoef remove small trailing coefficients.
getdomain return the domain appropriate for a given set of abscissae.
mapdomain maps points between domains.
mapparms parameters of the linear map between domains.
"""
from __future__ import division, absolute_import, print_function
import operator
import functools
import warnings
import numpy as np
__all__ = [
'RankWarning', 'PolyError', 'PolyDomainError', 'as_series', 'trimseq',
'trimcoef', 'getdomain', 'mapdomain', 'mapparms', 'PolyBase']
#
# Warnings and Exceptions
#
class RankWarning(UserWarning):
    """Warning issued by least-squares fits when the design matrix is rank deficient."""
class PolyError(Exception):
    """Base class for all errors raised by this module."""
class PolyDomainError(PolyError):
    """
    Raised by the generic Poly class when two domains don't match.

    Emitted when a binary operation receives Poly objects whose domains
    differ.
    """
#
# Base class for all polynomial types
#
class PolyBase(object):
    """
    Obsolete base class for the polynomial classes.

    Deprecated in numpy 1.9.0; use the abstract ABCPolyBase class
    instead (note that the latter requires a number of virtual
    functions to be implemented).
    """
#
# Helper functions to convert inputs to 1-D arrays
#
def trimseq(seq):
    """
    Remove trailing zeros from a sequence of series coefficients.

    Parameters
    ----------
    seq : sequence
        Sequence of Poly series coefficients.

    Returns
    -------
    series : sequence
        Subsequence of `seq` with trailing zeros removed. An all-zero
        input keeps its first element; an empty input is returned
        unchanged. The result may or may not be a view.

    Notes
    -----
    Does not lose the type info if the sequence contains unknown objects.
    """
    if len(seq) == 0:
        return seq
    # scan backwards for the last nonzero coefficient, keeping at least one
    last = len(seq) - 1
    while last > 0 and seq[last] == 0:
        last -= 1
    return seq[:last + 1]
def as_series(alist, trim=True):
    """
    Return the argument as a list of 1-d arrays.

    Each element of `alist` is converted to an array of dtype double,
    complex double, or object (with ``ndmin=1``); a 2-d `alist` is thus
    parsed row by row.

    Parameters
    ----------
    alist : array_like
        A 1- or 2-d array_like.
    trim : boolean, optional
        When True, trailing zeros are removed from each input.
        When False, inputs are passed through intact.

    Returns
    -------
    [a1, a2,...] : list of 1-D arrays
        A copy of the input data as a list of 1-d arrays.

    Raises
    ------
    ValueError
        When an input cannot be converted to a 1-d array, an input is
        empty, or no common dtype exists.
    """
    arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
    if min(a.size for a in arrays) == 0:
        raise ValueError("Coefficient array is empty")
    if any(a.ndim != 1 for a in arrays):
        raise ValueError("Coefficient array is not 1-d")
    if trim:
        arrays = [trimseq(a) for a in arrays]

    if any(a.dtype == np.dtype(object) for a in arrays):
        # object arrays: copy as-is, promote everything else to object
        ret = []
        for a in arrays:
            if a.dtype == np.dtype(object):
                ret.append(a.copy())
            else:
                tmp = np.empty(len(a), dtype=np.dtype(object))
                tmp[:] = a[:]
                ret.append(tmp)
    else:
        try:
            dtype = np.common_type(*arrays)
        except Exception:
            raise ValueError("Coefficient arrays have no common type")
        ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
    return ret
def trimcoef(c, tol=0):
    """
    Remove "small" trailing coefficients from a polynomial.

    "Small" means absolute value no greater than `tol`; "trailing" means
    the highest-order coefficients, e.g. in ``[0, 1, 1, 0, 0]`` (which
    represents ``0 + x + x**2 + 0*x**3 + 0*x**4``) the 3rd- and 4th-order
    coefficients would be trimmed.

    Parameters
    ----------
    c : array_like
        1-d array of coefficients, ordered from lowest to highest.
    tol : number, optional
        Trailing elements with absolute value <= `tol` (default 0) are
        removed.

    Returns
    -------
    trimmed : ndarray
        1-d array with the trailing "small" values removed. If all
        values would be trimmed, a single zero is returned.

    Raises
    ------
    ValueError
        If `tol` < 0.

    See Also
    --------
    trimseq
    """
    if tol < 0:
        raise ValueError("tol must be non-negative")
    [c] = as_series([c])
    # indices of all coefficients that survive the tolerance test
    [keep] = np.nonzero(np.abs(c) > tol)
    if len(keep) == 0:
        # everything trimmed: one zero of the right dtype
        return c[:1]*0
    return c[:keep[-1] + 1].copy()
def getdomain(x):
    """
    Return a domain suitable for given abscissae.

    Find a domain suitable for a polynomial or Chebyshev series
    defined at the values supplied.

    Parameters
    ----------
    x : array_like
        1-d array of abscissae whose domain will be determined.

    Returns
    -------
    domain : ndarray
        1-d array containing two values.  For complex input these are the
        lower-left and upper-right corners of the smallest axis-aligned
        rectangle in the complex plane containing the points `x`; for
        real input they are the endpoints of the smallest interval
        containing the points `x`.

    See Also
    --------
    mapparms, mapdomain
    """
    [x] = as_series([x], trim=False)
    if x.dtype.char not in np.typecodes['Complex']:
        # Real case: just the tightest enclosing interval.
        return np.array((x.min(), x.max()))
    # Complex case: bound real and imaginary parts independently.
    re_lo, re_hi = x.real.min(), x.real.max()
    im_lo, im_hi = x.imag.min(), x.imag.max()
    return np.array((complex(re_lo, im_lo), complex(re_hi, im_hi)))
def mapparms(old, new):
    """
    Linear map parameters between domains.

    Return the parameters of the linear map ``offset + scale*x`` that
    maps `old` to `new` such that ``old[i] -> new[i]``, ``i = 0, 1``.

    Parameters
    ----------
    old, new : array_like
        Domains. Each domain must (successfully) convert to a 1-d array
        containing precisely two values.

    Returns
    -------
    offset, scale : scalars
        The map ``L(x) = offset + scale*x`` maps the first domain to the
        second.

    See Also
    --------
    getdomain, mapdomain

    Notes
    -----
    Also works for complex numbers, and thus can be used to calculate
    the parameters required to map any line in the complex plane to any
    other line therein.
    """
    span_old = old[1] - old[0]
    span_new = new[1] - new[0]
    # Solve offset + scale*old[i] == new[i] for i = 0, 1.
    offset = (old[1]*new[0] - old[0]*new[1])/span_old
    scale = span_new/span_old
    return offset, scale
def mapdomain(x, old, new):
    """
    Apply linear map to input points.

    The linear map ``offset + scale*x`` that maps the domain `old` to
    the domain `new` is applied to the points `x`.

    Parameters
    ----------
    x : array_like
        Points to be mapped. If `x` is a subtype of ndarray the subtype
        will be preserved.
    old, new : array_like
        The two domains that determine the map. Each must (successfully)
        convert to 1-d arrays containing precisely two values.

    Returns
    -------
    x_out : ndarray
        Array of points of the same shape as `x`, after application of
        the linear map between the two domains.

    See Also
    --------
    getdomain, mapparms

    Notes
    -----
    Effectively, this implements:

    .. math ::
        x\\_out = new[0] + m(x - old[0])

    where

    .. math ::
        m = \\frac{new[1]-new[0]}{old[1]-old[0]}

    Also works for complex numbers, and thus can be used to map any
    line in the complex plane to any other line therein.
    """
    # asanyarray preserves ndarray subclasses, per the docstring contract.
    x = np.asanyarray(x)
    offset, scale = mapparms(old, new)
    return offset + scale*x
def _nth_slice(i, ndim):
sl = [np.newaxis] * ndim
sl[i] = slice(None)
return tuple(sl)
def _vander_nd(vander_fs, points, degrees):
    r"""
    A generalization of the Vandermonde matrix for N dimensions.

    The result is built by combining the results of 1d Vandermonde
    matrices:

    .. math::
        W[i_0, \ldots, i_M, j_0, \ldots, j_N] = \prod_{k=0}^N{V_k(x_k)[i_0, \ldots, i_M, j_k]}

    where :math:`V_k = \texttt{vander\_fs[k]}` is evaluated at
    :math:`x_k = \texttt{points[k]}` with maximum degree ``degrees[k]``.

    Parameters
    ----------
    vander_fs : Sequence[function(array_like, int) -> ndarray]
        The 1d vander function to use for each axis, such as ``polyvander``
    points : Sequence[array_like]
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.  Must be the same length as `vander_fs`.
    degrees : Sequence[int]
        The maximum degree (inclusive) to use for each axis.
        Must be the same length as `vander_fs`.

    Returns
    -------
    vander_nd : ndarray
        An array of shape ``points[0].shape + tuple(d + 1 for d in degrees)``.
    """
    n_dims = len(vander_fs)
    if len(points) != n_dims:
        raise ValueError(
            "Expected {} dimensions of sample points, got {}".format(n_dims, len(points)))
    if len(degrees) != n_dims:
        raise ValueError(
            "Expected {} dimensions of degrees, got {}".format(n_dims, len(degrees)))
    if n_dims == 0:
        raise ValueError("Unable to guess a dtype or shape when no points are given")

    # Promote all point arrays to a common shape and a floating type.
    points = tuple(np.array(tuple(points), copy=False) + 0.0)

    # Build each 1d Vandermonde matrix, placing its basis axis in an
    # independent trailing axis of the output.
    per_axis = (
        vander_fs[k](points[k], degrees[k])[(...,) + _nth_slice(k, n_dims)]
        for k in range(n_dims)
    )
    # n_dims > 0 was checked above, so reduce needs no `initial`.
    return functools.reduce(operator.mul, per_axis)
def _vander_nd_flat(vander_fs, points, degrees):
    """
    Like `_vander_nd`, but flattens the last ``len(degrees)`` axes into a
    single axis.

    Used to implement the public ``<type>vander<n>d`` functions.
    """
    vander = _vander_nd(vander_fs, points, degrees)
    flat_shape = vander.shape[:-len(degrees)] + (-1,)
    return vander.reshape(flat_shape)
def _fromroots(line_f, mul_f, roots):
    """
    Helper function used to implement the ``<type>fromroots`` functions.

    Parameters
    ----------
    line_f : function(float, float) -> ndarray
        The ``<type>line`` function, such as ``polyline``
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    roots :
        See the ``<type>fromroots`` functions for more detail
    """
    if len(roots) == 0:
        # No roots: the constant polynomial 1.
        return np.ones(1)
    [roots] = as_series([roots], trim=False)
    roots.sort()
    # Start from the linear factors (x - r) and combine them pairwise
    # until a single product remains; pairing keeps factor degrees
    # balanced as they grow.
    factors = [line_f(-r, 1) for r in roots]
    remaining = len(factors)
    while remaining > 1:
        half, odd = divmod(remaining, 2)
        paired = [mul_f(factors[i], factors[i + half]) for i in range(half)]
        if odd:
            # Fold the leftover factor into the first pair.
            paired[0] = mul_f(paired[0], factors[-1])
        factors = paired
        remaining = half
    return factors[0]
def _valnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>val<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>val<n>d`` functions for more detail
"""
try:
args = tuple(np.array(args, copy=False))
except Exception:
# preserve the old error message
if len(args) == 2:
raise ValueError('x, y, z are incompatible')
elif len(args) == 3:
raise ValueError('x, y are incompatible')
else:
raise ValueError('ordinates are incompatible')
it = iter(args)
x0 = next(it)
# use tensor on only the first
c = val_f(x0, c)
for xi in it:
c = val_f(xi, c, tensor=False)
return c
def _gridnd(val_f, c, *args):
"""
Helper function used to implement the ``<type>grid<n>d`` functions.
Parameters
----------
val_f : function(array_like, array_like, tensor: bool) -> array_like
The ``<type>val`` function, such as ``polyval``
c, args :
See the ``<type>grid<n>d`` functions for more detail
"""
for xi in args:
c = val_f(xi, c)
return c
def _div(mul_f, c1, c2):
    """
    Helper function used to implement the ``<type>div`` functions.

    Implementation uses repeated subtraction of c2 multiplied by the nth basis.
    For some polynomial types, a more efficient approach may be possible.

    Parameters
    ----------
    mul_f : function(array_like, array_like) -> array_like
        The ``<type>mul`` function, such as ``polymul``
    c1, c2 :
        See the ``<type>div`` functions for more detail
    """
    # c1, c2 are trimmed copies
    [c1, c2] = as_series([c1, c2])
    if c2[-1] == 0:
        raise ZeroDivisionError()

    len1, len2 = len(c1), len(c2)
    if len1 < len2:
        # Divisor has higher degree: quotient is zero, remainder is c1.
        return c1[:1]*0, c1
    if len2 == 1:
        # Scalar divisor: elementwise division, zero remainder.
        return c1/c2[-1], c1[:1]*0

    quo = np.empty(len1 - len2 + 1, dtype=c1.dtype)
    rem = c1
    # Peel off the leading coefficient one basis function at a time,
    # starting with the highest-degree quotient term.
    for k in range(len1 - len2, -1, -1):
        shifted = mul_f([0]*k + [1], c2)
        coef = rem[-1]/shifted[-1]
        rem = rem[:-1] - coef*shifted[:-1]
        quo[k] = coef
    return quo, trimseq(rem)
def _add(c1, c2):
    """ Helper function used to implement the ``<type>add`` functions. """
    # c1, c2 are trimmed copies
    [c1, c2] = as_series([c1, c2])
    # Add the shorter series into the low-order part of the longer one.
    if len(c1) > len(c2):
        c1[:c2.size] += c2
        result = c1
    else:
        c2[:c1.size] += c1
        result = c2
    return trimseq(result)
def _sub(c1, c2):
    """ Helper function used to implement the ``<type>sub`` functions. """
    # c1, c2 are trimmed copies
    [c1, c2] = as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        result = c1
    else:
        # Negate the longer series, then add c1 into its low-order part.
        c2 = -c2
        c2[:c1.size] += c1
        result = c2
    return trimseq(result)
def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
    """
    Helper function used to implement the ``<type>fit`` functions.

    Performs a (weighted) least-squares fit of a linear combination of
    basis functions — the columns of the Vandermonde matrix built by
    `vander_f` — to the data `y` sampled at `x`.

    Parameters
    ----------
    vander_f : function(array_like, int) -> ndarray
        The 1d vander function, such as ``polyvander``
    x, y, deg, rcond, full, w :
        See the ``<type>fit`` functions for more detail.  A scalar
        `deg` fits all terms up to and including that degree; a 1-D
        array of ints fits only the listed terms.
    """
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0
    deg = np.asarray(deg)

    # check arguments.
    if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
        raise TypeError("deg must be an int or non-empty 1-D array of int")
    if deg.min() < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    if deg.ndim == 0:
        lmax = deg
        order = lmax + 1
        van = vander_f(x, lmax)
    else:
        # Fit only the requested terms: keep just the matching columns
        # of the full Vandermonde matrix.
        deg = np.sort(deg)
        lmax = deg[-1]
        order = len(deg)
        van = vander_f(x, lmax)[:, deg]

    # set up the least squares matrices in transposed form
    lhs = van.T
    rhs = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # apply weights. Don't use inplace operations as they
        # can cause problems with NA.
        lhs = lhs * w
        rhs = rhs * w

    # set rcond
    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Determine the norms of the design matrix columns.
    if issubclass(lhs.dtype.type, np.complexfloating):
        scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
    else:
        scl = np.sqrt(np.square(lhs).sum(1))
    # Avoid division by zero for all-zero columns.
    scl[scl == 0] = 1

    # Solve the least squares problem (columns pre-scaled to unit norm
    # for conditioning, then the scaling undone on the solution).
    c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
    c = (c.T/scl).T

    # Expand c to include non-fitted coefficients which are set to zero
    if deg.ndim > 0:
        if c.ndim == 2:
            cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
        else:
            cc = np.zeros(lmax+1, dtype=c.dtype)
        cc[deg] = c
        c = cc

    # warn on rank reduction
    if rank != order and not full:
        msg = "The fit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)

    if full:
        return c, [resids, rank, s, rcond]
    else:
        return c
def _pow(mul_f, c, pow, maxpower):
    """
    Helper function used to implement the ``<type>pow`` functions.

    Parameters
    ----------
    mul_f : function(array_like, array_like) -> ndarray
        The ``<type>mul`` function, such as ``polymul``
    c, pow, maxpower :
        See the ``<type>pow`` functions for more detail
    """
    # c is a trimmed copy
    [c] = as_series([c])
    power = int(pow)
    if power != pow or power < 0:
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower:
        raise ValueError("Power is too large")
    if power == 0:
        return np.array([1], dtype=c.dtype)
    if power == 1:
        return c
    # This can be made more efficient by using powers of two
    # in the usual way.
    prd = c
    for _ in range(2, power + 1):
        prd = mul_f(prd, c)
    return prd
def _deprecate_as_int(x, desc):
"""
Like `operator.index`, but emits a deprecation warning when passed a float
Parameters
----------
x : int-like, or float with integral value
Value to interpret as an integer
desc : str
description to include in any error message
Raises
------
TypeError : if x is a non-integral float or non-numeric
DeprecationWarning : if x is an integral float
"""
try:
return operator.index(x)
except TypeError:
# Numpy 1.17.0, 2019-03-11
try:
ix = int(x)
except TypeError:
pass
else:
if ix == x:
warnings.warn(
"In future, this will raise TypeError, as {} will need to "
"be an integer not just an integral float."
.format(desc),
DeprecationWarning,
stacklevel=3
)
return ix
raise TypeError("{} must be an integer".format(desc))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/_updatemenu.py | <reponame>acrucetta/Chicago_COVI_WebApp
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Updatemenu(_BaseLayoutHierarchyType):
# class properties
# --------------------
# Dotted paths locating this node in the figure hierarchy; presumably
# used by the plotly base classes for path resolution and error
# messages — NOTE(review): confirm against _BaseLayoutHierarchyType.
_parent_path_str = "layout"
_path_str = "layout.updatemenu"
# The complete set of property names this object recognizes (each has a
# matching @property below).
_valid_props = {
    "active",
    "bgcolor",
    "bordercolor",
    "borderwidth",
    "buttondefaults",
    "buttons",
    "direction",
    "font",
    "name",
    "pad",
    "showactive",
    "templateitemname",
    "type",
    "visible",
    "x",
    "xanchor",
    "y",
    "yanchor",
}
# active
# ------
@property
def active(self):
    """
    Index (starting from 0) of the button currently considered
    active.

    The 'active' property is an integer (floats are cast to int) in
    the interval [-1, 9223372036854775807].

    Returns
    -------
    int
    """
    return self["active"]

@active.setter
def active(self, val):
    self["active"] = val
# bgcolor
# -------
@property
def bgcolor(self):
    """
    Sets the background color of the update menu buttons.

    The 'bgcolor' property is a color and may be specified as a hex
    string (e.g. '#ff0000'), an rgb/rgba string, an hsl/hsla string,
    an hsv/hsva string, or a named CSS color.

    Returns
    -------
    str
    """
    return self["bgcolor"]

@bgcolor.setter
def bgcolor(self, val):
    self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
    """
    Sets the color of the border enclosing the update menu.

    The 'bordercolor' property is a color and may be specified as a
    hex string (e.g. '#ff0000'), an rgb/rgba string, an hsl/hsla
    string, an hsv/hsva string, or a named CSS color.

    Returns
    -------
    str
    """
    return self["bordercolor"]

@bordercolor.setter
def bordercolor(self, val):
    self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
    """
    Sets the width (in px) of the border enclosing the update menu.

    The 'borderwidth' property is a number in the interval [0, inf].

    Returns
    -------
    int|float
    """
    return self["borderwidth"]

@borderwidth.setter
def borderwidth(self, val):
    self["borderwidth"] = val
# buttons
# -------
@property
def buttons(self):
    """
    The buttons shown by this update menu.

    The 'buttons' property is a tuple of instances of
    :class:`plotly.graph_objs.layout.updatemenu.Button`, and may be
    specified as a list/tuple of Button instances or of dicts of
    string/value properties passed to the Button constructor.  Each
    button supports the properties ``args``, ``args2``, ``execute``,
    ``label``, ``method``, ``name``, ``templateitemname`` and
    ``visible``; see the Button class documentation for details.

    Returns
    -------
    tuple[plotly.graph_objs.layout.updatemenu.Button]
    """
    return self["buttons"]

@buttons.setter
def buttons(self, val):
    self["buttons"] = val
# buttondefaults
# --------------
@property
def buttondefaults(self):
    """
    When used in a template (as
    layout.template.layout.updatemenu.buttondefaults), sets the
    default property values to use for elements of
    layout.updatemenu.buttons

    The 'buttondefaults' property may be specified as an instance of
    :class:`plotly.graph_objs.layout.updatemenu.Button`, or as a dict
    of string/value properties passed to the Button constructor.

    Returns
    -------
    plotly.graph_objs.layout.updatemenu.Button
    """
    return self["buttondefaults"]

@buttondefaults.setter
def buttondefaults(self, val):
    self["buttondefaults"] = val
# direction
# ---------
@property
def direction(self):
    """
    Determines the direction in which the buttons are laid out,
    whether in a dropdown menu or a row/column of buttons. For
    `left` and `up`, the buttons will still appear in left-to-right
    or top-to-bottom order respectively.

    The 'direction' property is an enumeration; one of
    ['left', 'right', 'up', 'down'].

    Returns
    -------
    Any
    """
    return self["direction"]

@direction.setter
def direction(self, val):
    self["direction"] = val
# font
# ----
@property
def font(self):
    """
    Sets the font of the update menu button text.

    The 'font' property may be specified as an instance of
    :class:`plotly.graph_objs.layout.updatemenu.Font`, or as a dict
    of string/value properties passed to the Font constructor
    (supported keys: ``color``, ``family``, ``size``).

    Returns
    -------
    plotly.graph_objs.layout.updatemenu.Font
    """
    return self["font"]

@font.setter
def font(self, val):
    self["font"] = val
# name
# ----
@property
def name(self):
    """
    When used in a template, named items are created in the output
    figure in addition to any items the figure already has in this
    array. You can modify these items in the output figure by
    making your own item with `templateitemname` matching this
    `name` alongside your modifications (including `visible: false`
    or `enabled: false` to hide it). Has no effect outside of a
    template.

    The 'name' property is a string (numbers are converted to
    strings).

    Returns
    -------
    str
    """
    return self["name"]

@name.setter
def name(self, val):
    self["name"] = val
# pad
# ---
@property
def pad(self):
    """
    Sets the padding around the buttons or dropdown menu.

    The 'pad' property may be specified as an instance of
    :class:`plotly.graph_objs.layout.updatemenu.Pad`, or as a dict
    of string/value properties passed to the Pad constructor
    (supported keys: ``b``, ``l``, ``r``, ``t`` — the padding in px
    along the bottom, left, right and top of the component).

    Returns
    -------
    plotly.graph_objs.layout.updatemenu.Pad
    """
    return self["pad"]

@pad.setter
def pad(self, val):
    self["pad"] = val
# showactive
# ----------
@property
def showactive(self):
    """
    Highlights active dropdown item or active button if true.

    The 'showactive' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["showactive"]

@showactive.setter
def showactive(self, val):
    self["showactive"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
    """
    Used to refer to a named item in this array in the template.
    Named items from the template will be created even without a
    matching item in the input figure, but you can modify one by
    making an item with `templateitemname` matching its `name`,
    alongside your modifications (including `visible: false` or
    `enabled: false` to hide it). If there is no template or no
    matching item, this item will be hidden unless you explicitly
    show it with `visible: true`.

    The 'templateitemname' property is a string (numbers are
    converted to strings).

    Returns
    -------
    str
    """
    return self["templateitemname"]

@templateitemname.setter
def templateitemname(self, val):
    self["templateitemname"] = val
# type
# ----
@property
def type(self):
    """
    Determines whether the buttons are accessible via a dropdown
    menu or whether the buttons are stacked horizontally or
    vertically

    The 'type' property is an enumeration; one of
    ['dropdown', 'buttons'].

    Returns
    -------
    Any
    """
    return self["type"]

@type.setter
def type(self, val):
    self["type"] = val
# visible
# -------
@property
def visible(self):
    """
    Determines whether or not the update menu is visible.

    The 'visible' property must be specified as a bool
    (either True, or False)

    Returns
    -------
    bool
    """
    return self["visible"]

@visible.setter
def visible(self, val):
    self["visible"] = val
# x
# -
@property
def x(self):
    """
    Sets the x position (in normalized coordinates) of the update
    menu.

    The 'x' property is a number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["x"]

@x.setter
def x(self, val):
    self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
    """
    Sets the update menu's horizontal position anchor. This anchor
    binds the `x` position to the "left", "center" or "right" of
    the range selector.

    The 'xanchor' property is an enumeration; one of
    ['auto', 'left', 'center', 'right'].

    Returns
    -------
    Any
    """
    return self["xanchor"]

@xanchor.setter
def xanchor(self, val):
    self["xanchor"] = val
# y
# -
@property
def y(self):
    """
    Sets the y position (in normalized coordinates) of the update
    menu.

    The 'y' property is a number in the interval [-2, 3].

    Returns
    -------
    int|float
    """
    return self["y"]

@y.setter
def y(self, val):
    self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
    """
    Sets the update menu's vertical position anchor This anchor
    binds the `y` position to the "top", "middle" or "bottom" of
    the range selector.

    The 'yanchor' property is an enumeration; one of
    ['auto', 'top', 'middle', 'bottom'].

    Returns
    -------
    Any
    """
    return self["yanchor"]

@yanchor.setter
def yanchor(self, val):
    self["yanchor"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
    # One-paragraph description of every property; presumably consumed
    # by the plotly base class when composing constructor docstrings —
    # NOTE(review): confirm against the base class; the text below must
    # stay in sync with the individual property docstrings above.
    return """\
        active
            Determines which button (by index starting from 0) is
            considered active.
        bgcolor
            Sets the background color of the update menu buttons.
        bordercolor
            Sets the color of the border enclosing the update menu.
        borderwidth
            Sets the width (in px) of the border enclosing the
            update menu.
        buttons
            A tuple of
            :class:`plotly.graph_objects.layout.updatemenu.Button`
            instances or dicts with compatible properties
        buttondefaults
            When used in a template (as
            layout.template.layout.updatemenu.buttondefaults), sets
            the default property values to use for elements of
            layout.updatemenu.buttons
        direction
            Determines the direction in which the buttons are laid
            out, whether in a dropdown menu or a row/column of
            buttons. For `left` and `up`, the buttons will still
            appear in left-to-right or top-to-bottom order
            respectively.
        font
            Sets the font of the update menu button text.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pad
            Sets the padding around the buttons or dropdown menu.
        showactive
            Highlights active dropdown item or active button if
            true.
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        type
            Determines whether the buttons are accessible via a
            dropdown menu or whether the buttons are stacked
            horizontally or vertically
        visible
            Determines whether or not the update menu is visible.
        x
            Sets the x position (in normalized coordinates) of the
            update menu.
        xanchor
            Sets the update menu's horizontal position anchor. This
            anchor binds the `x` position to the "left", "center"
            or "right" of the range selector.
        y
            Sets the y position (in normalized coordinates) of the
            update menu.
        yanchor
            Sets the update menu's vertical position anchor This
            anchor binds the `y` position to the "top", "middle" or
            "bottom" of the range selector.
        """
def __init__(
self,
arg=None,
active=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
buttons=None,
buttondefaults=None,
direction=None,
font=None,
name=None,
pad=None,
showactive=None,
templateitemname=None,
type=None,
visible=None,
x=None,
xanchor=None,
y=None,
yanchor=None,
**kwargs
):
"""
Construct a new Updatemenu object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Updatemenu`
active
Determines which button (by index starting from 0) is
considered active.
bgcolor
Sets the background color of the update menu buttons.
bordercolor
Sets the color of the border enclosing the update menu.
borderwidth
Sets the width (in px) of the border enclosing the
update menu.
buttons
A tuple of
:class:`plotly.graph_objects.layout.updatemenu.Button`
instances or dicts with compatible properties
buttondefaults
When used in a template (as
layout.template.layout.updatemenu.buttondefaults), sets
the default property values to use for elements of
layout.updatemenu.buttons
direction
Determines the direction in which the buttons are laid
out, whether in a dropdown menu or a row/column of
buttons. For `left` and `up`, the buttons will still
appear in left-to-right or top-to-bottom order
respectively.
font
Sets the font of the update menu button text.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
pad
Sets the padding around the buttons or dropdown menu.
showactive
Highlights active dropdown item or active button if
true.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
type
Determines whether the buttons are accessible via a
dropdown menu or whether the buttons are stacked
horizontally or vertically
visible
Determines whether or not the update menu is visible.
x
Sets the x position (in normalized coordinates) of the
update menu.
xanchor
Sets the update menu's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the range selector.
y
Sets the y position (in normalized coordinates) of the
update menu.
yanchor
Sets the update menu's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the range selector.
Returns
-------
Updatemenu
"""
super(Updatemenu, self).__init__("updatemenus")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Updatemenu
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Updatemenu`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("active", None)
_v = active if active is not None else _v
if _v is not None:
self["active"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("buttons", None)
_v = buttons if buttons is not None else _v
if _v is not None:
self["buttons"] = _v
_v = arg.pop("buttondefaults", None)
_v = buttondefaults if buttondefaults is not None else _v
if _v is not None:
self["buttondefaults"] = _v
_v = arg.pop("direction", None)
_v = direction if direction is not None else _v
if _v is not None:
self["direction"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("pad", None)
_v = pad if pad is not None else _v
if _v is not None:
self["pad"] = _v
_v = arg.pop("showactive", None)
_v = showactive if showactive is not None else _v
if _v is not None:
self["showactive"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("type", None)
_v = type if type is not None else _v
if _v is not None:
self["type"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_funnel.py | from plotly.graph_objs import Funnel
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/geo/projection/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/geo/projection/__init__.py<gh_stars>10-100
import sys
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so the validator
# classes must be imported eagerly; on 3.7+ they are exposed lazily through
# plotly's relative_import helper, which synthesizes __all__, __getattr__
# and __dir__ for this package.
if sys.version_info < (3, 7):
    from ._type import TypeValidator
    from ._scale import ScaleValidator
    from ._rotation import RotationValidator
    from ._parallels import ParallelsValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._type.TypeValidator",
            "._scale.ScaleValidator",
            "._rotation.RotationValidator",
            "._parallels.ParallelsValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/__main__.py | # See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
# Entry point for ``python -m numpy.f2py``; all work is delegated to f2py2e.
from numpy.f2py.f2py2e import main
main()
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/categorical/test_fillna.py | <filename>.venv/lib/python3.8/site-packages/pandas/tests/indexes/categorical/test_fillna.py
import numpy as np
import pytest
from pandas import CategoricalIndex
import pandas._testing as tm
class TestFillNA:
    def test_fillna_categorical(self):
        """fillna on a CategoricalIndex: in-category values fill, others raise."""
        # GH#11343
        ci = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x")

        # Filling with a value that is one of the categories succeeds.
        expected = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x")
        tm.assert_index_equal(ci.fillna(1.0), expected)

        # Filling with a value outside the categories is rejected.
        with pytest.raises(ValueError, match="fill value must be in categories"):
            ci.fillna(2.0)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/pathccompiler.py | <filename>env/lib/python3.8/site-packages/numpy/distutils/pathccompiler.py
from __future__ import division, absolute_import, print_function
from distutils.unixccompiler import UnixCCompiler
class PathScaleCCompiler(UnixCCompiler):
    """
    PathScale compiler compatible with a gcc-built Python.
    """
    compiler_type = 'pathcc'
    cc_exe = 'pathcc'
    cxx_exe = 'pathCC'
    def __init__(self, verbose=0, dry_run=0, force=0):
        # Delegate the generic setup to UnixCCompiler, then point every
        # tool (compiler, C++ compiler, linkers) at the PathScale binaries.
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        cc = self.cc_exe
        self.set_executables(
            compiler=cc,
            compiler_so=cc,
            compiler_cxx=self.cxx_exe,
            linker_exe=cc,
            linker_so=cc + ' -shared',
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/series/methods/test_count.py | <filename>env/lib/python3.8/site-packages/pandas/tests/series/methods/test_count.py
import numpy as np
import pandas as pd
from pandas import Categorical, MultiIndex, Series
import pandas._testing as tm
class TestSeriesCount:
def test_count(self, datetime_series):
assert datetime_series.count() == len(datetime_series)
datetime_series[::2] = np.NaN
assert datetime_series.count() == np.isfinite(datetime_series).sum()
mi = MultiIndex.from_arrays([list("aabbcc"), [1, 2, 2, np.nan, 1, 2]])
ts = Series(np.arange(len(mi)), index=mi)
left = ts.count(level=1)
right = Series([2, 3, 1], index=[1, 2, np.nan])
tm.assert_series_equal(left, right)
ts.iloc[[0, 3, 5]] = np.nan
tm.assert_series_equal(ts.count(level=1), right - 1)
# GH#29478
with pd.option_context("use_inf_as_na", True):
assert pd.Series([pd.Timestamp("1990/1/1")]).count() == 1
def test_count_categorical(self):
ser = Series(
Categorical(
[np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True
)
)
result = ser.count()
assert result == 2
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/base/test_drop_duplicates.py | from datetime import datetime
import numpy as np
import pandas as pd
import pandas._testing as tm
def test_drop_duplicates_series_vs_dataframe():
    """Dropping duplicates on a one-column frame matches the Series result."""
    # GH 14192
    frame = pd.DataFrame(
        {
            "a": [1, 1, 1, "one", "one"],
            "b": [2, 2, np.nan, np.nan, np.nan],
            "c": [3, 3, np.nan, np.nan, "three"],
            "d": [1, 2, 3, 4, 4],
            "e": [
                datetime(2015, 1, 1),
                datetime(2015, 1, 1),
                datetime(2015, 2, 1),
                pd.NaT,
                pd.NaT,
            ],
        }
    )
    # Exercise every column under every supported ``keep`` policy.
    for keep in ("first", "last", False):
        for column in frame.columns:
            as_frame = frame[[column]].drop_duplicates(keep=keep)
            as_series = frame[column].drop_duplicates(keep=keep)
            tm.assert_frame_equal(as_frame, as_series.to_frame())
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_waterfallmode.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/_waterfallmode.py<gh_stars>10-100
import _plotly_utils.basevalidators
class WaterfallmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.waterfallmode`` ("group"/"overlay")."""

    def __init__(self, plotly_name="waterfallmode", parent_name="layout", **kwargs):
        # Generated defaults apply only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("role", "info")
        kwargs.setdefault("values", ["group", "overlay"])
        super(WaterfallmodeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/shape/_type.py | import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.shape.type``."""

    def __init__(self, plotly_name="type", parent_name="layout.shape", **kwargs):
        # Generated defaults apply only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc+arraydraw")
        kwargs.setdefault("role", "info")
        kwargs.setdefault("values", ["circle", "rect", "path", "line"])
        super(TypeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/plotly/__init__.py | from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
# Legacy entry point: the plotly.plotly functionality moved to the separate
# chart-studio package, and _chart_studio_error reports that migration
# (NOTE(review): presumably by raising — confirm in _plotly_future_).
_chart_studio_error("plotly")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scatter3d.py | from plotly.graph_objs import Scatter3d
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py | import warnings
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
@pytest.mark.slow
@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
def test_multiindex_get_loc():  # GH7724, GH2646
    """Stress-test MultiIndex ``.loc`` before and past the lexsort depth."""
    with warnings.catch_warnings(record=True):

        # test indexing into a multi-index before & past the lexsort depth
        from numpy.random import randint, choice, randn

        cols = ["jim", "joe", "jolie", "joline", "jolia"]

        def validate(mi, df, key):
            # ``mi`` is ``df`` indexed on every column but the last (see
            # the set_index call at the bottom).  Check that each prefix of
            # ``key`` selects via .loc exactly the rows a boolean mask over
            # the flat frame selects.
            mask = np.ones(len(df)).astype("bool")

            # test for all partials of this key
            for i, k in enumerate(key):
                mask &= df.iloc[:, i] == k

                if not mask.any():
                    assert key[: i + 1] not in mi.index
                    continue

                assert key[: i + 1] in mi.index
                right = df[mask].copy()

                if i + 1 != len(key):  # partial key
                    return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
                    assert return_value is None
                    return_value = right.set_index(cols[i + 1 : -1], inplace=True)
                    assert return_value is None
                    tm.assert_frame_equal(mi.loc[key[: i + 1]], right)

                else:  # full key
                    return_value = right.set_index(cols[:-1], inplace=True)
                    assert return_value is None
                    if len(right) == 1:  # single hit
                        right = Series(
                            right["jolia"].values, name=right.index[0], index=["jolia"]
                        )
                        tm.assert_series_equal(mi.loc[key[: i + 1]], right)
                    else:  # multi hit
                        tm.assert_frame_equal(mi.loc[key[: i + 1]], right)

        def loop(mi, df, keys):
            # validate every candidate key against this (mi, df) pair
            for key in keys:
                validate(mi, df, key)

        n, m = 1000, 50

        vals = [
            randint(0, 10, n),
            choice(list("abcdefghij"), n),
            choice(pd.date_range("20141009", periods=10).tolist(), n),
            choice(list("ZYXWVUTSRQ"), n),
            randn(n),
        ]
        vals = list(map(tuple, zip(*vals)))

        # bunch of keys for testing
        keys = [
            randint(0, 11, m),
            choice(list("abcdefghijk"), m),
            choice(pd.date_range("20141009", periods=11).tolist(), m),
            choice(list("ZYXWVUTSRQP"), m),
        ]
        keys = list(map(tuple, zip(*keys)))
        keys += list(map(lambda t: t[:-1], vals[:: n // m]))

        # covers both unique index and non-unique index
        df = DataFrame(vals, columns=cols)
        a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])

        for frame in a, b:
            for i in range(5):  # lexsort depth
                df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
                mi = df.set_index(cols[:-1])
                assert not mi.index.lexsort_depth < i
                loop(mi, df, keys)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/generic/test_to_xarray.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>.venv/lib/python3.8/site-packages/pandas/tests/generic/test_to_xarray.py
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFrameToXArray:
    """DataFrame.to_xarray round-trips through an xarray ``Dataset``."""
    @td.skip_if_no("xarray", "0.10.0")
    def test_to_xarray_index_types(self, index):
        """Conversion preserves data/coords for each supported index type."""
        if isinstance(index, pd.MultiIndex):
            pytest.skip("MultiIndex is tested separately")
        if len(index) == 0:
            pytest.skip("Test doesn't make sense for empty index")
        from xarray import Dataset
        # one column per major dtype family: object, int, uint, float,
        # bool, categorical, datetime, tz-aware datetime
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
            }
        )
        df.index = index[:3]
        df.index.name = "foo"
        df.columns.name = "bar"
        result = df.to_xarray()
        assert result.dims["foo"] == 3
        assert len(result.coords) == 1
        assert len(result.data_vars) == 8
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
        assert isinstance(result, Dataset)
        # idempotency
        # datetimes w/tz are preserved
        # column names are lost
        expected = df.copy()
        expected["f"] = expected["f"].astype(object)
        expected.columns.name = None
        tm.assert_frame_equal(
            result.to_dataframe(), expected,
        )
    @td.skip_if_no("xarray", min_version="0.7.0")
    def test_to_xarray(self):
        """Empty frames and MultiIndex frames convert and round-trip."""
        from xarray import Dataset
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
            }
        )
        df.index.name = "foo"
        # empty slice still produces a Dataset with a zero-length dim
        result = df[0:0].to_xarray()
        assert result.dims["foo"] == 0
        assert isinstance(result, Dataset)
        # available in 0.7.1
        # MultiIndex
        df.index = pd.MultiIndex.from_product([["a"], range(3)], names=["one", "two"])
        result = df.to_xarray()
        assert result.dims["one"] == 1
        assert result.dims["two"] == 3
        assert len(result.coords) == 2
        assert len(result.data_vars) == 8
        tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
        assert isinstance(result, Dataset)
        result = result.to_dataframe()
        expected = df.copy()
        expected["f"] = expected["f"].astype(object)
        expected.columns.name = None
        tm.assert_frame_equal(result, expected, check_index_type=False)
class TestSeriesToXArray:
    """Series.to_xarray round-trips through an xarray ``DataArray``."""

    @td.skip_if_no("xarray", "0.10.0")
    def test_to_xarray_index_types(self, index):
        if isinstance(index, pd.MultiIndex):
            pytest.skip("MultiIndex is tested separately")
        from xarray import DataArray

        ser = Series(range(len(index)), index=index, dtype="int64")
        ser.index.name = "foo"
        result = ser.to_xarray()
        repr(result)  # smoke-test the repr

        assert isinstance(result, DataArray)
        assert len(result) == len(index)
        assert len(result.coords) == 1
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])

        # idempotency: converting back yields the original series
        tm.assert_series_equal(result.to_series(), ser, check_index_type=False)

    @td.skip_if_no("xarray", min_version="0.7.0")
    def test_to_xarray(self):
        from xarray import DataArray

        # an empty series still converts, keeping its named coordinate
        empty = Series([], dtype=object)
        empty.index.name = "foo"
        result = empty.to_xarray()
        assert isinstance(result, DataArray)
        assert len(result) == 0
        assert len(result.coords) == 1
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])

        # a MultiIndex becomes two coordinates
        ser = Series(range(6), dtype="int64")
        ser.index.name = "foo"
        ser.index = pd.MultiIndex.from_product(
            [["a", "b"], range(3)], names=["one", "two"]
        )
        result = ser.to_xarray()
        assert isinstance(result, DataArray)
        assert len(result) == 2
        tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
        tm.assert_series_equal(result.to_series(), ser)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/violin/marker/__init__.py | import sys
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so the validator
# classes must be imported eagerly; on 3.7+ they are exposed lazily through
# plotly's relative_import helper, which synthesizes __all__, __getattr__
# and __dir__ for this package.
if sys.version_info < (3, 7):
    from ._symbol import SymbolValidator
    from ._size import SizeValidator
    from ._outliercolor import OutliercolorValidator
    from ._opacity import OpacityValidator
    from ._line import LineValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._symbol.SymbolValidator",
            "._size.SizeValidator",
            "._outliercolor.OutliercolorValidator",
            "._opacity.OpacityValidator",
            "._line.LineValidator",
            "._color.ColorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_table.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_table.py
import _plotly_utils.basevalidators
class TableValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Compound-array validator for ``layout.template.data.table`` entries."""

    def __init__(
        self, plotly_name="table", parent_name="layout.template.data", **kwargs
    ):
        # Generated defaults apply only when the caller did not override them.
        kwargs.setdefault("data_class_str", "Table")
        kwargs.setdefault(
            "data_docs",
            """
            """,
        )
        super(TableValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_pie.py | <gh_stars>1000+
from plotly.graph_objs import Pie
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/graph_objs.py | from plotly.graph_objs import *
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/_layout.py | from plotly.graph_objs import Layout
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/lib/tests/test_histograms.py | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose,
assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
import pytest
class TestHistogram(object):
    """Tests for np.histogram / histogram_bin_edges: counts, density,
    weights, dtypes, edge placement and error handling."""
    def setup(self):
        pass
    def teardown(self):
        pass
    def test_simple(self):
        """Counts sum to the sample size; uniform data spreads evenly."""
        n = 100
        v = np.random.rand(n)
        (a, b) = histogram(v)
        # check if the sum of the bins equals the number of samples
        assert_equal(np.sum(a, axis=0), n)
        # check that the bin counts are evenly spaced when the data is from
        # a linear function
        (a, b) = histogram(np.linspace(0, 10, 100))
        assert_array_equal(a, 10)
    def test_one_bin(self):
        """Single-bin and explicit-two-edge cases; bins=0 is rejected."""
        # Ticket 632
        hist, edges = histogram([1, 2, 3, 4], [1, 2])
        assert_array_equal(hist, [2, ])
        assert_array_equal(edges, [1, 2])
        assert_raises(ValueError, histogram, [1, 2], bins=0)
        h, e = histogram([1, 2], bins=1)
        assert_equal(h, np.array([2]))
        assert_allclose(e, np.array([1., 2.]))
    def test_normed(self):
        """Deprecated ``normed=True`` still integrates to 1 and warns once."""
        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check that the integral of the density equals 1.
            n = 100
            v = np.random.rand(n)
            a, b = histogram(v, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)
        sup = suppress_warnings()
        with sup:
            rec = sup.record(np.VisibleDeprecationWarning, '.*normed.*')
            # Check with non-constant bin widths (buggy but backwards
            # compatible)
            v = np.arange(10)
            bins = [0, 1, 5, 9, 10]
            a, b = histogram(v, bins, normed=True)
            area = np.sum(a * np.diff(b))
            assert_almost_equal(area, 1)
            assert_equal(len(rec), 1)
    def test_density(self):
        # Check that the integral of the density equals 1.
        n = 100
        v = np.random.rand(n)
        a, b = histogram(v, density=True)
        area = np.sum(a * np.diff(b))
        assert_almost_equal(area, 1)
        # Check with non-constant bin widths
        v = np.arange(10)
        bins = [0, 1, 3, 6, 10]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, .1)
        assert_equal(np.sum(a * np.diff(b)), 1)
        # Test that passing False works too
        a, b = histogram(v, bins, density=False)
        assert_array_equal(a, [1, 2, 3, 4])
        # Variable bin widths are especially useful to deal with
        # infinities.
        v = np.arange(10)
        bins = [0, 1, 3, 6, np.inf]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, [.1, .1, .1, 0.])
        # Taken from a bug report from <NAME> on the numpy-discussion
        # mailing list Aug. 6, 2010.
        counts, dmy = np.histogram(
            [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
        assert_equal(counts, [.25, 0])
    def test_outliers(self):
        # Check that outliers are not tallied
        a = np.arange(10) + .5
        # Lower outliers
        h, b = histogram(a, range=[0, 9])
        assert_equal(h.sum(), 9)
        # Upper outliers
        h, b = histogram(a, range=[1, 10])
        assert_equal(h.sum(), 9)
        # Normalization
        h, b = histogram(a, range=[1, 9], density=True)
        assert_almost_equal((h * np.diff(b)).sum(), 1, decimal=15)
        # Weights
        w = np.arange(10) + .5
        h, b = histogram(a, range=[1, 9], weights=w, density=True)
        assert_equal((h * np.diff(b)).sum(), 1)
        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
        assert_equal(h, w[1:-1])
    def test_arr_weights_mismatch(self):
        """weights must have the same shape as the data."""
        a = np.arange(10) + .5
        w = np.arange(11) + .5
        with assert_raises_regex(ValueError, "same shape as"):
            h, b = histogram(a, range=[1, 9], weights=w, density=True)
    def test_type(self):
        # Check the type of the returned histogram
        a = np.arange(10) + .5
        h, b = histogram(a)
        assert_(np.issubdtype(h.dtype, np.integer))
        h, b = histogram(a, density=True)
        assert_(np.issubdtype(h.dtype, np.floating))
        h, b = histogram(a, weights=np.ones(10, int))
        assert_(np.issubdtype(h.dtype, np.integer))
        h, b = histogram(a, weights=np.ones(10, float))
        assert_(np.issubdtype(h.dtype, np.floating))
    def test_f32_rounding(self):
        # gh-4799, check that the rounding of the edges works with float32
        x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
        y = np.array([5005.689453, 4481.327637, 6010.369629], dtype=np.float32)
        counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
        assert_equal(counts_hist.sum(), 3.)
    def test_bool_conversion(self):
        # gh-12107
        # Reference integer histogram
        a = np.array([1, 1, 0], dtype=np.uint8)
        int_hist, int_edges = np.histogram(a)
        # Should raise an warning on booleans
        # Ensure that the histograms are equivalent, need to suppress
        # the warnings to get the actual outputs
        with suppress_warnings() as sup:
            rec = sup.record(RuntimeWarning, 'Converting input from .*')
            hist, edges = np.histogram([True, True, False])
            # A warning should be issued
            assert_equal(len(rec), 1)
            assert_array_equal(hist, int_hist)
            assert_array_equal(edges, int_edges)
    def test_weights(self):
        """Weighted counts scale linearly and combine with density=True."""
        v = np.random.rand(100)
        w = np.ones(100) * 5
        a, b = histogram(v)
        na, nb = histogram(v, density=True)
        wa, wb = histogram(v, weights=w)
        nwa, nwb = histogram(v, weights=w, density=True)
        assert_array_almost_equal(a * 5, wa)
        assert_array_almost_equal(na, nwa)
        # Check weights are properly applied.
        v = np.linspace(0, 10, 10)
        w = np.concatenate((np.zeros(5), np.ones(5)))
        wa, wb = histogram(v, bins=np.arange(11), weights=w)
        assert_array_almost_equal(wa, w)
        # Check with integer weights
        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
        assert_array_equal(wa, [4, 5, 0, 1])
        wa, wb = histogram(
            [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], density=True)
        assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)
        # Check weights with non-uniform bin widths
        a, b = histogram(
            np.arange(9), [0, 1, 3, 6, 10],
            weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
        assert_almost_equal(a, [.2, .1, .1, .075])
    def test_exotic_weights(self):
        # Test the use of weights that are not integer or floats, but e.g.
        # complex numbers or object types.
        # Complex weights
        values = np.array([1.3, 2.5, 2.3])
        weights = np.array([1, -1, 2]) + 1j * np.array([2, 1, 2])
        # Check with custom bins
        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
        # Check with even bins
        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
        assert_array_almost_equal(wa, np.array([1, 1]) + 1j * np.array([2, 3]))
        # Decimal weights
        from decimal import Decimal
        values = np.array([1.3, 2.5, 2.3])
        weights = np.array([Decimal(1), Decimal(2), Decimal(3)])
        # Check with custom bins
        wa, wb = histogram(values, bins=[0, 2, 3], weights=weights)
        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
        # Check with even bins
        wa, wb = histogram(values, bins=2, range=[1, 3], weights=weights)
        assert_array_almost_equal(wa, [Decimal(1), Decimal(5)])
    def test_no_side_effects(self):
        # This is a regression test that ensures that values passed to
        # ``histogram`` are unchanged.
        values = np.array([1.3, 2.5, 2.3])
        np.histogram(values, range=[-10, 10], bins=100)
        assert_array_almost_equal(values, [1.3, 2.5, 2.3])
    def test_empty(self):
        """Empty input yields a zero count for the given bin."""
        a, b = histogram([], bins=([0, 1]))
        assert_array_equal(a, np.array([0]))
        assert_array_equal(b, np.array([0, 1]))
    def test_error_binnum_type (self):
        # Tests if right Error is raised if bins argument is float
        vals = np.linspace(0.0, 1.0, num=100)
        histogram(vals, 5)
        assert_raises(TypeError, histogram, vals, 2.4)
    def test_finite_range(self):
        # Normal ranges should be fine
        vals = np.linspace(0.0, 1.0, num=100)
        histogram(vals, range=[0.25,0.75])
        assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
        assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
    def test_invalid_range(self):
        # start of range must be < end of range
        vals = np.linspace(0.0, 1.0, num=100)
        with assert_raises_regex(ValueError, "max must be larger than"):
            np.histogram(vals, range=[0.1, 0.01])
    def test_bin_edge_cases(self):
        # Ensure that floating-point computations correctly place edge cases.
        arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
        hist, edges = np.histogram(arr, bins=8296, range=(2, 2280))
        mask = hist > 0
        left_edges = edges[:-1][mask]
        right_edges = edges[1:][mask]
        for x, left, right in zip(arr, left_edges, right_edges):
            assert_(x >= left)
            assert_(x < right)
    def test_last_bin_inclusive_range(self):
        """The rightmost bin includes its upper edge."""
        arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
        hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
        assert_equal(hist[-1], 1)
    def test_bin_array_dims(self):
        # gracefully handle bins object > 1 dimension
        vals = np.linspace(0.0, 1.0, num=100)
        bins = np.array([[0, 0.5], [0.6, 1.0]])
        with assert_raises_regex(ValueError, "must be 1d"):
            np.histogram(vals, bins=bins)
    def test_unsigned_monotonicity_check(self):
        # Ensures ValueError is raised if bins not increasing monotonically
        # when bins contain unsigned values (see #9222)
        arr = np.array([2])
        bins = np.array([1, 3, 1], dtype='uint64')
        with assert_raises(ValueError):
            hist, edges = np.histogram(arr, bins=bins)
    def test_object_array_of_0d(self):
        # gh-7864
        assert_raises(ValueError,
                      histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
        assert_raises(ValueError,
                      histogram, [np.array(0.4) for i in range(10)] + [np.inf])
        # these should not crash
        np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
        np.histogram([np.array(0.5) for i in range(10)] + [.5])
    def test_some_nan_values(self):
        # gh-7503
        one_nan = np.array([0, 1, np.nan])
        all_nan = np.array([np.nan, np.nan])
        # the internal comparisons with NaN give warnings
        sup = suppress_warnings()
        sup.filter(RuntimeWarning)
        with sup:
            # can't infer range with nan
            assert_raises(ValueError, histogram, one_nan, bins='auto')
            assert_raises(ValueError, histogram, all_nan, bins='auto')
            # explicit range solves the problem
            h, b = histogram(one_nan, bins='auto', range=(0, 1))
            assert_equal(h.sum(), 2)  # nan is not counted
            h, b = histogram(all_nan, bins='auto', range=(0, 1))
            assert_equal(h.sum(), 0)  # nan is not counted
            # as does an explicit set of bins
            h, b = histogram(one_nan, bins=[0, 1])
            assert_equal(h.sum(), 2)  # nan is not counted
            h, b = histogram(all_nan, bins=[0, 1])
            assert_equal(h.sum(), 0)  # nan is not counted
    def test_datetime(self):
        """Datetime/timedelta data match the equivalent integer offsets."""
        begin = np.datetime64('2000-01-01', 'D')
        offsets = np.array([0, 0, 1, 1, 2, 3, 5, 10, 20])
        bins = np.array([0, 2, 7, 20])
        dates = begin + offsets
        date_bins = begin + bins
        td = np.dtype('timedelta64[D]')
        # Results should be the same for integer offsets or datetime values.
        # For now, only explicit bins are supported, since linspace does not
        # work on datetimes or timedeltas
        d_count, d_edge = histogram(dates, bins=date_bins)
        t_count, t_edge = histogram(offsets.astype(td), bins=bins.astype(td))
        i_count, i_edge = histogram(offsets, bins=bins)
        assert_equal(d_count, i_count)
        assert_equal(t_count, i_count)
        assert_equal((d_edge - begin).astype(int), i_edge)
        assert_equal(t_edge.astype(int), i_edge)
        assert_equal(d_edge.dtype, dates.dtype)
        assert_equal(t_edge.dtype, td)
    def do_signed_overflow_bounds(self, dtype):
        """Helper: bins spanning near the signed limits of ``dtype``."""
        exponent = 8 * np.dtype(dtype).itemsize - 1
        arr = np.array([-2**exponent + 4, 2**exponent - 4], dtype=dtype)
        hist, e = histogram(arr, bins=2)
        assert_equal(e, [-2**exponent + 4, 0, 2**exponent - 4])
        assert_equal(hist, [1, 1])
    def test_signed_overflow_bounds(self):
        self.do_signed_overflow_bounds(np.byte)
        self.do_signed_overflow_bounds(np.short)
        self.do_signed_overflow_bounds(np.intc)
        self.do_signed_overflow_bounds(np.int_)
        self.do_signed_overflow_bounds(np.longlong)
    def do_precision_lower_bound(self, float_small, float_large):
        """Helper: lower range bound representable only in ``float_large``."""
        eps = np.finfo(float_large).eps
        arr = np.array([1.0], float_small)
        range = np.array([1.0 + eps, 2.0], float_large)
        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[0] != 1:
            return
        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [1])
        # gh-10322 means that the type comes from arr - this may change
        assert_equal(x_loc.dtype, float_small)
    def do_precision_upper_bound(self, float_small, float_large):
        """Helper: upper range bound representable only in ``float_large``."""
        eps = np.finfo(float_large).eps
        arr = np.array([1.0], float_small)
        range = np.array([0.0, 1.0 - eps], float_large)
        # test is looking for behavior when the bounds change between dtypes
        if range.astype(float_small)[-1] != 1:
            return
        # previously crashed
        count, x_loc = np.histogram(arr, bins=1, range=range)
        assert_equal(count, [1])
        # gh-10322 means that the type comes from arr - this may change
        assert_equal(x_loc.dtype, float_small)
    def do_precision(self, float_small, float_large):
        """Helper: run both precision-bound checks for a dtype pair."""
        self.do_precision_lower_bound(float_small, float_large)
        self.do_precision_upper_bound(float_small, float_large)
    def test_precision(self):
        # not looping results in a useful stack trace upon failure
        self.do_precision(np.half, np.single)
        self.do_precision(np.half, np.double)
        self.do_precision(np.half, np.longdouble)
        self.do_precision(np.single, np.double)
        self.do_precision(np.single, np.longdouble)
        self.do_precision(np.double, np.longdouble)
    def test_histogram_bin_edges(self):
        """histogram_bin_edges agrees with the edges histogram returns."""
        hist, e = histogram([1, 2, 3, 4], [1, 2])
        edges = histogram_bin_edges([1, 2, 3, 4], [1, 2])
        assert_array_equal(edges, e)
        arr = np.array([0., 0., 0., 1., 2., 3., 3., 4., 5.])
        hist, e = histogram(arr, bins=30, range=(-0.5, 5))
        edges = histogram_bin_edges(arr, bins=30, range=(-0.5, 5))
        assert_array_equal(edges, e)
        hist, e = histogram(arr, bins='auto', range=(0, 1))
        edges = histogram_bin_edges(arr, bins='auto', range=(0, 1))
        assert_array_equal(edges, e)
class TestHistogramOptimBinNums(object):
    """
    Provide test coverage when using provided estimators for optimal number of
    bins
    """
    def test_empty(self):
        """Every estimator must cope with empty input data."""
        estimator_list = ['fd', 'scott', 'rice', 'sturges',
                          'doane', 'sqrt', 'auto', 'stone']
        # check it can deal with empty data
        for estimator in estimator_list:
            a, b = histogram([], bins=estimator)
            assert_array_equal(a, np.array([0]))
            assert_array_equal(b, np.array([0, 1]))
    def test_simple(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). All test values have been precomputed and the values
        shouldn't change
        """
        # Some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
                           'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
                      500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
                            'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
                      5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
                             'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
        for testlen, expectedResults in basic_test.items():
            # Create some sort of non uniform data to test with
            # (2 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x = np.concatenate((x1, x2))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator)
                assert_equal(len(a), numbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))
    def test_small(self):
        """
        Smaller datasets have the potential to cause issues with the data
        adaptive methods, especially the FD method. All bin numbers have been
        precalculated.
        """
        small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                         'doane': 1, 'sqrt': 1, 'stone': 1},
                     2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
                         'doane': 1, 'sqrt': 2, 'stone': 1},
                     3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
                         'doane': 3, 'sqrt': 2, 'stone': 1}}
        for testlen, expectedResults in small_dat.items():
            testdat = np.arange(testlen)
            for estimator, expbins in expectedResults.items():
                a, b = np.histogram(testdat, estimator)
                assert_equal(len(a), expbins, err_msg="For the {0} estimator "
                             "with datasize of {1}".format(estimator, testlen))
    def test_incorrect_methods(self):
        """
        Check a Value Error is thrown when an unknown string is passed in
        """
        check_list = ['mad', 'freeman', 'histograms', 'IQR']
        for estimator in check_list:
            assert_raises(ValueError, histogram, [1, 2, 3], estimator)
    def test_novariance(self):
        """
        Check that methods handle no variance in data
        Primarily for Scott and FD as the SD and IQR are both 0 in this case
        """
        novar_dataset = np.ones(100)
        novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
                            'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
        for estimator, numbins in novar_resultdict.items():
            a, b = np.histogram(novar_dataset, estimator)
            assert_equal(len(a), numbins, err_msg="{0} estimator, "
                         "No Variance test".format(estimator))
    def test_limited_variance(self):
        """
        Check when IQR is 0, but variance exists, we return the sturges value
        and not the fd value.
        """
        # 993 of the 1000 points are identical, so the IQR is 0 while the
        # overall variance is not.
        lim_var_data = np.ones(1000)
        lim_var_data[:3] = 0
        lim_var_data[-4:] = 100
        edges_auto = histogram_bin_edges(lim_var_data, 'auto')
        assert_equal(edges_auto, np.linspace(0, 100, 12))
        edges_fd = histogram_bin_edges(lim_var_data, 'fd')
        assert_equal(edges_fd, np.array([0, 100]))
        edges_sturges = histogram_bin_edges(lim_var_data, 'sturges')
        assert_equal(edges_sturges, np.linspace(0, 100, 12))
    def test_outlier(self):
        """
        Check the FD, Scott and Doane with outliers.
        The FD estimates a smaller binwidth since it's less affected by
        outliers. Since the range is so (artificially) large, this means more
        bins, most of which will be empty, but the data of interest usually is
        unaffected. The Scott estimator is more affected and returns fewer bins,
        despite most of the variance being in one area of the data. The Doane
        estimator lies somewhere between the other two.
        """
        xcenter = np.linspace(-10, 10, 50)
        outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
        outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
        for estimator, numbins in outlier_resultdict.items():
            a, b = np.histogram(outlier_dataset, estimator)
            assert_equal(len(a), numbins)
    def test_scott_vs_stone(self):
        """Verify that Scott's rule and Stone's rule converges for normally distributed data"""
        def nbins_ratio(seed, size):
            # ratio of stone bins to (stone + scott) bins; 0.5 means agreement
            rng = np.random.RandomState(seed)
            x = rng.normal(loc=0, scale=2, size=size)
            a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
            return a / (a + b)
        ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
              for seed in range(10)]
        # the average difference between the two methods decreases as the dataset size increases.
        avg = abs(np.mean(ll, axis=0) - 0.5)
        assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
    def test_simple_range(self):
        """
        Straightforward testing with a mixture of linspace data (for
        consistency). Adding in a 3rd mixture that will then be
        completely ignored. All test values have been precomputed and
        they shouldn't change.
        """
        # some basic sanity checking, with some fixed data.
        # Checking for the correct number of bins
        basic_test = {
                      50:   {'fd': 8,  'scott': 8,  'rice': 15,
                             'sturges': 14, 'auto': 14, 'stone': 8},
                      500:  {'fd': 15, 'scott': 16, 'rice': 32,
                             'sturges': 20, 'auto': 20, 'stone': 80},
                      5000: {'fd': 33, 'scott': 33, 'rice': 69,
                             'sturges': 27, 'auto': 33, 'stone': 80}
                     }
        for testlen, expectedResults in basic_test.items():
            # create some sort of non uniform data to test with
            # (3 peak uniform mixture)
            x1 = np.linspace(-10, -1, testlen // 5 * 2)
            x2 = np.linspace(1, 10, testlen // 5 * 3)
            x3 = np.linspace(-100, -50, testlen)
            x = np.hstack((x1, x2, x3))
            for estimator, numbins in expectedResults.items():
                a, b = np.histogram(x, estimator, range = (-20, 20))
                msg = "For the {0} estimator".format(estimator)
                msg += " with datasize of {0}".format(testlen)
                assert_equal(len(a), numbins, err_msg=msg)
    @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott',
                                      'stone', 'rice', 'sturges'])
    def test_signed_integer_data(self, bins):
        """Bin estimators must not overflow on small signed integer dtypes."""
        # Regression test for gh-14379.
        a = np.array([-2, 0, 127], dtype=np.int8)
        hist, edges = np.histogram(a, bins=bins)
        hist32, edges32 = np.histogram(a.astype(np.int32), bins=bins)
        assert_array_equal(hist, hist32)
        assert_array_equal(edges, edges32)
    def test_simple_weighted(self):
        """
        Check that weighted data raises a TypeError
        """
        # estimators do not (yet) support weights, so this must fail loudly
        estimator_list = ['fd', 'scott', 'rice', 'sturges', 'auto']
        for estimator in estimator_list:
            assert_raises(TypeError, histogram, [1, 2, 3],
                          estimator, weights=[1, 2, 3])
class TestHistogramdd(object):
    # Tests for np.histogramdd: counting, output shapes, weights, edge
    # handling, density normalization and the deprecated ``normed`` alias.
    def test_simple(self):
        """Basic counting, normalization and sequence-of-arrays input."""
        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3),
                               range=[[-1, 1], [0, 3], [0, 3]])
        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
        assert_array_equal(H, answer)
        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, density=True)
        assert_(np.all(H == answer / 12.))
        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4),
                               range=[[-1, 1], [0, 3], [0, 4]],
                               density=True)
        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)
        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [np.squeeze(y) for y in np.split(x, 3, axis=1)]
        H, edges = histogramdd(
            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = np.array([[[0, 0], [0, 0], [0, 0]],
                           [[0, 1], [0, 0], [1, 0]],
                           [[0, 1], [0, 0], [0, 0]],
                           [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)
        # points on the main diagonal of a 5x5x5 cube, one per bin
        Z = np.zeros((5, 5, 5))
        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
        assert_array_equal(H, Z)
    def test_shape_3d(self):
        """Output shape must follow the per-axis bin counts (3D)."""
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = np.random.rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)
    def test_shape_4d(self):
        """Output shape must follow the per-axis bin counts (4D)."""
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
        r = np.random.rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)
    def test_weights(self):
        """Uniform weights must reproduce (scaled) unweighted results."""
        v = np.random.rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, density=True)
        w_hist, edges = histogramdd(v, weights=np.ones(100))
        assert_array_equal(w_hist, hist)
        # a constant weight factor cancels out under density normalization
        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, density=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)
    def test_identical_samples(self):
        """Degenerate all-equal input must still get sensible edges."""
        x = np.zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))
    def test_empty(self):
        """Empty input yields all-zero counts of the requested shape."""
        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, np.array([[0.]]))
        a, b = np.histogramdd([[], [], []], bins=2)
        assert_array_max_ulp(a, np.zeros((2, 2, 2)))
    def test_bins_errors(self):
        """Invalid bin specifications must raise ValueError."""
        # There are two ways to specify bins. Check for the right errors
        # when mixing those.
        x = np.arange(8).reshape(2, 4)
        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))
    def test_inf_edges(self):
        """Infinite bin edges must be accepted."""
        # Test using +/-inf bin edges works. See #1788.
        with np.errstate(invalid='ignore'):
            x = np.arange(6).reshape(3, 2)
            expected = np.array([[1, 0], [0, 1], [0, 1]])
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
            assert_allclose(h, expected)
    def test_rightmost_binedge(self):
        """Values at or just beyond the last edge go to the last bin or none."""
        # Test event very close to rightmost binedge. See Github issue #4266
        x = [0.9999999995]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # the rightmost edge is inclusive
        x = [1.0]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        # values strictly beyond the last edge are dropped
        x = [1.0000000001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)
        x = [1.0001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)
    def test_finite_range(self):
        """Non-finite range bounds must raise ValueError."""
        vals = np.random.random((100, 3))
        histogramdd(vals, range=[[0.0, 1.0], [0.25, 0.75], [0.25, 0.5]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [0.25, 0.75], [0.25, np.inf]])
        assert_raises(ValueError, histogramdd, vals,
                      range=[[0.0, 1.0], [np.nan, 0.75], [0.25, 0.5]])
    def test_equal_edges(self):
        """ Test that adjacent entries in an edge array can be equal """
        x = np.array([0, 1, 2])
        y = np.array([0, 1, 2])
        x_edges = np.array([0, 2, 2])
        y_edges = 1
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
        hist_expected = np.array([
            [2.],
            [1.],  # x == 2 falls in the final bin
        ])
        assert_equal(hist, hist_expected)
    def test_edge_dtype(self):
        """ Test that if an edge array is input, its type is preserved """
        x = np.array([0, 10, 20])
        y = x / 10
        x_edges = np.array([0, 5, 15, 20])
        y_edges = x_edges / 10
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
        assert_equal(edges[0].dtype, x_edges.dtype)
        assert_equal(edges[1].dtype, y_edges.dtype)
    def test_large_integers(self):
        """Binning must not lose precision on integers > 2**53."""
        big = 2**60  # Too large to represent with a full precision float
        x = np.array([0], np.int64)
        x_edges = np.array([-1, +1], np.int64)
        y = big + x
        y_edges = big + x_edges
        hist, edges = histogramdd((x, y), bins=(x_edges, y_edges))
        assert_equal(hist[0, 0], 1)
    def test_density_non_uniform_2d(self):
        """Density must account for per-bin area with non-uniform edges."""
        # Defines the following grid:
        #
        #    0 2     8
        #   0+-+-----+
        #    + |     +
        #    + |     +
        #   6+-+-----+
        #   8+-+-----+
        x_edges = np.array([0, 2, 8])
        y_edges = np.array([0, 6, 8])
        relative_areas = np.array([
            [3, 9],
            [1, 3]])
        # ensure the number of points in each region is proportional to its area
        x = np.array([1] + [1]*3 + [7]*3 + [7]*9)
        y = np.array([7] + [1]*3 + [7]*3 + [1]*9)
        # sanity check that the above worked as intended
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
        assert_equal(hist, relative_areas)
        # resulting histogram should be uniform, since counts and areas are proportional
        hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
        assert_equal(hist, 1 / (8*8))
    def test_density_non_uniform_1d(self):
        """1-d histogramdd density must match np.histogram density."""
        # compare to histogram to show the results are the same
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])
    def test_density_via_normed(self):
        """The deprecated ``normed`` keyword must alias ``density``."""
        # normed should simply alias to density argument
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        hist, edges = histogram(v, bins, density=True)
        hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
        assert_equal(hist, hist_dd)
        assert_equal(edges, edges_dd[0])
    def test_density_normed_redundancy(self):
        """Passing both ``density`` and ``normed`` must raise TypeError."""
        v = np.arange(10)
        bins = np.array([0, 1, 3, 6, 10])
        with assert_raises_regex(TypeError, "Cannot specify both"):
            hist_dd, edges_dd = histogramdd((v,), (bins,),
                                            density=True,
                                            normed=True)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/figure_factory/_violin.py | from __future__ import absolute_import
from numbers import Number
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.graph_objs import graph_objs
from plotly.subplots import make_subplots
pd = optional_imports.get_module("pandas")
np = optional_imports.get_module("numpy")
scipy_stats = optional_imports.get_module("scipy.stats")
def calc_stats(data):
    """
    Calculate statistics for use in violin plot.

    :param (list|array) data: 1-d sequence of numeric values.
    :return: dict with keys 'min', 'max', 'q1', 'q2', 'q3' (the quartiles)
        and 'd1', 'd2' (whisker endpoints clipped to the data interval).
    """
    # np.float was deprecated in numpy 1.20 and removed in 1.24; the
    # builtin float is the equivalent dtype.
    x = np.asarray(data, float)
    vals_min = np.min(x)
    vals_max = np.max(x)
    # 'method' replaced the removed 'interpolation' keyword (numpy >= 1.22)
    q2 = np.percentile(x, 50, method="linear")
    q1 = np.percentile(x, 25, method="lower")
    q3 = np.percentile(x, 75, method="higher")
    iqr = q3 - q1
    whisker_dist = 1.5 * iqr
    # in order to prevent drawing whiskers outside the interval
    # of data one defines the whisker positions as:
    d1 = np.min(x[x >= (q1 - whisker_dist)])
    d2 = np.max(x[x <= (q3 + whisker_dist)])
    return {
        "min": vals_min,
        "max": vals_max,
        "q1": q1,
        "q2": q2,
        "q3": q3,
        "d1": d1,
        "d2": d2,
    }
def make_half_violin(x, y, fillcolor="#1f77b4", linecolor="rgb(0, 0, 0)"):
    """
    Produces a sideways probability distribution fig violin plot.

    ``x`` holds the (possibly negated) pdf values and ``y`` the grid the
    pdf was evaluated on; hover text shows both, rounded to 2 decimals.
    """
    hover_labels = []
    for idx in range(len(x)):
        pdf_part = "{:0.2f}".format(x[idx])
        grid_part = "{:0.2f}".format(y[idx])
        hover_labels.append("(pdf(y), y)=(" + pdf_part + ", " + grid_part + ")")
    return graph_objs.Scatter(
        x=x,
        y=y,
        mode="lines",
        name="",
        text=hover_labels,
        fill="tonextx",
        fillcolor=fillcolor,
        line=graph_objs.scatter.Line(width=0.5, color=linecolor, shape="spline"),
        hoverinfo="text",
        opacity=0.5,
    )
def make_violin_rugplot(vals, pdf_max, distance, color="#1f77b4"):
    """
    Returns a rugplot fig for a violin plot.

    The rug is one tick marker per data point, offset to the left of the
    violin by ``pdf_max + distance``.
    """
    tick_x = [-pdf_max - distance] * len(vals)
    tick_marker = graph_objs.scatter.Marker(color=color, symbol="line-ew-open")
    return graph_objs.Scatter(
        y=vals,
        x=tick_x,
        marker=tick_marker,
        mode="markers",
        name="",
        showlegend=False,
        hoverinfo="y",
    )
def make_non_outlier_interval(d1, d2):
    """
    Returns the scatterplot fig of most of a violin plot.

    Draws the thin vertical whisker line between the non-outlier
    extremes ``d1`` and ``d2``.
    """
    whisker_line = graph_objs.scatter.Line(width=1.5, color="rgb(0,0,0)")
    return graph_objs.Scatter(
        x=[0, 0],
        y=[d1, d2],
        name="",
        mode="lines",
        line=whisker_line,
    )
def make_quartiles(q1, q3):
    """
    Makes the upper and lower quartiles for a violin plot.

    Renders a thick vertical segment from ``q1`` to ``q3`` with a hover
    label for each quartile.
    """
    hover_labels = [
        "lower-quartile: " + "{:0.2f}".format(q1),
        "upper-quartile: " + "{:0.2f}".format(q3),
    ]
    quartile_line = graph_objs.scatter.Line(width=4, color="rgb(0,0,0)")
    return graph_objs.Scatter(
        x=[0, 0],
        y=[q1, q3],
        text=hover_labels,
        mode="lines",
        line=quartile_line,
        hoverinfo="text",
    )
def make_median(q2):
    """
    Formats the 'median' hovertext for a violin plot.

    The median is drawn as a single white square marker at ``q2``.
    """
    median_label = "median: " + "{:0.2f}".format(q2)
    return graph_objs.Scatter(
        x=[0],
        y=[q2],
        text=[median_label],
        mode="markers",
        marker=dict(symbol="square", color="rgb(255,255,255)"),
        hoverinfo="text",
    )
def make_XAxis(xaxis_title, xaxis_range):
    """
    Makes the x-axis for a violin plot.

    Every decoration (grid, zero line, axis line, ticks, tick labels) is
    switched off; only the title and range remain.
    """
    return graph_objs.layout.XAxis(
        title=xaxis_title,
        range=xaxis_range,
        showgrid=False,
        zeroline=False,
        showline=False,
        mirror=False,
        ticks="",
        showticklabels=False,
    )
def make_YAxis(yaxis_title):
    """
    Makes the y-axis for a violin plot.

    Keeps tick labels and the axis line; drops the grid and zero line.
    """
    return graph_objs.layout.YAxis(
        title=yaxis_title,
        showticklabels=True,
        autorange=True,
        ticklen=4,
        showline=True,
        zeroline=False,
        showgrid=False,
        mirror=False,
    )
def violinplot(vals, fillcolor="#1f77b4", rugplot=True):
    """
    Refer to FigureFactory.create_violin() for docstring.

    Builds the traces for a single violin (two half violins, whisker
    interval, quartiles, median and optional rugplot) plus the x-range
    the subplot should span.
    """
    # np.float was removed from numpy; the builtin float is equivalent
    vals = np.asarray(vals, float)
    # summary statistics - computed once instead of once per statistic
    stats = calc_stats(vals)
    vals_min = stats["min"]
    vals_max = stats["max"]
    q1 = stats["q1"]
    q2 = stats["q2"]
    q3 = stats["q3"]
    d1 = stats["d1"]
    d2 = stats["d2"]
    # kernel density estimation of pdf
    pdf = scipy_stats.gaussian_kde(vals)
    # grid over the data interval
    xx = np.linspace(vals_min, vals_max, 100)
    # evaluate the pdf at the grid xx
    yy = pdf(xx)
    max_pdf = np.max(yy)
    # distance from the violin plot to rugplot
    distance = (2.0 * max_pdf) / 10 if rugplot else 0
    # range for x values in the plot
    plot_xrange = [-max_pdf - distance - 0.1, max_pdf + 0.1]
    plot_data = [
        make_half_violin(-yy, xx, fillcolor=fillcolor),
        make_half_violin(yy, xx, fillcolor=fillcolor),
        make_non_outlier_interval(d1, d2),
        make_quartiles(q1, q3),
        make_median(q2),
    ]
    if rugplot:
        plot_data.append(
            make_violin_rugplot(vals, max_pdf, distance=distance, color=fillcolor)
        )
    return plot_data, plot_xrange
def violin_no_colorscale(
    data,
    data_header,
    group_header,
    colors,
    use_colorscale,
    group_stats,
    rugplot,
    sort,
    height,
    width,
    title,
):
    """
    Refer to FigureFactory.create_violin() for docstring.

    Returns fig for violin plot without colorscale: one subplot per group,
    cycling through the ``colors`` list.
    """
    # collect all group names, preserving first-seen order
    group_name = []
    for name in data[group_header]:
        if name not in group_name:
            group_name.append(name)
    if sort:
        group_name.sort()
    gb = data.groupby([group_header])
    L = len(group_name)
    fig = make_subplots(
        rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False
    )
    color_index = 0
    for k, gr in enumerate(group_name):
        # np.float was removed from numpy; the builtin float is equivalent
        vals = np.asarray(gb.get_group(gr)[data_header], float)
        # wrap around if there are more groups than colors
        if color_index >= len(colors):
            color_index = 0
        plot_data, plot_xrange = violinplot(
            vals, fillcolor=colors[color_index], rugplot=rugplot
        )
        for item in plot_data:
            fig.append_trace(item, 1, k + 1)
        color_index += 1
        # add violin plot labels
        fig["layout"].update(
            {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}
        )
    # set the sharey axis style
    fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})
    fig["layout"].update(
        title=title,
        showlegend=False,
        hovermode="closest",
        autosize=False,
        height=height,
        width=width,
    )
    return fig
def violin_colorscale(
    data,
    data_header,
    group_header,
    colors,
    use_colorscale,
    group_stats,
    rugplot,
    sort,
    height,
    width,
    title,
):
    """
    Refer to FigureFactory.create_violin() for docstring.

    Returns fig for violin plot with colorscale: each group's fill color
    is interpolated between colors[0] and colors[1] according to its value
    in ``group_stats``.
    """
    # collect all group names, preserving first-seen order
    group_name = []
    for name in data[group_header]:
        if name not in group_name:
            group_name.append(name)
    if sort:
        group_name.sort()
    # make sure all group names are keys in group_stats
    for group in group_name:
        if group not in group_stats:
            raise exceptions.PlotlyError(
                "All values/groups in the index "
                "column must be represented "
                "as a key in group_stats."
            )
    gb = data.groupby([group_header])
    L = len(group_name)
    fig = make_subplots(
        rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False
    )
    # prepare low and high color for colorscale
    lowcolor = clrs.color_parser(colors[0], clrs.unlabel_rgb)
    highcolor = clrs.color_parser(colors[1], clrs.unlabel_rgb)
    # find min and max values in group_stats
    group_stats_values = list(group_stats.values())
    max_value = max(group_stats_values)
    min_value = min(group_stats_values)
    for k, gr in enumerate(group_name):
        # np.float was removed from numpy; the builtin float is equivalent
        vals = np.asarray(gb.get_group(gr)[data_header], float)
        # find intermediate color from colorscale
        intermed = (group_stats[gr] - min_value) / (max_value - min_value)
        intermed_color = clrs.find_intermediate_color(lowcolor, highcolor, intermed)
        plot_data, plot_xrange = violinplot(
            vals, fillcolor="rgb{}".format(intermed_color), rugplot=rugplot
        )
        for item in plot_data:
            fig.append_trace(item, 1, k + 1)
        fig["layout"].update(
            {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}
        )
    # add colorbar to plot via an invisible dummy scatter trace
    trace_dummy = graph_objs.Scatter(
        x=[0],
        y=[0],
        mode="markers",
        marker=dict(
            size=2,
            cmin=min_value,
            cmax=max_value,
            colorscale=[[0, colors[0]], [1, colors[1]]],
            showscale=True,
        ),
        showlegend=False,
    )
    fig.append_trace(trace_dummy, 1, L)
    # set the sharey axis style
    fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})
    fig["layout"].update(
        title=title,
        showlegend=False,
        hovermode="closest",
        autosize=False,
        height=height,
        width=width,
    )
    return fig
def violin_dict(
    data,
    data_header,
    group_header,
    colors,
    use_colorscale,
    group_stats,
    rugplot,
    sort,
    height,
    width,
    title,
):
    """
    Refer to FigureFactory.create_violin() for docstring.

    Returns fig for violin plot without colorscale, using a dictionary
    that maps each group name to its fill color.
    """
    # collect all group names, preserving first-seen order
    group_name = []
    for name in data[group_header]:
        if name not in group_name:
            group_name.append(name)
    if sort:
        group_name.sort()
    # check if all group names appear in colors dict
    for group in group_name:
        if group not in colors:
            raise exceptions.PlotlyError(
                "If colors is a dictionary, all "
                "the group names must appear as "
                "keys in colors."
            )
    gb = data.groupby([group_header])
    L = len(group_name)
    fig = make_subplots(
        rows=1, cols=L, shared_yaxes=True, horizontal_spacing=0.025, print_grid=False
    )
    for k, gr in enumerate(group_name):
        # np.float was removed from numpy; the builtin float is equivalent
        vals = np.asarray(gb.get_group(gr)[data_header], float)
        plot_data, plot_xrange = violinplot(vals, fillcolor=colors[gr], rugplot=rugplot)
        for item in plot_data:
            fig.append_trace(item, 1, k + 1)
        # add violin plot labels
        fig["layout"].update(
            {"xaxis{}".format(k + 1): make_XAxis(group_name[k], plot_xrange)}
        )
    # set the sharey axis style
    fig["layout"].update({"yaxis{}".format(1): make_YAxis("")})
    fig["layout"].update(
        title=title,
        showlegend=False,
        hovermode="closest",
        autosize=False,
        height=height,
        width=width,
    )
    return fig
def create_violin(
    data,
    data_header=None,
    group_header=None,
    colors=None,
    use_colorscale=False,
    group_stats=None,
    rugplot=True,
    sort=False,
    height=450,
    width=600,
    title="Violin and Rug Plot",
):
    """
    **deprecated**, use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Violin`.
    :param (list|array) data: accepts either a list of numerical values,
        a list of dictionaries all with identical keys and at least one
        column of numeric values, or a pandas dataframe with at least one
        column of numbers.
    :param (str) data_header: the header of the data column to be used
        from an inputted pandas dataframe. Not applicable if 'data' is
        a list of numeric values.
    :param (str) group_header: applicable if grouping data by a variable.
        'group_header' must be set to the name of the grouping variable.
    :param (str|tuple|list|dict) colors: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colors is a list, it must contain valid color types as its
        members.
    :param (bool) use_colorscale: only applicable if grouping by another
        variable. Will implement a colorscale based on the first 2 colors
        of param colors. This means colors must be a list with at least 2
        colors in it (Plotly colorscales are accepted since they map to a
        list of two rgb colors). Default = False
    :param (dict) group_stats: a dictionary where each key is a unique
        value from the group_header column in data. Each value must be a
        number and will be used to color the violin plots if a colorscale
        is being used.
    :param (bool) rugplot: determines if a rugplot is draw on violin plot.
        Default = True
    :param (bool) sort: determines if violins are sorted
        alphabetically (True) or by input order (False). Default = False
    :param (float) height: the height of the violin plot.
    :param (float) width: the width of the violin plot.
    :param (str) title: the title of the violin plot.
    Example 1: Single Violin Plot
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> from scipy import stats
    >>> # create list of random values
    >>> data_list = np.random.randn(100)
    >>> # create violin fig
    >>> fig = create_violin(data_list, colors='#604d9e')
    >>> # plot
    >>> fig.show()
    Example 2: Multiple Violin Plots with Qualitative Coloring
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr=250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                    sort=True, height=600, width=1000)
    >>> # plot
    >>> fig.show()
    Example 3: Violin Plots with Colorscale
    >>> from plotly.figure_factory import create_violin
    >>> import plotly.graph_objs as graph_objects
    >>> import numpy as np
    >>> import pandas as pd
    >>> from scipy import stats
    >>> # create dataframe
    >>> np.random.seed(619517)
    >>> Nr=250
    >>> y = np.random.randn(Nr)
    >>> gr = np.random.choice(list("ABCDE"), Nr)
    >>> norm_params=[(0, 1.2), (0.7, 1), (-0.5, 1.4), (0.3, 1), (0.8, 0.9)]
    >>> for i, letter in enumerate("ABCDE"):
    ...     y[gr == letter] *=norm_params[i][1]+ norm_params[i][0]
    >>> df = pd.DataFrame(dict(Score=y, Group=gr))
    >>> # define header params
    >>> data_header = 'Score'
    >>> group_header = 'Group'
    >>> # make groupby object with pandas
    >>> group_stats = {}
    >>> groupby_data = df.groupby([group_header])
    >>> for group in "ABCDE":
    ...     data_from_group = groupby_data.get_group(group)[data_header]
    ...     # take a stat of the grouped data
    ...     stat = np.median(data_from_group)
    ...     # add to dictionary
    ...     group_stats[group] = stat
    >>> # create violin fig
    >>> fig = create_violin(df, data_header='Score', group_header='Group',
    ...                     height=600, width=1000, use_colorscale=True,
    ...                     group_stats=group_stats)
    >>> # plot
    >>> fig.show()
    """
    # Validate colors
    if isinstance(colors, dict):
        valid_colors = clrs.validate_colors_dict(colors, "rgb")
    else:
        valid_colors = clrs.validate_colors(colors, "rgb")
    # validate data and choose plot type
    if group_header is None:
        # ungrouped data: a single violin from a list or one dataframe column
        if isinstance(data, list):
            if len(data) <= 0:
                raise exceptions.PlotlyError(
                    "If data is a list, it must be "
                    "nonempty and contain either "
                    "numbers or dictionaries."
                )
            if not all(isinstance(element, Number) for element in data):
                raise exceptions.PlotlyError(
                    "If data is a list, it must " "contain only numbers."
                )
        if pd and isinstance(data, pd.core.frame.DataFrame):
            if data_header is None:
                raise exceptions.PlotlyError(
                    "data_header must be the "
                    "column name with the "
                    "desired numeric data for "
                    "the violin plot."
                )
            data = data[data_header].values.tolist()
        # call the plotting functions
        plot_data, plot_xrange = violinplot(
            data, fillcolor=valid_colors[0], rugplot=rugplot
        )
        layout = graph_objs.Layout(
            title=title,
            autosize=False,
            font=graph_objs.layout.Font(size=11),
            height=height,
            showlegend=False,
            width=width,
            xaxis=make_XAxis("", plot_xrange),
            yaxis=make_YAxis(""),
            hovermode="closest",
        )
        layout["yaxis"].update(dict(showline=False, showticklabels=False, ticks=""))
        fig = graph_objs.Figure(data=plot_data, layout=layout)
        return fig
    else:
        # grouped data: dispatch on how colors should be assigned.
        # NOTE(review): this branch assumes pandas is importable (pd is not
        # None); if pandas is missing the isinstance check below raises
        # AttributeError rather than a clear error - confirm intended.
        if not isinstance(data, pd.core.frame.DataFrame):
            raise exceptions.PlotlyError(
                "Error. You must use a pandas "
                "DataFrame if you are using a "
                "group header."
            )
        if data_header is None:
            raise exceptions.PlotlyError(
                "data_header must be the column "
                "name with the desired numeric "
                "data for the violin plot."
            )
        if use_colorscale is False:
            if isinstance(valid_colors, dict):
                # validate colors dict choice below
                fig = violin_dict(
                    data,
                    data_header,
                    group_header,
                    valid_colors,
                    use_colorscale,
                    group_stats,
                    rugplot,
                    sort,
                    height,
                    width,
                    title,
                )
                return fig
            else:
                fig = violin_no_colorscale(
                    data,
                    data_header,
                    group_header,
                    valid_colors,
                    use_colorscale,
                    group_stats,
                    rugplot,
                    sort,
                    height,
                    width,
                    title,
                )
                return fig
        else:
            if isinstance(valid_colors, dict):
                raise exceptions.PlotlyError(
                    "The colors param cannot be "
                    "a dictionary if you are "
                    "using a colorscale."
                )
            if len(valid_colors) < 2:
                raise exceptions.PlotlyError(
                    "colors must be a list with "
                    "at least 2 colors. A "
                    "Plotly scale is allowed."
                )
            if not isinstance(group_stats, dict):
                raise exceptions.PlotlyError(
                    "Your group_stats param " "must be a dictionary."
                )
            fig = violin_colorscale(
                data,
                data_header,
                group_header,
                valid_colors,
                use_colorscale,
                group_stats,
                rugplot,
                sort,
                height,
                width,
                title,
            )
            return fig
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/_steps.py | <filename>env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/_steps.py
import _plotly_utils.basevalidators
class StepsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``steps`` compound-array property of
    ``indicator.gauge`` (an array of ``Step`` objects)."""
    def __init__(self, plotly_name="steps", parent_name="indicator.gauge", **kwargs):
        # data_class_str and data_docs may be overridden by the caller via
        # **kwargs; this is the standard pattern for generated validators.
        super(StepsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Step"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the background color of the arc.
            line
                :class:`plotly.graph_objects.indicator.gauge.st
                ep.Line` instance or dict with compatible
                properties
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            range
                Sets the range of this axis.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            thickness
                Sets the thickness of the bar as a fraction of
                the total thickness of the gauge.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/tslibs/test_timezones.py | from datetime import datetime
import dateutil.tz
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
from pandas import Timestamp
@pytest.mark.parametrize("tz_name", list(pytz.common_timezones))
def test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name):
    """pytz and dateutil zones of the same name must not share a cache key."""
    if tz_name == "UTC":
        pytest.skip("UTC: special case in dateutil")
    pytz_zone = timezones.maybe_get_tz(tz_name)
    dateutil_zone = timezones.maybe_get_tz("dateutil/" + tz_name)
    if dateutil_zone is None:
        pytest.skip(tz_name + ": dateutil does not know about this one")
    pytz_key = timezones._p_tz_cache_key(pytz_zone)
    dateutil_key = timezones._p_tz_cache_key(dateutil_zone)
    assert pytz_key != dateutil_key
def test_tzlocal_repr():
    # see gh-13583
    local_zone = dateutil.tz.tzlocal()
    ts = Timestamp("2011-01-01", tz=local_zone)
    assert ts.tz == local_zone
    assert "tz='tzlocal()')" in repr(ts)
def test_tzlocal_maybe_get_tz():
    """The string "tzlocal()" resolves to dateutil's local timezone (gh-13583)."""
    resolved = timezones.maybe_get_tz("tzlocal()")
    assert resolved == dateutil.tz.tzlocal()
def test_tzlocal_offset():
    """A tzlocal Timestamp's value differs from naive by the UTC offset (gh-13583)."""
    stamp = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal())

    # Derive the expected offset from a plain datetime, in nanoseconds.
    utc_offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
    offset_ns = utc_offset.total_seconds() * 1000000000

    assert stamp.value + offset_ns == Timestamp("2011-01-01").value
@pytest.fixture(
    params=[
        (pytz.timezone("US/Eastern"), lambda tz, x: tz.localize(x)),
        (dateutil.tz.gettz("US/Eastern"), lambda tz, x: x.replace(tzinfo=tz)),
    ]
)
def infer_setup(request):
    """Provide a tz, its localizer, localized endpoints and naive counterparts."""
    eastern, localize = request.param

    naive_start = datetime(2001, 1, 1)
    naive_end = datetime(2009, 1, 1)
    aware_start = localize(eastern, naive_start)
    aware_end = localize(eastern, naive_end)

    return eastern, localize, aware_start, aware_end, naive_start, naive_end
def test_infer_tz_compat(infer_setup):
    """infer_tzinfo returns the localized tzinfo for aware/None combinations."""
    eastern, _, start, end, start_naive, end_naive = infer_setup

    # localize_pydatetime yields the canonical tzinfo object for each endpoint;
    # for both pytz and dateutil this object is stable across calls.
    tzinfo_start = conversion.localize_pydatetime(start_naive, eastern).tzinfo
    tzinfo_end = conversion.localize_pydatetime(end_naive, eastern).tzinfo

    assert timezones.infer_tzinfo(start, end) is tzinfo_start
    assert timezones.infer_tzinfo(start, None) is tzinfo_start
    assert timezones.infer_tzinfo(None, end) is tzinfo_end
def test_infer_tz_utc_localize(infer_setup):
    """UTC-localized endpoints infer pytz.utc regardless of the fixture tz."""
    _, _, _, _, start_naive, end_naive = infer_setup
    utc = pytz.utc

    utc_start = utc.localize(start_naive)
    utc_end = utc.localize(end_naive)
    assert timezones.infer_tzinfo(utc_start, utc_end) is utc
@pytest.mark.parametrize("ordered", [True, False])
def test_infer_tz_mismatch(infer_setup, ordered):
    """Mixing UTC and US/Eastern endpoints raises, in either argument order."""
    eastern, _, _, _, start_naive, end_naive = infer_setup
    msg = "Inputs must both have the same timezone"

    utc_start = pytz.utc.localize(start_naive)
    eastern_end = conversion.localize_pydatetime(end_naive, eastern)
    if ordered:
        args = (utc_start, eastern_end)
    else:
        args = (eastern_end, utc_start)

    with pytest.raises(AssertionError, match=msg):
        timezones.infer_tzinfo(*args)
def test_maybe_get_tz_invalid_types():
    """maybe_get_tz rejects non-timezone inputs with a TypeError naming the type."""
    bad_inputs = [
        (44.0, "<class 'float'>"),
        (pytz, "<class 'module'>"),
        (Timestamp.now("UTC"), "<class 'pandas._libs.tslibs.timestamps.Timestamp'>"),
    ]
    for bad_value, pattern in bad_inputs:
        with pytest.raises(TypeError, match=pattern):
            timezones.maybe_get_tz(bad_value)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/geo/_fitbounds.py | import _plotly_utils.basevalidators
class FitboundsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated ``layout.geo.fitbounds`` property."""

    def __init__(self, plotly_name="fitbounds", parent_name="layout.geo", **kwargs):
        # Pop defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [False, "locations", "geojson"])
        super(FitboundsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_hoverdistance.py | import _plotly_utils.basevalidators
class HoverdistanceValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the integer ``layout.hoverdistance`` property (min -1)."""

    def __init__(self, plotly_name="hoverdistance", parent_name="layout", **kwargs):
        # Pop defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "none")
        min_value = kwargs.pop("min", -1)
        role = kwargs.pop("role", "info")
        super(HoverdistanceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=min_value,
            role=role,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_sliderdefaults.py | import _plotly_utils.basevalidators
class SliderdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``layout.sliderdefaults`` (Slider template defaults)."""

    def __init__(self, plotly_name="sliderdefaults", parent_name="layout", **kwargs):
        # Pop defaults out of kwargs first so explicit overrides win.
        data_class_str = kwargs.pop("data_class_str", "Slider")
        data_docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super(SliderdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/_layout.py | import _plotly_utils.basevalidators
class LayoutValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``layout.template.layout`` property."""

    def __init__(self, plotly_name="layout", parent_name="layout.template", **kwargs):
        # Pop defaults out of kwargs first so explicit overrides win.
        data_class_str = kwargs.pop("data_class_str", "Layout")
        data_docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super(LayoutValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/distutils/tests/test_system_info.py | <gh_stars>1000+
import os
import shutil
import pytest
from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
from numpy.testing import assert_, assert_equal, assert_raises
from numpy.distutils import ccompiler, customized_ccompiler
from numpy.distutils.system_info import system_info, ConfigParser, mkl_info
from numpy.distutils.system_info import AliasedOptionError
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
from numpy.distutils import _shell_utils
def get_class(name, notfound_action=1):
    """
    Return an instance of the fake ``system_info`` subclass named *name*.

    Unknown names fall back to the generic ``_system_info``.

    notfound_action:
    0 - do nothing
    1 - display warning message
    2 - raise error

    NOTE(review): ``notfound_action`` is currently unused by this helper.
    """
    # Map the lowercase section name to its test info class.
    cl = {'temp1': Temp1Info,
          'temp2': Temp2Info,
          'duplicate_options': DuplicateOptionInfo,
          }.get(name.lower(), _system_info)
    return cl()
# site.cfg template with an [ALL] section, two per-library sections and a
# section carrying aliased/duplicate options; {dir*}/{lib*}/{pathsep}
# placeholders are filled with concrete temp paths in setup() below.
simple_site = """
[ALL]
library_dirs = {dir1:s}{pathsep:s}{dir2:s}
libraries = {lib1:s},{lib2:s}
extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
runtime_library_dirs = {dir1:s}
[temp1]
library_dirs = {dir1:s}
libraries = {lib1:s}
runtime_library_dirs = {dir1:s}
[temp2]
library_dirs = {dir2:s}
libraries = {lib2:s}
extra_link_args = -Wl,-rpath={lib2_escaped:s}
rpath = {dir2:s}
[duplicate_options]
mylib_libs = {lib1:s}
libraries = {lib2:s}
"""
# Module-global rendered copy; setup() re-renders it with concrete paths.
site_cfg = simple_site
# Minimal C source compiled by the tests that need a real object file.
fakelib_c_text = """
/* This file is generated from numpy/distutils/testing/test_system_info.py */
#include<stdio.h>
void foo(void) {
printf("Hello foo");
}
void bar(void) {
printf("Hello bar");
}
"""
def have_compiler():
    """ Return True if there appears to be an executable compiler
    """
    compiler = customized_ccompiler()
    try:
        cmd = compiler.compiler  # Unix compilers expose the command list directly
    except AttributeError:
        try:
            if not compiler.initialized:
                compiler.initialize()  # MSVC is different: must initialize() first
        except (DistutilsError, ValueError):
            return False
        cmd = [compiler.cc]
    try:
        # Launching the compiler binary is the only reliable existence check.
        p = Popen(cmd, stdout=PIPE, stderr=PIPE)
        p.stdout.close()
        p.stderr.close()
        p.wait()
    except OSError:
        return False
    return True


# Evaluated once at import time; compilation tests below skip when False.
HAVE_COMPILER = have_compiler()
class _system_info(system_info):
    """Fake ``system_info`` base whose config file is supplied by the tests.

    Unlike the real class it does not parse config files on construction,
    and ``_check_libs`` accepts every library so no real files are required.
    """

    def __init__(self,
                 default_lib_dirs=default_lib_dirs,
                 default_include_dirs=default_include_dirs,
                 verbosity=1,
                 ):
        # Reset any cached info on the class so tests don't leak state.
        self.__class__.info = {}
        self.local_prefixes = []
        defaults = {'library_dirs': '',
                    'include_dirs': '',
                    'runtime_library_dirs': '',
                    'rpath': '',
                    'src_dirs': '',
                    'search_static_first': "0",
                    'extra_compile_args': '',
                    'extra_link_args': ''}
        self.cp = ConfigParser(defaults)
        # We have to parse the config files afterwards
        # to have a consistent temporary filepath

    def _check_libs(self, lib_dirs, libs, opt_libs, exts):
        """Override _check_libs to return with all dirs """
        info = {'libraries': libs, 'library_dirs': lib_dirs}
        return info
class Temp1Info(_system_info):
    """For testing purposes: reads the ``[temp1]`` section."""
    section = 'temp1'


class Temp2Info(_system_info):
    """For testing purposes: reads the ``[temp2]`` section."""
    section = 'temp2'


class DuplicateOptionInfo(_system_info):
    """For testing purposes: section with aliased/duplicate library options."""
    section = 'duplicate_options'
class TestSystemInfoReading:
    """End-to-end checks of ``system_info`` config parsing and compilation."""

    def setup(self):
        """ Create the libraries """
        # Create 2 sources and 2 libraries
        self._dir1 = mkdtemp()
        self._src1 = os.path.join(self._dir1, 'foo.c')
        self._lib1 = os.path.join(self._dir1, 'libfoo.so')
        self._dir2 = mkdtemp()
        self._src2 = os.path.join(self._dir2, 'bar.c')
        self._lib2 = os.path.join(self._dir2, 'libbar.so')
        # Update local site.cfg
        global simple_site, site_cfg
        site_cfg = simple_site.format(**{
            'dir1': self._dir1,
            'lib1': self._lib1,
            'dir2': self._dir2,
            'lib2': self._lib2,
            'pathsep': os.pathsep,
            'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
        })
        # Write site.cfg
        fd, self._sitecfg = mkstemp()
        os.close(fd)
        with open(self._sitecfg, 'w') as fd:
            fd.write(site_cfg)
        # Write the sources
        with open(self._src1, 'w') as fd:
            fd.write(fakelib_c_text)
        with open(self._src2, 'w') as fd:
            fd.write(fakelib_c_text)
        # We create all class-instances

        def site_and_parse(c, site_cfg):
            # Point the instance at our temp site.cfg and parse it.
            c.files = [site_cfg]
            c.parse_config_files()
            return c
        self.c_default = site_and_parse(get_class('default'), self._sitecfg)
        self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
        self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
        self.c_dup_options = site_and_parse(get_class('duplicate_options'),
                                            self._sitecfg)

    def teardown(self):
        # Do each removal separately so one failure doesn't skip the rest.
        try:
            shutil.rmtree(self._dir1)
        except Exception:
            pass
        try:
            shutil.rmtree(self._dir2)
        except Exception:
            pass
        try:
            os.remove(self._sitecfg)
        except Exception:
            pass

    def test_all(self):
        # Read in all information in the ALL block
        tsi = self.c_default
        assert_equal(tsi.get_lib_dirs(), [self._dir1, self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
        extra = tsi.calc_extra_info()
        # Quoted "/path with/spaces" must survive shell-style splitting intact.
        assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])

    def test_temp1(self):
        # Read in all information in the temp1 block
        tsi = self.c_temp1
        assert_equal(tsi.get_lib_dirs(), [self._dir1])
        assert_equal(tsi.get_libraries(), [self._lib1])
        assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])

    def test_temp2(self):
        # Read in all information in the temp2 block
        tsi = self.c_temp2
        assert_equal(tsi.get_lib_dirs(), [self._dir2])
        assert_equal(tsi.get_libraries(), [self._lib2])
        # Now from rpath and not runtime_library_dirs
        assert_equal(tsi.get_runtime_lib_dirs(key='rpath'), [self._dir2])
        extra = tsi.calc_extra_info()
        assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])

    def test_duplicate_options(self):
        # Ensure that duplicates are raising an AliasedOptionError
        tsi = self.c_dup_options
        assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
        # Each alias is still individually retrievable.
        assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
        assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    def test_compile1(self):
        # Compile source and link the first source
        c = customized_ccompiler()
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir1)
            c.compile([os.path.basename(self._src1)], output_dir=self._dir1)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src1.replace('.c', '.o')) or
                    os.path.isfile(self._src1.replace('.c', '.obj')))
        finally:
            os.chdir(previousDir)

    @pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
    @pytest.mark.skipif('msvc' in repr(ccompiler.new_compiler()),
                        reason="Fails with MSVC compiler ")
    def test_compile2(self):
        # Compile source and link the second source
        tsi = self.c_temp2
        c = customized_ccompiler()
        extra_link_args = tsi.calc_extra_info()['extra_link_args']
        previousDir = os.getcwd()
        try:
            # Change directory to not screw up directories
            os.chdir(self._dir2)
            c.compile([os.path.basename(self._src2)], output_dir=self._dir2,
                      extra_postargs=extra_link_args)
            # Ensure that the object exists
            assert_(os.path.isfile(self._src2.replace('.c', '.o')))
        finally:
            os.chdir(previousDir)

    def test_overrides(self):
        # A section must not silently inherit missing values from [ALL],
        # but [mkl] and [DEFAULT] sections do supply them.
        previousDir = os.getcwd()
        cfg = os.path.join(self._dir1, 'site.cfg')
        shutil.copy(self._sitecfg, cfg)
        try:
            os.chdir(self._dir1)
            # Check that the '[ALL]' section does not override
            # missing values from other sections
            info = mkl_info()
            lib_dirs = info.cp['ALL']['library_dirs'].split(os.pathsep)
            assert info.get_lib_dirs() != lib_dirs

            # But if we copy the values to a '[mkl]' section the value
            # is correct
            with open(cfg, 'r') as fid:
                mkl = fid.read().replace('ALL', 'mkl')
            with open(cfg, 'w') as fid:
                fid.write(mkl)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs

            # Also, the values will be taken from a section named '[DEFAULT]'
            with open(cfg, 'r') as fid:
                dflt = fid.read().replace('mkl', 'DEFAULT')
            with open(cfg, 'w') as fid:
                fid.write(dflt)
            info = mkl_info()
            assert info.get_lib_dirs() == lib_dirs
        finally:
            os.chdir(previousDir)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_parcoords.py | <gh_stars>1000+
from plotly.graph_objs import Parcoords
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/use_rules.py | #!/usr/bin/env python
"""
Build 'use others module data' mechanism for f2py2e.
Unfinished.
Copyright 2000 <NAME> all rights reserved,
<NAME> <<EMAIL>>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2000/09/10 12:35:43 $
<NAME>
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.3 $"[10:-1]
f2py_version = 'See `f2py -v`'
from .auxfuncs import (
applyrules, dictappend, gentitle, hasnote, outmess
)
# Rule templates consumed by applyrules(); tokens such as #name#, #apiname#,
# #usemodulename# are substituted from the dict built in buildusevar().
# The 'body' entry is a C source template for a getter wrapping one module
# variable; 'method' is its PyMethodDef table row.
usemodule_rules = {
    'body': """
#begintitle#
static char doc_#apiname#[] = \"\\\nVariable wrapper signature:\\n\\
\t #name# = get_#name#()\\n\\
Arguments:\\n\\
#docstr#\";
extern F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#);
static PyObject *#apiname#(PyObject *capi_self, PyObject *capi_args) {
/*#decl#*/
\tif (!PyArg_ParseTuple(capi_args, \"\")) goto capi_fail;
printf(\"c: %d\\n\",F_MODFUNC(#usemodulename#,#USEMODULENAME#,#realname#,#REALNAME#));
\treturn Py_BuildValue(\"\");
capi_fail:
\treturn NULL;
}
""",
    'method': '\t{\"get_#name#\",#apiname#,METH_VARARGS|METH_KEYWORDS,doc_#apiname#},',
    'need': ['F_MODFUNC']
}
################
def buildusevars(m, r):
    """
    Build wrapper hooks for all variables used from another Fortran module.

    ``m`` is the module descriptor (must contain 'name' and 'vars');
    ``r`` is the use-statement descriptor, optionally carrying a rename
    'map' ({local_name: real_name}) and an 'only' flag restricting the
    import to the mapped names.  Returns a rule dictionary accumulated
    with dictappend() from one buildusevar() call per exposed variable.
    """
    ret = {}
    outmess(
        '\t\tBuilding use variable hooks for module "%s" (feature only for F90/F95)...\n' % (m['name']))
    # varsmap: local name -> real module variable name to expose.
    varsmap = {}
    # revmap: real name -> local name, detecting conflicting renames.
    revmap = {}
    if 'map' in r:
        for k in r['map'].keys():
            if r['map'][k] in revmap:
                # Two local names mapping onto one real variable: keep the first.
                outmess('\t\t\tVariable "%s<=%s" is already mapped by "%s". Skipping.\n' % (
                    r['map'][k], k, revmap[r['map'][k]]))
            else:
                revmap[r['map'][k]] = k
    if 'only' in r and r['only']:
        # Only the explicitly mapped names are exposed.
        for v in r['map'].keys():
            if r['map'][v] in m['vars']:

                if revmap[r['map'][v]] == v:
                    varsmap[v] = r['map'][v]
                else:
                    outmess('\t\t\tIgnoring map "%s=>%s". See above.\n' %
                            (v, r['map'][v]))
            else:
                outmess(
                    '\t\t\tNo definition for variable "%s=>%s". Skipping.\n' % (v, r['map'][v]))
    else:
        # Expose every module variable, applying renames where present.
        for v in m['vars'].keys():
            if v in revmap:
                varsmap[v] = revmap[v]
            else:
                varsmap[v] = v
    for v in varsmap.keys():
        ret = dictappend(ret, buildusevar(v, varsmap[v], m['vars'], m['name']))
    return ret
def buildusevar(name, realname, vars, usemodulename):
    """
    Build the applyrules() dictionary wrapping one used-module variable.

    Parameters
    ----------
    name : str
        Name the variable is accessed by (possibly renamed via ``=>``).
    realname : str
        Actual variable name inside the used Fortran module.
    vars : dict
        Variable-descriptor dictionary of the module being wrapped.
    usemodulename : str
        Name of the Fortran module the variable comes from.

    Returns
    -------
    dict
        Rule dictionary produced by applying ``usemodule_rules``.
    """
    outmess('\t\t\tConstructing wrapper function for variable "%s=>%s"...\n' % (
        name, realname))
    vrd = {'name': name,
           'realname': realname,
           'REALNAME': realname.upper(),
           'usemodulename': usemodulename,
           'USEMODULENAME': usemodulename.upper(),
           'texname': name.replace('_', '\\_'),
           'begintitle': gentitle('%s=>%s' % (name, realname)),
           'endtitle': gentitle('end of %s=>%s' % (name, realname)),
           'apiname': '#modulename#_use_%s_from_%s' % (realname, usemodulename)
           }
    # LaTeX-safe name: digits become Roman-numeral-ish tokens.
    nummap = {0: 'Ro', 1: 'Ri', 2: 'Rii', 3: 'Riii', 4: 'Riv',
              5: 'Rv', 6: 'Rvi', 7: 'Rvii', 8: 'Rviii', 9: 'Rix'}
    vrd['texnamename'] = name
    for i in nummap.keys():
        vrd['texnamename'] = vrd['texnamename'].replace(repr(i), nummap[i])
    if hasnote(vars[realname]):
        vrd['note'] = vars[realname]['note']
    rd = dictappend({}, vrd)
    # FIX: removed a leftover debug ``print(name, realname, vars[realname])``
    # that wrote to stdout on every call; diagnostics belong in outmess().
    # Also dropped a dead ``ret = {}`` initialization that was immediately
    # overwritten by the applyrules() result.
    return applyrules(usemodule_rules, rd)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/util/test_assert_produces_warning.py | import warnings
import pytest
import pandas._testing as tm
def f():
    """Emit one FutureWarning ("f1") followed by one RuntimeWarning ("f2")."""
    for message, category in (("f1", FutureWarning), ("f2", RuntimeWarning)):
        warnings.warn(message, category)
@pytest.mark.filterwarnings("ignore:f1:FutureWarning")
def test_assert_produces_warning_honors_filter():
    """A filtered-out warning still counts as unexpected unless opted out."""
    # By default the extra FutureWarning trips the checker.
    msg = r"Caused unexpected warning\(s\)"
    with pytest.raises(AssertionError, match=msg):
        with tm.assert_produces_warning(RuntimeWarning):
            f()

    # Opting out via raise_on_extra_warnings tolerates it.
    with tm.assert_produces_warning(RuntimeWarning, raise_on_extra_warnings=False):
        f()
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/groupby/test_function.py | import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
    params=[np.int32, np.int64, np.float32, np.float64],
    ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
    """
    Fixture of numpy dtypes with min and max values used for testing
    cummin and cummax
    """
    dtype = request.param
    # iinfo/finfo expose .min/.max uniformly for integer vs float dtypes.
    if np.dtype(dtype).kind == "i":
        bounds = np.iinfo(dtype)
    else:
        bounds = np.finfo(dtype)
    return (dtype, bounds.min, bounds.max)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
    "vals",
    [
        ["foo", "bar", "baz"],
        ["foo", "", ""],
        ["", "", ""],
        [1, 2, 3],
        [1, 0, 0],
        [0, 0, 0],
        [1.0, 2.0, 3.0],
        [1.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [True, True, True],
        [True, False, False],
        [False, False, False],
        [np.nan, np.nan, np.nan],
    ],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
    """GroupBy.any/all agree with the Python builtins, honoring skipna."""
    df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})

    # The Python builtin gives the reference answer for one group's values.
    expected_value = getattr(builtins, agg_func)(vals)
    if skipna and all(isna(vals)) and agg_func == "any":
        # All-NaN with skipna: nothing truthy remains, so 'any' is False.
        expected_value = False

    expected = DataFrame(
        [expected_value] * 2, columns=["val"], index=Index(["a", "b"], name="key")
    )
    result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
    tm.assert_frame_equal(result, expected)
def test_max_min_non_numeric():
    # GH 2700: max/min keep non-numeric columns regardless of numeric_only.
    aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})

    for op in ("max", "min"):
        assert "ss" in getattr(aa.groupby("nn"), op)()
        assert "ss" in getattr(aa.groupby("nn"), op)(numeric_only=False)
def test_min_date_with_nans():
    # GH 26321: grouped min over python ``date`` objects works with NaNs present.
    dates = pd.to_datetime(
        pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
    ).dt.date
    df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})

    expected = pd.to_datetime(
        pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
    ).dt.date

    result = df.groupby("b", as_index=False)["c"].min()["c"]
    tm.assert_series_equal(result, expected)

    expected.index.name = "b"
    result = df.groupby("b")["c"].min()
    tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
    # The Python builtin ``sum`` is intercepted and mapped onto pandas'
    # nan-skipping sum for both agg and apply.
    s = Series([1.0, 2.0, np.nan, 3.0])
    grouped = s.groupby([0, 1, 2, 2])
    expected = grouped.sum()

    tm.assert_series_equal(grouped.agg(builtins.sum), expected)
    tm.assert_series_equal(grouped.apply(builtins.sum), expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]])  # single key / multi-key
def test_builtins_apply(keys, f):
    """Builtins dispatched through apply match the numpy equivalent and the
    named cython aggregation (see gh-8155).

    FIX: removed two commented-out duplicate decorator lines (dead code)
    that preceded this test.
    """
    df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
    df["jolie"] = np.random.randn(1000)

    fname = f.__name__
    result = df.groupby(keys).apply(f)
    ngroups = len(df.drop_duplicates(subset=keys))

    assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
    assert result.shape == (ngroups, 3), assert_msg

    # numpy's equivalent function must agree
    tm.assert_frame_equal(result, df.groupby(keys).apply(getattr(np, fname)))

    if f != sum:
        expected = df.groupby(keys).agg(fname).reset_index()
        expected.set_index(keys, inplace=True, drop=False)
        tm.assert_frame_equal(result, expected, check_dtype=False)

    tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
    """Check kwargs (``numeric_only``) pass through to groupby aggregations
    and that each aggregation keeps/drops the expected dtype columns."""
    # make sure that we are passing thru kwargs
    # to our agg functions

    # GH3668
    # GH5724
    # One column per dtype family, grouped into two groups.
    df = pd.DataFrame(
        {
            "group": [1, 1, 2],
            "int": [1, 2, 3],
            "float": [4.0, 5.0, 6.0],
            "string": list("abc"),
            "category_string": pd.Series(list("abc")).astype("category"),
            "category_int": [7, 8, 9],
            "datetime": pd.date_range("20130101", periods=3),
            "datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
            "timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
        },
        columns=[
            "group",
            "int",
            "float",
            "string",
            "category_string",
            "category_int",
            "datetime",
            "datetimetz",
            "timedelta",
        ],
    )

    expected_columns_numeric = Index(["int", "float", "category_int"])

    # mean / median
    expected = pd.DataFrame(
        {
            "category_int": [7.5, 9],
            "float": [4.5, 6.0],
            "timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
            "int": [1.5, 3],
            "datetime": [
                pd.Timestamp("2013-01-01 12:00:00"),
                pd.Timestamp("2013-01-03 00:00:00"),
            ],
            "datetimetz": [
                pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
                pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
            ],
        },
        index=Index([1, 2], name="group"),
        columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
    )

    for attr in ["mean", "median"]:
        result = getattr(df.groupby("group"), attr)()
        tm.assert_index_equal(result.columns, expected_columns_numeric)

        result = getattr(df.groupby("group"), attr)(numeric_only=False)
        tm.assert_frame_equal(result.reindex_like(expected), expected)

    # TODO: min, max *should* handle
    # categorical (ordered) dtype
    expected_columns = Index(
        [
            "int",
            "float",
            "string",
            "category_int",
            "datetime",
            "datetimetz",
            "timedelta",
        ]
    )
    for attr in ["min", "max"]:
        result = getattr(df.groupby("group"), attr)()
        tm.assert_index_equal(result.columns, expected_columns)

        result = getattr(df.groupby("group"), attr)(numeric_only=False)
        tm.assert_index_equal(result.columns, expected_columns)

    # first/last additionally keep the categorical string column.
    expected_columns = Index(
        [
            "int",
            "float",
            "string",
            "category_string",
            "category_int",
            "datetime",
            "datetimetz",
            "timedelta",
        ]
    )
    for attr in ["first", "last"]:
        result = getattr(df.groupby("group"), attr)()
        tm.assert_index_equal(result.columns, expected_columns)

        result = getattr(df.groupby("group"), attr)(numeric_only=False)
        tm.assert_index_equal(result.columns, expected_columns)

    # sum concatenates strings, so 'string' survives with numeric_only=False.
    expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])

    result = df.groupby("group").sum()
    tm.assert_index_equal(result.columns, expected_columns_numeric)

    result = df.groupby("group").sum(numeric_only=False)
    tm.assert_index_equal(result.columns, expected_columns)

    expected_columns = Index(["int", "float", "category_int"])
    for attr in ["prod", "cumprod"]:
        result = getattr(df.groupby("group"), attr)()
        tm.assert_index_equal(result.columns, expected_columns_numeric)

        result = getattr(df.groupby("group"), attr)(numeric_only=False)
        tm.assert_index_equal(result.columns, expected_columns)

    # like min, max, but don't include strings
    expected_columns = Index(
        ["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
    )
    for attr in ["cummin", "cummax"]:
        result = getattr(df.groupby("group"), attr)()
        # GH 15561: numeric_only=False set by default like min/max
        tm.assert_index_equal(result.columns, expected_columns)

        result = getattr(df.groupby("group"), attr)(numeric_only=False)
        tm.assert_index_equal(result.columns, expected_columns)

    expected_columns = Index(["int", "float", "category_int", "timedelta"])

    result = getattr(df.groupby("group"), "cumsum")()
    tm.assert_index_equal(result.columns, expected_columns_numeric)

    result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
    tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
    """Non-cython groupby calls must not include the grouper column (GH5610)."""
    # GH5610
    # non-cython calls should not include the grouper

    df = DataFrame(
        [[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
    )
    g = df.groupby("A")
    gni = df.groupby("A", as_index=False)

    # mad: grouper 'A' is the index with as_index=True, a column otherwise.
    expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
    expected.index.name = "A"
    result = g.mad()
    tm.assert_frame_equal(result, expected)

    expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
    result = gni.mad()
    tm.assert_frame_equal(result, expected)

    # describe
    expected_index = pd.Index([1, 3], name="A")
    expected_col = pd.MultiIndex(
        levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
        codes=[[0] * 8, list(range(8))],
    )
    expected = pd.DataFrame(
        [
            [1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
            [0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
        ],
        index=expected_index,
        columns=expected_col,
    )
    result = g.describe()
    tm.assert_frame_equal(result, expected)

    # as_index=False describe matches per-group describe frames stacked.
    expected = pd.concat(
        [
            df[df.A == 1].describe().unstack().to_frame().T,
            df[df.A == 3].describe().unstack().to_frame().T,
        ]
    )
    expected.index = pd.Index([0, 1])
    result = gni.describe()
    tm.assert_frame_equal(result, expected)

    # any
    expected = DataFrame(
        [[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
    )
    expected.index.name = "A"
    result = g.any()
    tm.assert_frame_equal(result, expected)

    # idxmax
    expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
    expected.index.name = "A"
    result = g.idxmax()
    tm.assert_frame_equal(result, expected)
def test_cython_api2():
    # cumsum takes the fast apply path (GH 5614).
    df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
    expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
    tm.assert_frame_equal(df.groupby("A").cumsum(), expected)

    # GH 5755: cumsum is a transformer, so as_index is irrelevant.
    tm.assert_frame_equal(df.groupby("A", as_index=False).cumsum(), expected)

    # GH 13994: axis=1 cum-ops reduce to the plain frame-level cum-ops.
    for op in ("cumsum", "cumprod"):
        result = getattr(df.groupby("A"), op)(axis=1)
        tm.assert_frame_equal(result, getattr(df, op)(axis=1))
def test_cython_median():
    # Cython median must match a generic nan-aware aggregation, including
    # when the group labels themselves contain NaN.
    df = DataFrame(np.random.randn(1000))
    df.values[::2] = np.nan

    labels = np.random.randint(0, 50, size=1000).astype(float)
    labels[::17] = np.nan

    tm.assert_frame_equal(
        df.groupby(labels).median(), df.groupby(labels).agg(nanops.nanmedian)
    )

    wide = DataFrame(np.random.randn(1000, 5))
    tm.assert_frame_equal(
        wide.groupby(labels).agg(np.median), wide.groupby(labels).median()
    )
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
    "method,data",
    [
        ("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
        ("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
        ("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
        ("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
    ],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
    # GH9311, GH6620: non-arithmetic aggregations preserve the input dtype.
    df = pd.DataFrame(
        [{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
    )
    df["b"] = df.b.astype(dtype)

    if "args" not in data:
        data["args"] = []

    # count always yields int64; everything else keeps the input dtype.
    out_type = data.get("out_type", dtype)

    expected = pd.DataFrame(data["df"])
    expected["b"] = expected.b.astype(out_type)
    expected.set_index("a", inplace=True)

    result = getattr(df.groupby("a"), method)(*data["args"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "i",
    [
        (
            Timestamp("2011-01-15 12:50:28.502376"),
            Timestamp("2011-01-20 12:50:28.593448"),
        ),
        (24650000000000001, 24650000000000002),
    ],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
    # see gh-6620, gh-9311: high-precision values must survive these aggs.
    lo, hi = i
    df = pd.DataFrame([{"a": 1, "b": lo}, {"a": 1, "b": hi}])

    cases = {
        "first": ([], lo),
        "last": ([], hi),
        "min": ([], lo),
        "max": ([], hi),
        "nth": ([1], hi),
        "count": ([], 2),
    }
    for method, (args, expected) in cases.items():
        grouped = df.groupby("a")
        res = getattr(grouped, method)(*args)
        assert res.iloc[0].b == expected
@pytest.mark.parametrize(
    "func, values",
    [
        ("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
        ("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
    ],
)
def test_idxmin_idxmax_returns_int_types(func, values):
    # GH 25444: idxmin/idxmax yield integer positions for every column dtype.
    df = pd.DataFrame(
        {
            "name": ["A", "A", "B", "B"],
            "c_int": [1, 2, 3, 4],
            "c_float": [4.02, 3.03, 2.04, 1.05],
            "c_date": ["2019", "2018", "2016", "2017"],
        }
    )
    df["c_date"] = pd.to_datetime(df["c_date"])

    expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
    result = getattr(df.groupby("name"), func)()
    tm.assert_frame_equal(result, expected)
def test_fill_consistency():
    # GH 9221: ffill within level-0 groups must match the transposed
    # column-wise equivalent.
    df = DataFrame(
        index=pd.MultiIndex.from_product(
            [["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
        ),
        columns=Index(["1", "2"], name="id"),
    )
    df["1"] = [np.nan, 1, np.nan, np.nan, 11, np.nan,
               np.nan, 2, np.nan, np.nan, 22, np.nan]
    df["2"] = [np.nan, 3, np.nan, np.nan, 33, np.nan,
               np.nan, 4, np.nan, np.nan, 44, np.nan]

    expected = df.groupby(level=0, axis=0).fillna(method="ffill")
    result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
    tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
    # GH 4095: cython cumprod must match an apply-based python cumprod.
    df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(df.groupby("key")["value"].cumprod(), expected)

    # With 100 factors of 2 the int64 product overflows; groupby cumprod
    # casts to float while raw numpy would hand back invalid values, so
    # build the expectation from a float column.
    df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
    actual = df.groupby("key")["value"].cumprod()
    df["value"] = df["value"].astype(float)
    expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
    expected.name = "value"
    tm.assert_series_equal(actual, expected)
def scipy_sem(*values, **kwargs):
    """Standard error of the mean via scipy's ``sem``, pinned to ``ddof=1``."""
    from scipy.stats import sem

    return sem(*values, ddof=1, **kwargs)
@pytest.mark.parametrize(
    "op,targop",
    [
        ("mean", np.mean),
        ("median", np.median),
        ("std", np.std),
        ("var", np.var),
        ("sum", np.sum),
        ("prod", np.prod),
        ("min", np.min),
        ("max", np.max),
        ("first", lambda x: x.iloc[0]),
        ("last", lambda x: x.iloc[-1]),
        ("count", np.size),
        pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
    ],
)
def test_ops_general(op, targop):
    """Cython groupby reductions agree with their numpy/python targets."""
    frame = DataFrame(np.random.randn(1000))
    keys = np.random.randint(0, 50, size=1000).astype(float)

    result = getattr(frame.groupby(keys), op)().astype(float)
    expected = frame.groupby(keys).agg(targop)
    tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
    # Grouped max over a string column must not manufacture NaNs.
    raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
    df = pd.read_csv(StringIO(raw), parse_dates=[0])
    gb = df.groupby("Date")

    frame_max = gb[["File"]].max()
    series_max = gb["File"].max().to_frame()
    tm.assert_frame_equal(frame_max, series_max)
    assert not frame_max["File"].isna().any()
def test_nlargest():
    """SeriesGroupBy.nlargest picks per-group top values, honoring ``keep``."""
    grouping = Series(list("a" * 5 + "b" * 5))

    values = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    expected = Series(
        [7, 5, 3, 10, 9, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
    )
    tm.assert_series_equal(values.groupby(grouping).nlargest(3), expected)

    # Ties resolved toward the last occurrence with keep="last".
    values = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    expected = Series(
        [3, 2, 1, 3, 3, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
    )
    tm.assert_series_equal(values.groupby(grouping).nlargest(3, keep="last"), expected)
def test_nlargest_mi_grouper():
    """nlargest works when grouping a MultiIndex-ed Series by a level name.

    See gh-21411.
    """
    rng = np.random.RandomState(123456789)
    dts = date_range("20180101", periods=10)
    idx = MultiIndex.from_product([dts, ["one", "two"]], names=["first", "second"])
    ser = Series(rng.randn(20), index=idx)

    result = ser.groupby("first").nlargest(1)

    # which "second" label wins within each date, for this fixed seed
    winners = ["one", "one", "one", "two", "one", "one", "one", "one", "two", "one"]
    exp_idx = MultiIndex.from_tuples(
        [(d, d, w) for d, w in zip(dts, winners)],
        names=["first", "first", "second"],
    )
    exp_values = [
        2.2129019979039612,
        1.8417114045748335,
        0.858963679564603,
        1.3759151378258088,
        0.9430284594687134,
        0.5296914208183142,
        0.8318045593815487,
        -0.8476703342910327,
        0.3804446884133735,
        -0.8028845810770998,
    ]
    expected = Series(exp_values, index=exp_idx)
    tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
    """SeriesGroupBy.nsmallest keeps per-group bottom values and positions."""
    ser = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
    keys = Series(list("a" * 5 + "b" * 5))

    result = ser.groupby(keys).nsmallest(3)
    expected = Series(
        [1, 2, 3, 0, 4, 6],
        index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
    )
    tm.assert_series_equal(result, expected)

    # keep="last" resolves ties in favour of later positions
    ser = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
    expected = Series(
        [0, 1, 1, 0, 1, 2],
        index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
    )
    tm.assert_series_equal(ser.groupby(keys).nsmallest(3, keep="last"), expected)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
    """numpy-style positional/keyword arguments are rejected (gh-12811)."""
    df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
    method = getattr(df.groupby("A"), func)
    msg = "numpy operations are not valid with groupby"
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        method(1, 2, 3)
    with pytest.raises(UnsupportedFunctionCall, match=msg):
        method(foo=1)
def test_cummin(numpy_dtypes_for_minmax):
    """groupby().cummin() matches per-group running minima across dtypes.

    ``numpy_dtypes_for_minmax`` is a fixture; from its usage here it supplies
    ``(dtype, min_val, ...)`` -- the dtype under test and that dtype's minimum
    representable value.
    """
    dtype = numpy_dtypes_for_minmax[0]
    min_val = numpy_dtypes_for_minmax[1]
    # GH 15048
    base_df = pd.DataFrame(
        {"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
    )
    # hand-computed running minimum of "B" within each "A" group
    expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
    df = base_df.astype(dtype)
    expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    # sanity-check against a plain per-group Series.cummin()
    result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)
    # Test w/ min value for dtype: once hit, it propagates through the group
    df.loc[[2, 6], "B"] = min_val
    expected.loc[[2, 3, 6, 7], "B"] = min_val
    result = df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)
    # Test nan in some values: NaN rows stay NaN, others skip over them
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
    result = base_df.groupby("A").cummin()
    tm.assert_frame_equal(result, expected)
    expected = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
    tm.assert_frame_equal(result, expected)
    # GH 15561: datetime64 values are supported
    df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
    expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
    result = df.groupby("a")["b"].cummin()
    tm.assert_series_equal(expected, result)
    # GH 15635: result keeps the original (unsorted) row order
    df = pd.DataFrame(dict(a=[1, 2, 1], b=[1, 2, 2]))
    result = df.groupby("a").b.cummin()
    expected = pd.Series([1, 2, 1], name="b")
    tm.assert_series_equal(result, expected)
def test_cummin_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummin()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(expected, result)
def test_cummax(numpy_dtypes_for_minmax):
    """groupby().cummax() matches per-group running maxima across dtypes.

    ``numpy_dtypes_for_minmax`` is a fixture; from its usage here it supplies
    ``(dtype, ..., max_val)`` -- the dtype under test and that dtype's maximum
    representable value.
    """
    dtype = numpy_dtypes_for_minmax[0]
    max_val = numpy_dtypes_for_minmax[2]
    # GH 15048
    base_df = pd.DataFrame(
        {"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
    )
    # hand-computed running maximum of "B" within each "A" group
    expected_maxs = [3, 4, 4, 4, 2, 3, 3, 3]
    df = base_df.astype(dtype)
    expected = pd.DataFrame({"B": expected_maxs}).astype(dtype)
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    # sanity-check against a plain per-group Series.cummax()
    result = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)
    # Test w/ max value for dtype: once hit, it propagates through the group
    df.loc[[2, 6], "B"] = max_val
    expected.loc[[2, 3, 6, 7], "B"] = max_val
    result = df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)
    # Test nan in some values: NaN rows stay NaN, others skip over them
    base_df.loc[[0, 2, 4, 6], "B"] = np.nan
    expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 4, np.nan, 3, np.nan, 3]})
    result = base_df.groupby("A").cummax()
    tm.assert_frame_equal(result, expected)
    expected = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
    tm.assert_frame_equal(result, expected)
    # GH 15561: datetime64 values are supported
    df = pd.DataFrame(dict(a=[1], b=pd.to_datetime(["2001"])))
    expected = pd.Series(pd.to_datetime("2001"), index=[0], name="b")
    result = df.groupby("a")["b"].cummax()
    tm.assert_series_equal(expected, result)
    # GH 15635: result keeps the original (unsorted) row order
    df = pd.DataFrame(dict(a=[1, 2, 1], b=[2, 1, 1]))
    result = df.groupby("a").b.cummax()
    expected = pd.Series([2, 1, 2], name="b")
    tm.assert_series_equal(result, expected)
def test_cummax_all_nan_column():
base_df = pd.DataFrame({"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [np.nan] * 8})
expected = pd.DataFrame({"B": [np.nan] * 8})
result = base_df.groupby("A").cummax()
tm.assert_frame_equal(expected, result)
result = base_df.groupby("A").B.apply(lambda x: x.cummax()).to_frame()
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize(
    "in_vals, out_vals",
    [
        # Basics: strictly increasing (T), strictly decreasing (F),
        # abs val increasing (F), non-strictly increasing (T)
        ([1, 2, 5, 3, 2, 0, 4, 5, -6, 1, 1], [True, False, False, True]),
        # Test with inf vals
        (
            [1, 2.1, np.inf, 3, 2, np.inf, -np.inf, 5, 11, 1, -np.inf],
            [True, False, True, False],
        ),
        # Test with nan vals; should always be False
        (
            [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
            [False, False, False, False],
        ),
    ],
)
def test_is_monotonic_increasing(in_vals, out_vals):
    """Per-group is_monotonic_increasing on a SeriesGroupBy (GH 17015)."""
    frame = pd.DataFrame(
        {
            "A": [str(i) for i in range(1, 12)],
            "B": list("aaabbbcccdd"),
            "C": in_vals,
        }
    )
    result = frame.groupby("B").C.is_monotonic_increasing
    expected = pd.Series(out_vals, index=Index(list("abcd"), name="B"), name="C")
    tm.assert_series_equal(result, expected)

    # cross-check against computing x.is_monotonic_increasing per group
    manual = frame.groupby(["B"]).C.apply(lambda x: x.is_monotonic_increasing)
    tm.assert_series_equal(result, manual)
@pytest.mark.parametrize(
    "in_vals, out_vals",
    [
        # Basics: strictly decreasing (T), strictly increasing (F),
        # abs val decreasing (F), non-strictly decreasing (T)
        ([10, 9, 7, 3, 4, 5, -3, 2, 0, 1, 1], [True, False, False, True]),
        # Test with inf vals
        (
            [np.inf, 1, -np.inf, np.inf, 2, -3, -np.inf, 5, -3, -np.inf, -np.inf],
            [True, True, False, True],
        ),
        # Test with nan vals; should always be False
        (
            [1, 2, np.nan, 3, 2, np.nan, np.nan, 5, -np.inf, 1, np.nan],
            [False, False, False, False],
        ),
    ],
)
def test_is_monotonic_decreasing(in_vals, out_vals):
    """Per-group is_monotonic_decreasing on a SeriesGroupBy (GH 17015)."""
    frame = pd.DataFrame(
        {
            "A": [str(i) for i in range(1, 12)],
            "B": list("aaabbbcccdd"),
            "C": in_vals,
        }
    )
    result = frame.groupby("B").C.is_monotonic_decreasing
    expected = pd.Series(out_vals, index=Index(list("abcd"), name="B"), name="C")
    tm.assert_series_equal(result, expected)
# describe
# --------------------------------
def test_apply_describe_bug(mframe):
grouped = mframe.groupby(level="first")
grouped.describe() # it works!
def test_series_describe_multikey():
    """describe() columns line up with the matching direct reductions."""
    ts = tm.makeTimeSeries()
    grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
    described = grouped.describe()
    for col, reduction in [("mean", grouped.mean), ("std", grouped.std), ("min", grouped.min)]:
        tm.assert_series_equal(described[col], reduction(), check_names=False)
def test_series_describe_single():
    """apply(describe) equals describe().stack() for a single grouping key."""
    ts = tm.makeTimeSeries()
    grouped = ts.groupby(lambda x: x.month)
    result = grouped.apply(lambda x: x.describe())
    tm.assert_series_equal(result, grouped.describe().stack())
def test_series_index_name(df):
grouped = df.loc[:, ["C"]].groupby(df["A"])
result = grouped.agg(lambda x: x.mean())
assert result.index.name == "A"
def test_frame_describe_multikey(tsframe):
    """Frame-level describe() equals column-wise describes stitched together.

    ``tsframe`` is a fixture; from its usage here it is assumed to be a
    DataFrame with a date-like index and columns A-D -- TODO confirm against
    the conftest definition.
    """
    grouped = tsframe.groupby([lambda x: x.year, lambda x: x.month])
    result = grouped.describe()
    desc_groups = []
    for col in tsframe:
        group = grouped[col].describe()
        # GH 17464 - Remove duplicate MultiIndex levels: rebuild columns as
        # (column name, statistic) pairs
        group_col = pd.MultiIndex(
            levels=[[col], group.columns],
            codes=[[0] * len(group.columns), range(len(group.columns))],
        )
        group = pd.DataFrame(group.values, columns=group_col, index=group.index)
        desc_groups.append(group)
    expected = pd.concat(desc_groups, axis=1)
    tm.assert_frame_equal(result, expected)
    # Column-axis grouping: describe() should match describing the whole
    # frame and transposing, with the group label as the outer index level.
    groupedT = tsframe.groupby({"A": 0, "B": 0, "C": 1, "D": 1}, axis=1)
    result = groupedT.describe()
    expected = tsframe.describe().T
    expected.index = pd.MultiIndex(
        levels=[[0, 1], expected.index],
        codes=[[0, 0, 1, 1], range(len(expected.index))],
    )
    tm.assert_frame_equal(result, expected)
def test_frame_describe_tupleindex():
    """Tuple-valued group keys make describe() raise a clear error.

    GH 14848 - regression from 0.19.0 to 0.19.1.
    """
    df1 = DataFrame(
        {
            "x": [1, 2, 3, 4, 5] * 3,
            "y": [10, 20, 30, 40, 50] * 3,
            "z": [100, 200, 300, 400, 500] * 3,
        }
    )
    df1["k"] = [(0, 0, 1), (0, 1, 0), (1, 0, 0)] * 5
    df2 = df1.rename(columns={"k": "key"})
    msg = "Names should be list-like for a MultiIndex"
    for frame, key in ((df1, "k"), (df2, "key")):
        with pytest.raises(ValueError, match=msg):
            frame.groupby(key).describe()
def test_frame_describe_unstacked_format():
    """Grouped describe of one column comes back unstacked, one row per key.

    GH 4792.
    """
    timestamps = [
        pd.Timestamp("2011-01-06 10:59:05"),
        pd.Timestamp("2011-01-06 12:43:33"),
        pd.Timestamp("2011-01-06 12:54:09"),
    ]
    df = pd.DataFrame(
        {
            "PRICE": dict(zip(timestamps, [24990, 25499, 25499])),
            "VOLUME": dict(zip(timestamps, [1500000000, 5000000000, 100000000])),
        }
    )
    result = df.groupby("PRICE").VOLUME.describe()

    data = [
        df[df.PRICE == price].VOLUME.describe().values.tolist()
        for price in (24990, 25499)
    ]
    expected = pd.DataFrame(
        data,
        index=pd.Index([24990, 25499], name="PRICE"),
        columns=["count", "mean", "std", "min", "25%", "50%", "75%", "max"],
    )
    tm.assert_frame_equal(result, expected)
def test_groupby_mean_no_overflow():
    """Grouped mean of large uint64-range ints must not overflow (#22487)."""
    connections = [4970, 4749, 4719, 4704, 18446744073699999744]
    df = pd.DataFrame({"user": ["A"] * 5, "connections": connections})
    assert df.groupby("user")["connections"].mean()["A"] == 3689348814740003840
@pytest.mark.parametrize(
    "values",
    [
        {
            "a": [1, 1, 1, 2, 2, 2, 3, 3, 3],
            "b": [1, pd.NA, 2, 1, pd.NA, 2, 1, pd.NA, 2],
        },
        {"a": [1, 1, 2, 2, 3, 3], "b": [1, 2, 1, 2, 1, 2]},
    ],
)
@pytest.mark.parametrize("function", ["mean", "median", "var"])
def test_apply_to_nullable_integer_returns_float(values, function):
    """mean/median/var on Int64 groups come back as float.

    https://github.com/pandas-dev/pandas/issues/32219
    """
    output = 0.5 if function == "var" else 1.5
    idx = pd.Index([1, 2, 3], dtype=object, name="a")
    expected = pd.DataFrame({"b": np.full(3, output, dtype=float)}, index=idx)
    groups = pd.DataFrame(values, dtype="Int64").groupby("a")

    # direct method call and string agg must match
    tm.assert_frame_equal(getattr(groups, function)(), expected)
    tm.assert_frame_equal(groups.agg(function), expected)

    # list agg adds a column level for the function name
    result = groups.agg([function])
    expected.columns = MultiIndex.from_tuples([("b", function)])
    tm.assert_frame_equal(result, expected)
def test_groupby_sum_below_mincount_nullable_integer():
    """sum with min_count above the group size yields NA, keeping Int64 dtype.

    https://github.com/pandas-dev/pandas/issues/32861
    """
    df = pd.DataFrame({"a": [0, 1, 2], "b": [0, 1, 2], "c": [0, 1, 2]}, dtype="Int64")
    grouped = df.groupby("a")
    idx = pd.Index([0, 1, 2], dtype=object, name="a")

    # every group has a single row, so min_count=2 produces all-NA output
    result = grouped["b"].sum(min_count=2)
    expected = pd.Series([pd.NA] * 3, dtype="Int64", index=idx, name="b")
    tm.assert_series_equal(result, expected)

    result = grouped.sum(min_count=2)
    expected = pd.DataFrame(
        {"b": [pd.NA] * 3, "c": [pd.NA] * 3}, dtype="Int64", index=idx
    )
    tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/isosurface/_spaceframe.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class SpaceframeValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``isosurface.spaceframe`` compound property.

    Auto-generated style of validator: the ``data_docs`` text is surfaced to
    users in help/error messages, so typos there are user-facing (two fixed
    below: "sued" -> "used", "every faces" -> "every face").
    """

    def __init__(self, plotly_name="spaceframe", parent_name="isosurface", **kwargs):
        super(SpaceframeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Spaceframe"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `spaceframe`
                elements. The default fill value is 0.15
                meaning that only 15% of the area of every
                face of tetras would be shaded. Applying a
                greater `fill` ratio would allow the creation
                of stronger elements or could be used to have
                entirely closed areas (in case of using 1).
            show
                Displays/hides tetrahedron shapes between
                minimum and maximum iso-values. Often useful
                when either caps or surfaces are disabled or
                filled with values less than 1.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_heatmapgl.py | <reponame>acrucetta/Chicago_COVI_WebApp
from plotly.graph_objs import Heatmapgl
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/express/data.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import absolute_import
from plotly.data import *
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/_title.py | import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    """Validator for the ``indicator.title`` compound property (auto-generated)."""

    def __init__(self, plotly_name="title", parent_name="indicator", **kwargs):
        # data_class_str / data_docs may be overridden by the caller; the
        # defaults below document plotly.graph_objs.indicator.Title.
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Title"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            align
                Sets the horizontal alignment of the title. It
                defaults to `center` except for bullet charts
                for which it defaults to right.
            font
                Set the font used to display the title
            text
                Sets the title of this indicator.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_astype.py | <gh_stars>100-1000
from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
Float64Index,
Index,
Int64Index,
NaT,
Timedelta,
TimedeltaIndex,
timedelta_range,
)
import pandas._testing as tm
class TestTimedeltaIndex:
    """Tests for TimedeltaIndex.astype.

    Uses ``np.nan`` rather than the ``np.NaN`` alias throughout: the alias was
    removed in NumPy 2.0 and ``np.nan`` is what the rest of these tests use.
    """

    def test_astype_object(self):
        """astype(object) yields an object Index of Timedelta scalars."""
        idx = timedelta_range(start="1 days", periods=4, freq="D", name="idx")
        expected_list = [
            Timedelta("1 days"),
            Timedelta("2 days"),
            Timedelta("3 days"),
            Timedelta("4 days"),
        ]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name="idx")
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    def test_astype_object_with_nat(self):
        """NaT survives astype(object) as the NaT singleton."""
        idx = TimedeltaIndex(
            [timedelta(days=1), timedelta(days=2), NaT, timedelta(days=4)], name="idx"
        )
        expected_list = [
            Timedelta("1 days"),
            Timedelta("2 days"),
            NaT,
            Timedelta("4 days"),
        ]
        result = idx.astype(object)
        expected = Index(expected_list, dtype=object, name="idx")
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    def test_astype(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan], name="idx")
        result = idx.astype(object)
        expected = Index(
            [Timedelta("1 days 03:46:40")] + [NaT] * 3, dtype=object, name="idx"
        )
        tm.assert_index_equal(result, expected)
        # casting to int maps NaT to iNaT (the int64 sentinel)
        result = idx.astype(int)
        expected = Int64Index(
            [100000000000000] + [-9223372036854775808] * 3, dtype=np.int64, name="idx"
        )
        tm.assert_index_equal(result, expected)
        result = idx.astype(str)
        expected = Index([str(x) for x in idx], name="idx")
        tm.assert_index_equal(result, expected)
        # "i8" view round-trips through asi8
        rng = timedelta_range("1 days", periods=10)
        result = rng.astype("i8")
        tm.assert_index_equal(result, Index(rng.asi8))
        tm.assert_numpy_array_equal(rng.asi8, result.values)

    def test_astype_uint(self):
        """Unsigned casts produce a UInt64Index (uint32 upcasts to uint64)."""
        arr = timedelta_range("1H", periods=2)
        expected = pd.UInt64Index(
            np.array([3600000000000, 90000000000000], dtype="uint64")
        )
        tm.assert_index_equal(arr.astype("uint64"), expected)
        tm.assert_index_equal(arr.astype("uint32"), expected)

    def test_astype_timedelta64(self):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
        # unit-less "timedelta64" falls back to float64 with NaN for NaT
        result = idx.astype("timedelta64")
        expected = Float64Index([1e14] + [np.nan] * 3, dtype="float64")
        tm.assert_index_equal(result, expected)
        # same-dtype astype copies by default, is a no-op with copy=False
        result = idx.astype("timedelta64[ns]")
        tm.assert_index_equal(result, idx)
        assert result is not idx
        result = idx.astype("timedelta64[ns]", copy=False)
        tm.assert_index_equal(result, idx)
        assert result is idx

    @pytest.mark.parametrize("dtype", [float, "datetime64", "datetime64[ns]"])
    def test_astype_raises(self, dtype):
        # GH 13149, GH 13209
        idx = TimedeltaIndex([1e14, "NaT", NaT, np.nan])
        msg = "Cannot cast TimedeltaArray to dtype"
        with pytest.raises(TypeError, match=msg):
            idx.astype(dtype)

    def test_astype_category(self):
        """astype("category") works on both the Index and its backing array."""
        obj = pd.timedelta_range("1H", periods=2, freq="H")
        result = obj.astype("category")
        expected = pd.CategoricalIndex([pd.Timedelta("1H"), pd.Timedelta("2H")])
        tm.assert_index_equal(result, expected)
        result = obj._data.astype("category")
        expected = expected.values
        tm.assert_categorical_equal(result, expected)

    def test_astype_array_fallback(self):
        """Casts to non-datetimelike dtypes (bool) fall back to ndarray casting."""
        obj = pd.timedelta_range("1H", periods=2)
        result = obj.astype(bool)
        expected = pd.Index(np.array([True, True]))
        tm.assert_index_equal(result, expected)
        result = obj._data.astype(bool)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/presentation_objs.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
# Importing this module triggers _chart_studio_error -- presumably raising an
# informative error because Chart Studio features moved to the separate
# `chart_studio` package; verify against _plotly_future_.
_chart_studio_error("presentation_objs")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/geo/projection/rotation/__init__.py | import sys
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so import eagerly;
# newer interpreters expose the validators lazily via relative_import.
if sys.version_info < (3, 7):
    from ._roll import RollValidator
    from ._lon import LonValidator
    from ._lat import LatValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._roll.RollValidator", "._lon.LonValidator", "._lat.LatValidator"],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/indexing/test_setitem.py | <reponame>acrucetta/Chicago_COVI_WebApp
import numpy as np
from pandas import NaT, Series, date_range
class TestSetitemDT64Values:
    """Assigning missing values into a datetime64 Series coerces them to NaT."""

    def test_setitem_none_nan(self):
        ser = Series(date_range("1/1/2000", periods=10))

        # scalar and slice assignment of None
        ser[3] = None
        assert ser[3] is NaT
        ser[3:5] = None
        assert ser[4] is NaT

        # scalar and slice assignment of np.nan
        ser[5] = np.nan
        assert ser[5] is NaT
        ser[5:7] = np.nan
        assert ser[6] is NaT
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.