repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/series/indexing/test_callable.py | import pandas as pd
import pandas._testing as tm
def test_getitem_callable():
    """Indexing a Series with a callable uses the callable's result as the key (GH 12533)."""
    ser = pd.Series(4, index=list("ABCD"))

    # Callable returning a scalar label behaves like .loc with that label.
    assert ser[lambda x: "A"] == ser.loc["A"]

    # Callable returning a list of labels.
    tm.assert_series_equal(ser[lambda x: ["A", "B"]], ser.loc[["A", "B"]])

    # Callable returning a boolean mask.
    tm.assert_series_equal(
        ser[lambda x: [True, False, True, True]], ser.iloc[[0, 2, 3]]
    )
def test_setitem_callable():
    """Assignment through a callable indexer targets the label it returns (GH 12533)."""
    ser = pd.Series([1, 2, 3, 4], index=list("ABCD"))
    ser[lambda x: "A"] = -1

    expected = pd.Series([-1, 2, 3, 4], index=list("ABCD"))
    tm.assert_series_equal(ser, expected)
def test_setitem_other_callable():
    """A callable on the RHS of a masked assignment is stored as a value, not invoked (GH 13299)."""
    inc = lambda x: x + 1

    ser = pd.Series([1, 2, -1, 4])
    ser[ser < 0] = inc

    # The function object itself ends up in the Series (object dtype).
    tm.assert_series_equal(ser, pd.Series([1, 2, inc, 4]))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/colors.py | from __future__ import absolute_import
from _plotly_utils.colors import * # noqa: F401
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/groupby/test_pipe.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>100-1000
import numpy as np
import pandas as pd
from pandas import DataFrame, Index
import pandas._testing as tm
def test_pipe():
    """GroupBy.pipe chains into NDFrame.pipe (GH 17871).

    Transformation chain: GroupBy -> Series (via ``f``), then
    Series -> Series (via ``square``), exercising both
    GroupBy.pipe and NDFrame.pipe.
    """
    rng = np.random.RandomState(1234567890)
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": rng.randn(8),
            "C": rng.randn(8),
        }
    )

    def f(dfgb):
        # GroupBy -> Series
        return dfgb.B.max() - dfgb.C.min().min()

    def square(srs):
        # Series -> Series
        return srs ** 2

    result = df.groupby("A").pipe(f).pipe(square)

    expected_index = Index(["bar", "foo"], dtype="object", name="A")
    expected = pd.Series([8.99110003361, 8.17516964785], name="B", index=expected_index)
    tm.assert_series_equal(expected, result)
def test_pipe_args():
# Test passing args to the pipe method of DataFrameGroupBy.
# Issue #17871
df = pd.DataFrame(
{
"group": ["A", "A", "B", "B", "C"],
"x": [1.0, 2.0, 3.0, 2.0, 5.0],
"y": [10.0, 100.0, 1000.0, -100.0, -1000.0],
}
)
def f(dfgb, arg1):
return dfgb.filter(lambda grp: grp.y.mean() > arg1, dropna=False).groupby(
dfgb.grouper
)
def g(dfgb, arg2):
return dfgb.sum() / dfgb.sum().sum() + arg2
def h(df, arg3):
return df.x + df.y - arg3
result = df.groupby("group").pipe(f, 0).pipe(g, 10).pipe(h, 100)
# Assert the results here
index = pd.Index(["A", "B", "C"], name="group")
expected = pd.Series([-79.5160891089, -78.4839108911, -80], index=index)
tm.assert_series_equal(expected, result)
# test SeriesGroupby.pipe
ser = pd.Series([1, 1, 2, 2, 3, 3])
result = ser.groupby(ser).pipe(lambda grp: grp.sum() * grp.count())
expected = pd.Series([4, 8, 12], index=pd.Int64Index([1, 2, 3]))
tm.assert_series_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_waterfall.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_waterfall.py<gh_stars>1000+
from plotly.graph_objs import Waterfall
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_sunburst.py | from plotly.graph_objs import Sunburst
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/waterfall/hoverlabel/__init__.py | import sys
if sys.version_info < (3, 7):
from ._font import Font
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/sankey/link/_colorscaledefaults.py | import _plotly_utils.basevalidators
class ColorscaledefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for the compound ``colorscaledefaults``
    property of ``sankey.link``; assigned values are coerced through the
    ``Colorscale`` class named in ``data_class_str``."""

    def __init__(
        self, plotly_name="colorscaledefaults", parent_name="sankey.link", **kwargs
    ):
        super(ColorscaledefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the compound class used to construct/validate values.
            data_class_str=kwargs.pop("data_class_str", "Colorscale"),
            data_docs=kwargs.pop(
                "data_docs",
                """
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/camera/_up.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class UpValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for ``layout.scene.camera.up``; assigned
    values are coerced through the compound ``Up`` class (x/y/z components)."""

    def __init__(self, plotly_name="up", parent_name="layout.scene.camera", **kwargs):
        super(UpValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Up"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            x
            y
            z
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/session.py | from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
_chart_studio_error("session")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/caps/_x.py | <filename>env/lib/python3.8/site-packages/plotly/validators/volume/caps/_x.py
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Auto-generated validator for ``volume.caps.x``; assigned values are
    coerced through the compound ``X`` class."""

    def __init__(self, plotly_name="x", parent_name="volume.caps", **kwargs):
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "X"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `caps`. The default
                fill value of the `caps` is 1 meaning that they
                are entirely shaded. On the other hand Applying
                a `fill` ratio less than one would allow the
                creation of openings parallel to the edges.
            show
                Sets the fill ratio of the `slices`. The
                default fill value of the x `slices` is 1
                meaning that they are entirely shaded. On the
                other hand Applying a `fill` ratio less than
                one would allow the creation of openings
                parallel to the edges.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/modebar/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>10-100
import sys

# Python < 3.7 lacks module-level __getattr__ (PEP 562), so the validator
# submodules must be imported eagerly; on newer interpreters they are
# registered for lazy import on first attribute access instead.
if sys.version_info < (3, 7):
    from ._uirevision import UirevisionValidator
    from ._orientation import OrientationValidator
    from ._color import ColorValidator
    from ._bgcolor import BgcolorValidator
    from ._activecolor import ActivecolorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._uirevision.UirevisionValidator",
            "._orientation.OrientationValidator",
            "._color.ColorValidator",
            "._bgcolor.BgcolorValidator",
            "._activecolor.ActivecolorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_histogram2dcontour.py | from plotly.graph_objs import Histogram2dContour
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/tests/test_npy_pkg_config.py | <gh_stars>1000+
from __future__ import division, absolute_import, print_function
import os
from numpy.distutils.npy_pkg_config import read_config, parse_flags
from numpy.testing import temppath, assert_
# Minimal .ini config: a [meta] section plus literal cflags/libs in [default].
simple = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[default]
cflags = -I/usr/include
libs = -L/usr/lib
"""
# Expected values once ``simple`` is parsed by read_config.
# NOTE(review): the libs value is keyed 'libflags' here — matches how the
# tests below index it, not the ini key name.
simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
            'version': '0.1', 'name': 'foo'}

# Same as ``simple`` but with a [variables] section exercising ${...}
# interpolation in cflags/libs.
simple_variable = """\
[meta]
Name = foo
Description = foo lib
Version = 0.1
[variables]
prefix = /foo/bar
libdir = ${prefix}/lib
includedir = ${prefix}/include
[default]
cflags = -I${includedir}
libs = -L${libdir}
"""
# Expected values after variable interpolation.
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
                     'version': '0.1', 'name': 'foo'}
class TestLibraryInfo(object):
    """Round-trip the module-level .ini fixtures through read_config."""

    def test_simple(self):
        with temppath('foo.ini') as path:
            with open(path, 'w') as fh:
                fh.write(simple)
            # read_config takes the path without its extension.
            info = read_config(os.path.splitext(path)[0])
            assert_(info.cflags() == simple_d['cflags'])
            assert_(info.libs() == simple_d['libflags'])
            assert_(info.name == simple_d['name'])
            assert_(info.version == simple_d['version'])

    def test_simple_variable(self):
        with temppath('foo.ini') as path:
            with open(path, 'w') as fh:
                fh.write(simple_variable)
            info = read_config(os.path.splitext(path)[0])
            assert_(info.cflags() == simple_variable_d['cflags'])
            assert_(info.libs() == simple_variable_d['libflags'])
            assert_(info.name == simple_variable_d['name'])
            assert_(info.version == simple_variable_d['version'])
            # Interpolation variables may be overridden after parsing.
            info.vars['prefix'] = '/Users/david'
            assert_(info.cflags() == '-I/Users/david/include')
class TestParseFlags(object):
    """parse_flags splits compiler/linker flag strings into categorized lists."""

    def test_simple_cflags(self):
        assert_(parse_flags("-I/usr/include")['include_dirs'] == ['/usr/include'])

        flags = parse_flags("-I/usr/include -DFOO")
        assert_(flags['include_dirs'] == ['/usr/include'])
        assert_(flags['macros'] == ['FOO'])

        # A space between -I and its path is also accepted.
        flags = parse_flags("-I /usr/include -DFOO")
        assert_(flags['include_dirs'] == ['/usr/include'])
        assert_(flags['macros'] == ['FOO'])

    def test_simple_lflags(self):
        flags = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
        assert_(flags['library_dirs'] == ['/usr/lib', '/usr/lib'])
        assert_(flags['libraries'] == ['foo', 'bar'])

        # Same, with a space after -L; duplicates are preserved in order.
        flags = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
        assert_(flags['library_dirs'] == ['/usr/lib', '/usr/lib'])
        assert_(flags['libraries'] == ['foo', 'bar'])
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/yaxis/_spikesnap.py | import _plotly_utils.basevalidators
class SpikesnapValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Auto-generated validator for ``layout.yaxis.spikesnap``: restricts the
    value to one of "data", "cursor" or "hovered data"."""

    def __init__(self, plotly_name="spikesnap", parent_name="layout.yaxis", **kwargs):
        super(SpikesnapValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "style"),
            # Allowed enum values; anything else is rejected.
            values=kwargs.pop("values", ["data", "cursor", "hovered data"]),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/testing/tests/test_utils.py | <filename>.venv/lib/python3.8/site-packages/numpy/testing/tests/test_utils.py<gh_stars>1000+
import warnings
import sys
import os
import itertools
import pytest
import weakref
import numpy as np
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_less, build_err_msg, raises,
assert_raises, assert_warns, assert_no_warnings, assert_allclose,
assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
class _GenericTest:
    """Shared test cases for the assert_* function test classes below.

    Subclasses bind the function under test to ``self._assert_func`` in their
    ``setup``; the generic cases then exercise it via the two helpers.
    """

    def _test_equal(self, a, b):
        # Must not raise for inputs the function considers equal.
        self._assert_func(a, b)

    def _test_not_equal(self, a, b):
        # Must raise AssertionError for inputs the function considers unequal.
        with assert_raises(AssertionError):
            self._assert_func(a, b)

    def test_array_rank1_eq(self):
        """Test two equal array of rank 1 are found equal."""
        a = np.array([1, 2])
        b = np.array([1, 2])
        self._test_equal(a, b)

    def test_array_rank1_noteq(self):
        """Test two different array of rank 1 are found not equal."""
        a = np.array([1, 2])
        b = np.array([2, 2])
        self._test_not_equal(a, b)

    def test_array_rank2_eq(self):
        """Test two equal array of rank 2 are found equal."""
        a = np.array([[1, 2], [3, 4]])
        b = np.array([[1, 2], [3, 4]])
        self._test_equal(a, b)

    def test_array_diffshape(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array([1, 2])
        b = np.array([[1, 2], [1, 2]])
        self._test_not_equal(a, b)

    def test_objarray(self):
        """Test object arrays."""
        a = np.array([1, 1], dtype=object)
        # Scalar is broadcast against the object array.
        self._test_equal(a, 1)

    def test_array_likes(self):
        # Array-likes (list vs tuple) are converted before comparison.
        self._test_equal([1, 2, 3], (1, 2, 3))
class TestArrayEqual(_GenericTest):
    """Cases specific to assert_array_equal: dtypes, NaN, masked arrays,
    record arrays, and ndarray subclasses."""

    def setup(self):
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_0_ndim_array(self):
        # Values wide enough that equality needs exact big-integer handling.
        x = np.array(473963742225900817127911193656584771)
        y = np.array(18535119325151578301457182298393896)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

        x = np.array(43)
        y = np.array(10)
        assert_raises(AssertionError, self._assert_func, x, y)

        y = x
        self._assert_func(x, y)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        # Test numeric types and object
        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)

        # Test strings
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        # NaN compares equal to NaN under assert_array_equal.
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])
        self._test_equal(a, b)

        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])
        self._test_equal(a, b)

        c = np.array(['floupipi', 'floupa'])
        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()

        self._test_equal(a, b)

        # Different field names: comparison falls back to elementwise ==
        # which warns; the warning must be raised exactly once.
        c = np.empty(2, [('floupipi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()

        with suppress_warnings() as sup:
            l = sup.record(FutureWarning, message="elementwise == ")
            self._test_not_equal(c, b)
            assert_equal(len(l), 1)

    def test_masked_nan_inf(self):
        # Regression test for gh-11121: masked entries must hide nan/inf
        # in the other operand, in either argument position.
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
        b = np.array([3., np.nan, 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
        b = np.array([np.inf, 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_overrides_eq(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return bool(np.equal(self, other).all())

            def __ne__(self, other):
                return not self == other

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        # NOTE(review): the second argument of assert_ is the failure
        # message, so this line only checks truthiness of type(a == a) —
        # upstream quirk preserved as-is.
        assert_(type(a == a), bool)
        assert_(a == a)
        assert_(a != b)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)

    @pytest.mark.skipif(
        not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
    def test_subclass_that_does_not_implement_npall(self):
        # Subclass opts out of the array-function protocol entirely; the
        # assertion helpers must still work without calling np.all on it.
        class MyArray(np.ndarray):
            def __array_function__(self, *args, **kwargs):
                return NotImplemented

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        with assert_raises(TypeError):
            np.all(a)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)
class TestBuildErrorMessage:
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
'2.00003, 3.00004])')
assert_equal(a, b)
def test_build_err_msg_no_verbose(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, verbose=False)
b = '\nItems are not equal: There is a mismatch'
assert_equal(a, b)
def test_build_err_msg_custom_names(self):
x = np.array([1.00001, 2.00002, 3.00003])
y = np.array([1.00002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
'1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
'3.00004])')
assert_equal(a, b)
def test_build_err_msg_custom_precision(self):
x = np.array([1.000000001, 2.00002, 3.00003])
y = np.array([1.000000002, 2.00003, 3.00004])
err_msg = 'There is a mismatch'
a = build_err_msg([x, y], err_msg, precision=10)
b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
'1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
'1.000000002, 2.00003 , 3.00004 ])')
assert_equal(a, b)
class TestEqual(TestArrayEqual):
    """assert_equal: inherits all array cases and adds scalar/item cases
    (nan, inf, NaT, complex, strings, signed zero, object arrays)."""

    def setup(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        # Scalar vs 1-element list is a type mismatch, not equality.
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_datetime(self):
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "s")
        )
        # Same instant in different units still compares equal.
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "m")
        )

        # gh-10081
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "s")
        )
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "m")
        )

    def test_nat_items(self):
        # not a datetime
        nadt_no_unit = np.datetime64("NaT")
        nadt_s = np.datetime64("NaT", "s")
        nadt_d = np.datetime64("NaT", "ns")
        # not a timedelta
        natd_no_unit = np.timedelta64("NaT")
        natd_s = np.timedelta64("NaT", "s")
        natd_d = np.timedelta64("NaT", "ns")

        dts = [nadt_no_unit, nadt_s, nadt_d]
        tds = [natd_no_unit, natd_s, natd_d]
        # NaT equals NaT of the same kind regardless of unit...
        for a, b in itertools.product(dts, dts):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        for a, b in itertools.product(tds, tds):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)

        # ...but a timedelta NaT never equals a datetime NaT, nor any
        # non-NaT datetime/timedelta value.
        for a, b in itertools.product(tds, dts):
            self._test_not_equal(a, b)
            self._test_not_equal(a, [b])
            self._test_not_equal([a], [b])
            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([a], np.timedelta64(123, "s"))
            self._test_not_equal([b], np.timedelta64(123, "s"))

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        # NaN in matching components still counts as equal.
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # assert_equal distinguishes +0.0 from -0.0.
        self._test_not_equal(np.PZERO, np.NZERO)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)

    def test_object(self):
        # gh-12942: object arrays must be compared elementwise, not by identity.
        import datetime
        a = np.array([datetime.datetime(2000, 1, 1),
                      datetime.datetime(2000, 1, 2)])
        self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
    """Cases specific to assert_array_almost_equal (decimal-based closeness)."""

    def setup(self):
        self._assert_func = assert_array_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #   `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #   `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # test scalars
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func(1.5, 0.0, decimal=0))

        # test arrays
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_simple(self):
        x = np.array([1234.2222])
        y = np.array([1234.2223])

        # Values differ in the 4th decimal; decimal=5 must fail.
        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, decimal=5))

    def test_nan(self):
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        # NaN only matches NaN; vs finite or inf it must raise either way.
        self._assert_func(anan, anan)
        assert_raises(AssertionError,
                      lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError,
                      lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError,
                      lambda: self._assert_func(ainf, anan))

    def test_inf(self):
        a = np.array([[1., 2.], [3., 4.]])
        b = a.copy()
        a[0, 0] = np.inf
        assert_raises(AssertionError,
                      lambda: self._assert_func(a, b))
        # inf vs -inf is also a mismatch, not "close".
        b[0, 0] = -np.inf
        assert_raises(AssertionError,
                      lambda: self._assert_func(a, b))

    def test_subclass(self):
        a = np.array([[1., 2.], [3., 4.]])
        b = np.ma.masked_array([[1., 2.], [0., 4.]],
                               [[False, False], [True, False]])
        # Masked entries are ignored, in either argument position.
        self._assert_func(a, b)
        self._assert_func(b, a)
        self._assert_func(b, b)

        # Test fully masked as well (see gh-11123).
        a = np.ma.MaskedArray(3.5, mask=True)
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.masked
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array([1., 2., 3.])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array(1.)
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super(MyArray, self).__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super(MyArray, self).__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
class TestAlmostEqual(_GenericTest):
    """Cases specific to assert_almost_equal: scalar items, complex values,
    and the exact formatting of failure messages."""

    def setup(self):
        self._assert_func = assert_almost_equal

    def test_closeness(self):
        # Note that in the course of time we ended up with
        #   `abs(x - y) < 1.5 * 10**(-decimal)`
        # instead of the previously documented
        #   `abs(x - y) < 0.5 * 10**(-decimal)`
        # so this check serves to preserve the wrongness.

        # test scalars
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func(1.5, 0.0, decimal=0))

        # test arrays
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_nan_item(self):
        self._assert_func(np.nan, np.nan)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, np.inf))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(-np.inf, np.inf))

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)

    def test_error_message(self):
        """Check the message is formatted correctly for the decimal value.
        Also check the message when input includes inf or nan (gh12200)."""
        x = np.array([1.00000000001, 2.00000000002, 3.00003])
        y = np.array([1.00000000002, 2.00000000003, 3.00004])

        # Test with a different amount of decimal digits
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y, decimal=12)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(
            msgs[6],
            ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
        assert_equal(
            msgs[7],
            ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')

        # With the default value of decimal digits, only the 3rd element
        # differs. Note that we only check for the formatting of the arrays
        # themselves.
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
        assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
        assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')

        # Check the error message when input includes inf
        x = np.array([np.inf, 0])
        y = np.array([np.inf, 1])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')
        assert_equal(msgs[6], ' x: array([inf, 0.])')
        assert_equal(msgs[7], ' y: array([inf, 1.])')

        # Check the error message when dividing by zero
        x = np.array([1, 2])
        y = np.array([0, 0])
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 2')
        # Relative difference vs an all-zero desired value is infinite.
        assert_equal(msgs[5], 'Max relative difference: inf')

    def test_error_message_2(self):
        """Check the message is formatted correctly when either x or y is a scalar."""
        x = 2
        y = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        assert_equal(msgs[5], 'Max relative difference: 1.')

        y = 2
        x = np.ones(20)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msgs = str(exc_info.value).split('\n')
        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
        assert_equal(msgs[4], 'Max absolute difference: 1.')
        # Relative difference is taken w.r.t. the second (desired) argument.
        assert_equal(msgs[5], 'Max relative difference: 0.5')

    def test_subclass_that_cannot_be_bool(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super(MyArray, self).__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super(MyArray, self).__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
class TestApproxEqual:
    """Tests for assert_approx_equal (significant-digit comparison)."""

    def setup(self):
        self._assert_func = assert_approx_equal

    def test_simple_0d_arrays(self):
        x = np.array(1234.22)
        y = np.array(1234.23)
        # Values agree to 6 significant digits but not 7.
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_simple_items(self):
        x = 1234.22
        y = 1234.23
        self._assert_func(x, y, significant=4)
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_nan_array(self):
        # 0-d arrays: nan only matches nan; vs finite or inf must raise.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_items(self):
        # Bug fix: this test was a byte-for-byte duplicate of test_nan_array
        # (it also built 0-d arrays). Per its name — and mirroring the
        # array/items split used by TestApproxEqual's other cases — it should
        # exercise plain Python scalars instead.
        anan = np.nan
        aone = 1
        ainf = np.inf
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
class TestArrayAssertLess:
    """Cases for assert_array_less (strict elementwise < comparison)."""

    def setup(self):
        self._assert_func = assert_array_less

    def test_simple_arrays(self):
        x = np.array([1.1, 2.2])
        y = np.array([1.2, 2.3])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        # The comparison must hold for every element: one failing
        # position makes both orderings raise.
        y = np.array([1.0, 2.3])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_rank2(self):
        x = np.array([[1.1, 2.2], [3.3, 4.4]])
        y = np.array([[1.2, 2.3], [3.4, 4.5]])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([[1.0, 2.3], [3.4, 4.5]])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_rank3(self):
        x = np.ones(shape=(2, 2, 2))
        y = np.ones(shape=(2, 2, 2)) + 1

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y[0, 0, 0] = 0

        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_simple_items(self):
        x = 1.1
        y = 2.2

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        # Scalar vs array: the scalar is broadcast against each element.
        y = np.array([2.2, 3.3])

        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

        y = np.array([1.0, 3.3])

        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_nan_noncompare(self):
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)

        # nan vs nan passes; nan vs anything else fails in either order.
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_noncompare_array(self):
        x = np.array([1.1, 2.2, 3.3])
        anan = np.array(np.nan)

        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))

        x = np.array([1.1, 2.2, np.nan])

        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))

        # nan positions must match between the two arrays for < to hold
        # on the remaining elements.
        y = np.array([1.0, 2.0, np.nan])

        self._assert_func(y, x)
        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_inf_compare(self):
        aone = np.array(1)
        ainf = np.array(np.inf)

        self._assert_func(aone, ainf)
        self._assert_func(-ainf, aone)
        self._assert_func(-ainf, ainf)
        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
        # inf is not strictly less than inf (likewise for -inf).
        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))

    def test_inf_compare_array(self):
        x = np.array([1.1, 2.2, np.inf])
        ainf = np.array(np.inf)

        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
        self._assert_func(-ainf, x)
@pytest.mark.skip(reason="The raises decorator depends on Nose")
class TestRaises:
    """Tests for the nose-based ``raises`` decorator.

    Skipped under pytest since the decorator requires Nose infrastructure.
    """

    def setup(self):
        # A private exception type so the decorator can't match by accident.
        class MyException(Exception):
            pass

        self.e = MyException

    def raises_exception(self, e):
        raise e

    def does_not_raise_exception(self):
        pass

    def test_correct_catch(self):
        # Expected exception is raised and swallowed by the decorator.
        raises(self.e)(self.raises_exception)(self.e)  # raises?

    def test_wrong_exception(self):
        # A different exception type must propagate through the decorator.
        try:
            raises(self.e)(self.raises_exception)(RuntimeError)  # raises?
        except RuntimeError:
            return
        else:
            raise AssertionError("should have caught RuntimeError")

    def test_catch_no_raise(self):
        # If nothing is raised, the decorator itself must raise AssertionError.
        try:
            raises(self.e)(self.does_not_raise_exception)()  # raises?
        except AssertionError:
            return
        else:
            raise AssertionError("should have raised an AssertionError")
class TestWarns:
    """Tests for assert_warns/assert_no_warnings, both as callables and as
    context managers, including preservation of the global warnings state.

    Fix: the assertion messages read "preserver" — corrected to "preserve".
    """
    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3
        # Snapshot the filter list so we can check it is restored afterwards.
        before_filters = sys.modules['warnings'].filters[:]
        assert_equal(assert_warns(UserWarning, f), 3)
        after_filters = sys.modules['warnings'].filters
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
        # Check that the warnings state is unchanged
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")
    def test_context_manager(self):
        before_filters = sys.modules['warnings'].filters[:]
        with assert_warns(UserWarning):
            warnings.warn("yo")
        after_filters = sys.modules['warnings'].filters
        def no_warnings():
            with assert_no_warnings():
                warnings.warn("yo")
        assert_raises(AssertionError, no_warnings)
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")
    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)
        failed = False
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            try:
                # Should raise a DeprecationWarning
                assert_warns(UserWarning, f)
                failed = True
            except DeprecationWarning:
                pass
        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
class TestAssertAllclose:
    """Tests for assert_allclose: tolerance handling, NaN handling, and the
    formatting of the mismatch report."""
    def test_simple(self):
        x = 1e-3
        y = 1e-9
        # With atol=1 everything passes; with default tolerances it fails.
        assert_allclose(x, y, atol=1)
        assert_raises(AssertionError, assert_allclose, x, y)
        a = np.array([x, y, x, y])
        b = np.array([x, y, x, x])
        assert_allclose(a, b, atol=1)
        assert_raises(AssertionError, assert_allclose, a, b)
        b[-1] = y * (1 + 1e-8)
        assert_allclose(a, b)
        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
        # rtol is measured relative to the second (desired) argument:
        # |6-10| <= 0.5*10 passes, but |10-6| <= 0.5*6 does not.
        assert_allclose(6, 10, rtol=0.5)
        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
    def test_min_int(self):
        a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
        # Should not raise:
        assert_allclose(a, a)
    def test_report_fail_percentage(self):
        a = np.array([1, 1, 1, 1])
        b = np.array([1, 1, 1, 2])
        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_('Mismatched elements: 1 / 4 (25%)\n'
                'Max absolute difference: 1\n'
                'Max relative difference: 0.5' in msg)
    def test_equal_nan(self):
        a = np.array([np.nan])
        b = np.array([np.nan])
        # Should not raise:
        assert_allclose(a, b, equal_nan=True)
    def test_not_equal_nan(self):
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
    def test_equal_nan_default(self):
        # Make sure equal_nan default behavior remains unchanged. (All
        # of these functions use assert_array_compare under the hood.)
        # None of these should raise.
        a = np.array([np.nan])
        b = np.array([np.nan])
        assert_array_equal(a, b)
        assert_array_almost_equal(a, b)
        assert_array_less(a, b)
        assert_allclose(a, b)
    def test_report_max_relative_error(self):
        a = np.array([0, 1])
        b = np.array([0, 2])
        with pytest.raises(AssertionError) as exc_info:
            assert_allclose(a, b)
        msg = str(exc_info.value)
        assert_('Max relative difference: 0.5' in msg)
class TestArrayAlmostEqualNulp:
    """Tests for assert_array_almost_equal_nulp over float16/32/64 and the
    complex dtypes: offsets of nulp/2 ULPs must pass, 2*nulp must fail, and
    differing NaN payloads are checked via assert_array_max_ulp."""
    def test_float64_pass(self):
        # The number of units of least precision
        # In this case, use a few places above the lowest level (ie nulp=1)
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        # Addition
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        # Subtraction
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
    def test_float64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        # Offsets of 2*nulp ULPs must be rejected.
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
    def test_float64_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint64(0xffffffff)
        nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
        nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones.
        nan1_f64 = nan1_i64.view(np.float64)
        nan2_f64 = nan2_i64.view(np.float64)
        assert_array_max_ulp(nan1_f64, nan2_f64, 0)
    def test_float32_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
    def test_float32_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
    def test_float32_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint32(0xffff)
        nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
        nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones.
        nan1_f32 = nan1_i32.view(np.float32)
        nan2_f32 = nan2_i32.view(np.float32)
        assert_array_max_ulp(nan1_f32, nan2_f32, 0)
    def test_float16_pass(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
    def test_float16_fail(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
    def test_float16_ignore_nan(self):
        # Ignore ULP differences between various NAN's
        # Note that MIPS may reverse quiet and signaling nans
        # so we use the builtin version as a base.
        offset = np.uint16(0xff)
        nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
        nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones.
        nan1_f16 = nan1_i16.view(np.float16)
        nan2_f16 = nan2_i16.view(np.float16)
        assert_array_max_ulp(nan1_f16, nan2_f16, 0)
    def test_complex128_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
    def test_complex128_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        # The test condition needs to be at least a factor of sqrt(2) smaller
        # because the real and imaginary parts both change
        y = x + x*eps*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
    def test_complex64_pass(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
    def test_complex64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x + x*eps*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
class TestULP:
    """Tests for assert_array_max_ulp: exact equality, small eps
    perturbations, and that nan is far (in ULPs) from ordinary values."""
    def test_equal(self):
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)
    def test_single(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)
    def test_double(self):
        # Generate 1 + small deviation, check that adding eps gives a few ULP
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)
    def test_inf(self):
        # inf is within a few hundred ULPs of the largest finite value.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)
    def test_nan(self):
        # Test that nan is 'far' from small, tiny, inf, max and min
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            zero = np.array([np.PZERO]).astype(dt)
            nzero = np.array([np.NZERO]).astype(dt)
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, inf,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, big,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, tiny,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, zero,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, nzero,
                                                       maxulp=maxulp))
class TestStringEqual:
    """Tests for assert_string_equal: diff formatting on mismatch, and that
    regex metacharacters in the desired string are not treated as a pattern."""
    def test_simple(self):
        assert_string_equal("hello", "hello")
        assert_string_equal("hello\nmultiline", "hello\nmultiline")
        with pytest.raises(AssertionError) as exc_info:
            assert_string_equal("foo\nbar", "hello\nbar")
        msg = str(exc_info.value)
        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
        assert_raises(AssertionError,
                      lambda: assert_string_equal("foo", "hello"))
    def test_regex(self):
        # "a+*b" equals itself literally; "aaa" must not match the regex-like
        # desired string "a+b".
        assert_string_equal("a+*b", "a+*b")
        assert_raises(AssertionError,
                      lambda: assert_string_equal("aaa", "a+b"))
def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
    """Assert that ``mod.__warningregistry__`` holds ``n_in_context`` entries.

    ``py34`` / ``py37`` optionally override the expected count on
    Python >= 3.4 / >= 3.7 respectively, because the interpreter's
    warning-registry bookkeeping differs across versions.
    """
    try:
        mod_warns = mod.__warningregistry__
    except AttributeError:
        # the lack of a __warningregistry__
        # attribute means that no warning has
        # occurred; this can be triggered in
        # a parallel test scenario, while in
        # a serial test scenario an initial
        # warning (and therefore the attribute)
        # are always created first
        mod_warns = {}
    num_warns = len(mod_warns)
    # Python 3.4 appears to clear any pre-existing warnings of the same type,
    # when raising warnings inside a catch_warnings block. So, there is a
    # warning generated by the tests within the context manager, but no
    # previous warnings.
    if 'version' in mod_warns:
        # Python 3 adds a 'version' entry to the registry,
        # do not count it.
        num_warns -= 1
    # Behavior of warnings is Python version dependent. Adjust the
    # expected result to compensate. In particular, Python 3.7 does
    # not make an entry for ignored warnings.
    if sys.version_info[:2] >= (3, 7):
        if py37 is not None:
            n_in_context = py37
    elif sys.version_info[:2] >= (3, 4):
        if py34 is not None:
            n_in_context = py34
    assert_equal(num_warns, n_in_context)
def test_warn_len_equal_call_scenarios():
    """Probe both branches of assert_warn_len_equal: with and without a
    ``__warningregistry__`` attribute on the probed module object."""
    # assert_warn_len_equal is called under
    # varying circumstances depending on serial
    # vs. parallel test scenarios; this test
    # simply aims to probe both code paths and
    # check that no assertion is uncaught
    # parallel scenario -- no warning issued yet
    class mod:
        pass
    mod_inst = mod()
    assert_warn_len_equal(mod=mod_inst,
                          n_in_context=0)
    # serial test scenario -- the __warningregistry__
    # attribute should be present
    class mod:
        def __init__(self):
            self.__warningregistry__ = {'warning1':1,
                                        'warning2':2}
    mod_inst = mod()
    assert_warn_len_equal(mod=mod_inst,
                          n_in_context=2)
def _get_fresh_mod():
    """Return this test module with its warning registry cleared."""
    # Get this module, with warning registry empty
    my_mod = sys.modules[__name__]
    try:
        my_mod.__warningregistry__.clear()
    except AttributeError:
        # will not have a __warningregistry__ unless warning has been
        # raised in the module at some point
        pass
    return my_mod
def test_clear_and_catch_warnings():
    """clear_and_catch_warnings must clear/restore the registries of the
    modules it is given, and leave other modules' registries alone."""
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_equal(my_mod.__warningregistry__, {})
    # Without specified modules, don't clear warnings during context
    # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
    # Confirm that specifying module keeps old warning, does not add new
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
    # Another warning, no module spec does add to warnings dict, except on
    # Python 3.4 (see comments in `assert_warn_len_equal`)
    # Python 3.7 catch_warnings doesn't make an entry for 'ignore'.
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 2, py34=1, py37=0)
def test_suppress_warnings_module():
    """suppress_warnings filtered by ``module=`` must suppress warnings from
    that module only, and not pollute the module's warning registry."""
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning 2", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])
    # Test module based warning suppression:
    assert_warn_len_equal(my_mod, 0)
    with suppress_warnings() as sup:
        sup.record(UserWarning)
        # suppress warning from other module (may have .pyc ending),
        # if apply_along_axis is moved, had to be changed.
        sup.filter(module=np.lib.shape_base)
        warnings.warn("Some warning")
        warn_other_module()
    # Check that the suppression did test the file correctly (this module
    # got filtered)
    assert_equal(len(sup.log), 1)
    assert_equal(sup.log[0].message.args[0], "Some warning")
    assert_warn_len_equal(my_mod, 0, py37=0)
    sup = suppress_warnings()
    # Will have to be changed if apply_along_axis is moved:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # And test repeat works:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # Without specified modules, don't clear warnings during context
    # Python 3.7 does not add ignored warnings.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_type():
    """suppress_warnings filtered by warning *category* must suppress that
    category and keep the module's warning registry clean."""
    # Initial state of module, no warnings
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    # Test module based warning suppression:
    with suppress_warnings() as sup:
        sup.filter(UserWarning)
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    sup = suppress_warnings()
    sup.filter(UserWarning)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # And test repeat works:
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # Without specified modules, don't clear warnings during context
    # Python 3.7 does not add ignored warnings.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1, py37=0)
def test_suppress_warnings_decorate_no_record():
    """A suppress_warnings instance used as a decorator must filter the
    configured category without recording anything."""
    sup = suppress_warnings()
    sup.filter(UserWarning)
    @sup
    def warn(category):
        warnings.warn('Some warning', category)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        warn(UserWarning)  # should be suppressed
        warn(RuntimeWarning)
        assert_equal(len(w), 1)
def test_suppress_warnings_record():
    """Recording with suppress_warnings: per-message logs, reuse of the same
    context, and nesting of two suppress_warnings contexts."""
    sup = suppress_warnings()
    log1 = sup.record()
    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')
        assert_equal(len(sup.log), 2)
        assert_equal(len(log1), 1)
        assert_equal(len(log2),1)
        assert_equal(log2[0].message.args[0], 'Some other warning 2')
    # Do it again, with the same context to see if some warnings survived:
    with sup:
        log2 = sup.record(message='Some other warning 2')
        sup.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')
        assert_equal(len(sup.log), 2)
        assert_equal(len(log1), 1)
        assert_equal(len(log2), 1)
        assert_equal(log2[0].message.args[0], 'Some other warning 2')
    # Test nested:
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings() as sup2:
            sup2.record(message='Some warning')
            warnings.warn('Some warning')
            warnings.warn('Some other warning')
            assert_equal(len(sup2.log), 1)
        assert_equal(len(sup.log), 1)
def test_suppress_warnings_forwarding():
    """The four forwarding modes ("always", "location", "module", "once")
    must each record the expected number of warnings in the outer context."""
    def warn_other_module():
        # Apply along axis is implemented in python; stacklevel=2 means
        # we end up inside its module, not ours.
        def warn(arr):
            warnings.warn("Some warning", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("always"):
            for i in range(2):
                warnings.warn("Some warning")
        assert_equal(len(sup.log), 2)
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("location"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
        assert_equal(len(sup.log), 2)
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("module"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
                warn_other_module()
        assert_equal(len(sup.log), 2)
    with suppress_warnings() as sup:
        sup.record()
        with suppress_warnings("once"):
            for i in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some other warning")
                warn_other_module()
        assert_equal(len(sup.log), 2)
def test_tempdir():
    """tempdir() must remove its directory on both normal and error exit."""
    with tempdir() as dirname:
        # The directory must be usable while the context is active.
        with open(os.path.join(dirname, 'tmp'), 'w'):
            pass
    assert_(not os.path.isdir(dirname))
    caught = False
    try:
        with tempdir() as dirname:
            raise ValueError()
    except ValueError:
        caught = True
    assert_(caught)
    # Cleanup must also happen when the body raised.
    assert_(not os.path.isdir(dirname))
def test_temppath():
    """temppath() must delete its file on both normal and error exit."""
    with temppath() as filename:
        # The path must be creatable while the context is active.
        with open(filename, 'w'):
            pass
    assert_(not os.path.isfile(filename))
    caught = False
    try:
        with temppath() as filename:
            raise ValueError()
    except ValueError:
        caught = True
    assert_(caught)
    # Cleanup must also happen when the body raised.
    assert_(not os.path.isfile(filename))
class my_cacw(clear_and_catch_warnings):
    # Subclass whose default modules always include this test module, so the
    # module's warning registry is cleared/restored by default.
    class_modules = (sys.modules[__name__],)
def test_clear_and_catch_warnings_inherit():
    """A clear_and_catch_warnings subclass with class-level default modules
    must clear those modules' registries without being told explicitly."""
    # Test can subclass and add default modules
    my_mod = _get_fresh_mod()
    with my_cacw():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_equal(my_mod.__warningregistry__, {})
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestAssertNoGcCycles:
    """ Test assert_no_gc_cycles """
    def test_passes(self):
        def no_cycle():
            b = []
            b.append([])
            return b
        # Works both as a context manager and as a callable wrapper.
        with assert_no_gc_cycles():
            no_cycle()
        assert_no_gc_cycles(no_cycle)
    def test_asserts(self):
        def make_cycle():
            a = []
            a.append(a)
            a.append(a)
            return a
        # A created cycle must be reported in both usage forms.
        with assert_raises(AssertionError):
            with assert_no_gc_cycles():
                make_cycle()
        with assert_raises(AssertionError):
            assert_no_gc_cycles(make_cycle)
    @pytest.mark.slow
    def test_fails(self):
        """
        Test that in cases where the garbage cannot be collected, we raise an
        error, instead of hanging forever trying to clear it.
        """
        class ReferenceCycleInDel:
            """
            An object that not only contains a reference cycle, but creates new
            cycles whenever it's garbage-collected and its __del__ runs
            """
            make_cycle = True
            def __init__(self):
                self.cycle = self
            def __del__(self):
                # break the current cycle so that `self` can be freed
                self.cycle = None
                if ReferenceCycleInDel.make_cycle:
                    # but create a new one so that the garbage collector has more
                    # work to do.
                    ReferenceCycleInDel()
        try:
            w = weakref.ref(ReferenceCycleInDel())
            try:
                with assert_raises(RuntimeError):
                    # this will be unable to get a baseline empty garbage
                    assert_no_gc_cycles(lambda: None)
            except AssertionError:
                # the above test is only necessary if the GC actually tried to free
                # our object anyway, which python 2.7 does not.
                if w() is not None:
                    pytest.skip("GC does not call __del__ on cyclic objects")
                raise
        finally:
            # make sure that we stop creating reference cycles
            ReferenceCycleInDel.make_cycle = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexes/period/test_formats.py | import numpy as np
import pytest
import pandas as pd
from pandas import PeriodIndex
import pandas._testing as tm
def test_to_native_types():
    """PeriodIndex.to_native_types: default rendering, na_rep, an explicit
    locations argument, date_format, and NaT handling.

    NOTE(review): to_native_types was deprecated in later pandas versions;
    this test targets the API as it existed when the file was written.
    """
    index = PeriodIndex(["2017-01-01", "2017-01-02", "2017-01-03"], freq="D")
    # First, with no arguments.
    expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype="=U10")
    result = index.to_native_types()
    tm.assert_numpy_array_equal(result, expected)
    # No NaN values, so na_rep has no effect
    result = index.to_native_types(na_rep="pandas")
    tm.assert_numpy_array_equal(result, expected)
    # Make sure slicing works
    expected = np.array(["2017-01-01", "2017-01-03"], dtype="=U10")
    result = index.to_native_types([0, 2])
    tm.assert_numpy_array_equal(result, expected)
    # Make sure date formatting works
    expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype="=U10")
    result = index.to_native_types(date_format="%m-%Y-%d")
    tm.assert_numpy_array_equal(result, expected)
    # NULL object handling should work
    index = PeriodIndex(["2017-01-01", pd.NaT, "2017-01-03"], freq="D")
    expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object)
    result = index.to_native_types()
    tm.assert_numpy_array_equal(result, expected)
    expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object)
    result = index.to_native_types(na_rep="pandas")
    tm.assert_numpy_array_equal(result, expected)
class TestPeriodIndexRendering:
    """Golden-output tests for repr/str and ``_summary`` of PeriodIndex,
    and for the repr of Series built from them.

    NOTE(review): the column-alignment whitespace inside the expected
    strings below looks collapsed (e.g. "0 2011-01-01" rather than an
    aligned "0   2011-01-01") — likely mangled during extraction; verify
    against the upstream pandas test file before relying on these values.
    """
    def test_frame_repr(self):
        df = pd.DataFrame({"A": [1, 2, 3]}, index=pd.date_range("2000", periods=3))
        result = repr(df)
        expected = " A\n2000-01-01 1\n2000-01-02 2\n2000-01-03 3"
        assert result == expected
    @pytest.mark.parametrize("method", ["__repr__", "__str__"])
    def test_representation(self, method):
        # GH#7601
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
        idx10 = PeriodIndex(["2011-01-01", "2011-02-01"], freq="3D")
        exp1 = "PeriodIndex([], dtype='period[D]', freq='D')"
        exp2 = "PeriodIndex(['2011-01-01'], dtype='period[D]', freq='D')"
        exp3 = "PeriodIndex(['2011-01-01', '2011-01-02'], dtype='period[D]', freq='D')"
        exp4 = (
            "PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "
            "dtype='period[D]', freq='D')"
        )
        exp5 = (
            "PeriodIndex(['2011', '2012', '2013'], dtype='period[A-DEC]', "
            "freq='A-DEC')"
        )
        exp6 = (
            "PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], "
            "dtype='period[H]', freq='H')"
        )
        exp7 = "PeriodIndex(['2013Q1'], dtype='period[Q-DEC]', freq='Q-DEC')"
        exp8 = "PeriodIndex(['2013Q1', '2013Q2'], dtype='period[Q-DEC]', freq='Q-DEC')"
        exp9 = (
            "PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], "
            "dtype='period[Q-DEC]', freq='Q-DEC')"
        )
        exp10 = (
            "PeriodIndex(['2011-01-01', '2011-02-01'], "
            "dtype='period[3D]', freq='3D')"
        )
        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9, idx10],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9, exp10],
        ):
            result = getattr(idx, method)()
            assert result == expected
    def test_representation_to_series(self):
        # GH#10971
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
        exp1 = """Series([], dtype: period[D])"""
        exp2 = """0 2011-01-01
dtype: period[D]"""
        exp3 = """0 2011-01-01
1 2011-01-02
dtype: period[D]"""
        exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: period[D]"""
        exp5 = """0 2011
1 2012
2 2013
dtype: period[A-DEC]"""
        exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: period[H]"""
        exp7 = """0 2013Q1
dtype: period[Q-DEC]"""
        exp8 = """0 2013Q1
1 2013Q2
dtype: period[Q-DEC]"""
        exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: period[Q-DEC]"""
        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
        ):
            result = repr(pd.Series(idx))
            assert result == expected
    def test_summary(self):
        # GH#9116
        idx1 = PeriodIndex([], freq="D")
        idx2 = PeriodIndex(["2011-01-01"], freq="D")
        idx3 = PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")
        idx4 = PeriodIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")
        idx5 = PeriodIndex(["2011", "2012", "2013"], freq="A")
        idx6 = PeriodIndex(["2011-01-01 09:00", "2012-02-01 10:00", "NaT"], freq="H")
        idx7 = pd.period_range("2013Q1", periods=1, freq="Q")
        idx8 = pd.period_range("2013Q1", periods=2, freq="Q")
        idx9 = pd.period_range("2013Q1", periods=3, freq="Q")
        exp1 = """PeriodIndex: 0 entries
Freq: D"""
        exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
        exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
        exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
        exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
        exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
        exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
        exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
        exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
        for idx, expected in zip(
            [idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
            [exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9],
        ):
            result = idx._summary()
            assert result == expected
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/scene/camera/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/scene/camera/__init__.py
import sys

# Auto-generated plotly module: re-export the layout.scene.camera component
# classes (Center, Eye, Projection, Up).
if sys.version_info < (3, 7):
    # Python < 3.7 lacks module-level __getattr__ (PEP 562); import eagerly.
    from ._center import Center
    from ._eye import Eye
    from ._projection import Projection
    from ._up import Up
else:
    # On modern Pythons the submodules are resolved lazily on first access,
    # which keeps plotly's import time down.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._center.Center", "._eye.Eye", "._projection.Projection", "._up.Up"],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/updatemenu/_type.py | import _plotly_utils.basevalidators
class TypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.updatemenu.type``: the menu is
    rendered either as a "dropdown" or as a row of "buttons"."""

    def __init__(self, plotly_name="type", parent_name="layout.updatemenu", **kwargs):
        # Pull the overridable defaults out of kwargs before delegating, so
        # any caller-supplied values take precedence.
        edit_type = kwargs.pop("edit_type", "arraydraw")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", ["dropdown", "buttons"])
        super(TypeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/layer/__init__.py | <gh_stars>1000+
import sys

# Auto-generated plotly module: expose one validator class per
# layout.mapbox.layer property.
if sys.version_info < (3, 7):
    # Python < 3.7 lacks module-level __getattr__ (PEP 562); import eagerly.
    from ._visible import VisibleValidator
    from ._type import TypeValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._symbol import SymbolValidator
    from ._sourcetype import SourcetypeValidator
    from ._sourcelayer import SourcelayerValidator
    from ._sourceattribution import SourceattributionValidator
    from ._source import SourceValidator
    from ._opacity import OpacityValidator
    from ._name import NameValidator
    from ._minzoom import MinzoomValidator
    from ._maxzoom import MaxzoomValidator
    from ._line import LineValidator
    from ._fill import FillValidator
    from ._coordinates import CoordinatesValidator
    from ._color import ColorValidator
    from ._circle import CircleValidator
    from ._below import BelowValidator
else:
    # On modern Pythons the validators are resolved lazily on first access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._type.TypeValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._symbol.SymbolValidator",
            "._sourcetype.SourcetypeValidator",
            "._sourcelayer.SourcelayerValidator",
            "._sourceattribution.SourceattributionValidator",
            "._source.SourceValidator",
            "._opacity.OpacityValidator",
            "._name.NameValidator",
            "._minzoom.MinzoomValidator",
            "._maxzoom.MaxzoomValidator",
            "._line.LineValidator",
            "._fill.FillValidator",
            "._coordinates.CoordinatesValidator",
            "._color.ColorValidator",
            "._circle.CircleValidator",
            "._below.BelowValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_barpolar.py | from plotly.graph_objs import Barpolar
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/camera/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/scene/camera/__init__.py
import sys

# Auto-generated plotly module: expose the layout.scene.camera validators.
if sys.version_info < (3, 7):
    # Python < 3.7 lacks module-level __getattr__ (PEP 562); import eagerly.
    from ._up import UpValidator
    from ._projection import ProjectionValidator
    from ._eye import EyeValidator
    from ._center import CenterValidator
else:
    # On modern Pythons the validators are resolved lazily on first access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._up.UpValidator",
            "._projection.ProjectionValidator",
            "._eye.EyeValidator",
            "._center.CenterValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/indexers.py | """
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
    """
    Check if a slice object can be interpreted as a positional indexer.

    A slice is positionally valid when each of its start/stop/step
    components is either None or an integer.

    Parameters
    ----------
    slc : slice

    Returns
    -------
    bool

    Notes
    -----
    A valid positional slice may also be interpreted as a label-based slice
    depending on the index being sliced.
    """
    # Each bound must be absent (None) or an integer for positional use.
    return all(
        bound is None or is_integer(bound)
        for bound in (slc.start, slc.stop, slc.step)
    )
def is_list_like_indexer(key) -> bool:
    """
    Check if we have a list-like indexer that is *not* a NamedTuple.

    Parameters
    ----------
    key : object

    Returns
    -------
    bool
    """
    if not is_list_like(key):
        return False
    # NamedTuples are tuple subclasses that act as scalar-style indexers
    # (e.g. MultiIndex keys), so they are excluded here.
    is_namedtuple = isinstance(key, tuple) and type(key) is not tuple
    return not is_namedtuple
def is_scalar_indexer(indexer, ndim: int) -> bool:
    """
    Return True if we are all scalar indexers.

    Parameters
    ----------
    indexer : object
    ndim : int
        Number of dimensions in the object being indexed.

    Returns
    -------
    bool
    """
    # Only a tuple with exactly one entry per dimension can qualify.
    if not isinstance(indexer, tuple) or len(indexer) != ndim:
        return False

    def _selects_single_position(part) -> bool:
        # A plain integer, or a 1-d length-1 ndarray, indexes one position.
        if is_integer(part):
            return True
        return isinstance(part, np.ndarray) and part.ndim == len(part) == 1

    return all(_selects_single_position(part) for part in indexer)
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
    """
    Check if we have an empty indexer.

    Parameters
    ----------
    indexer : object
    arr_value : np.ndarray

    Returns
    -------
    bool
    """
    # A zero-length list-like selects nothing regardless of arr_value.
    if is_list_like(indexer) and not len(indexer):
        return True
    if arr_value.ndim != 1:
        return False
    # For 1-d values, any empty ndarray component of the (tuple) indexer
    # means nothing is selected.
    parts = indexer if isinstance(indexer, tuple) else (indexer,)
    return any(isinstance(part, np.ndarray) and len(part) == 0 for part in parts)
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> None:
    """
    Validate that value and indexer are the same length.

    An special-case is allowed for when the indexer is a boolean array
    and the number of true values equals the length of ``value``. In
    this case, no exception is raised.

    Parameters
    ----------
    indexer : sequence
        Key for the setitem.
    value : array-like
        Value for the setitem.
    values : array-like
        Values being set into.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        When the indexer is an ndarray or list and the lengths don't match.
    """
    if isinstance(indexer, (np.ndarray, list)):
        if not (is_list_like(value) and len(indexer) != len(value)):
            # Scalar value, or lengths already agree: nothing to check.
            return
        # A boolean mask whose True-count equals len(value) is still valid.
        is_exempt_bool_mask = (
            isinstance(indexer, np.ndarray)
            and indexer.dtype == np.bool_
            and len(indexer[indexer]) == len(value)
        )
        if not is_exempt_bool_mask:
            raise ValueError(
                "cannot set using a list-like indexer "
                "with a different length than the value"
            )
    elif isinstance(indexer, slice):
        # slice
        if is_list_like(value) and len(values):
            if len(value) != length_of_indexer(indexer, values):
                raise ValueError(
                    "cannot set using a slice indexer with a "
                    "different length than the value"
                )
def validate_indices(indices: np.ndarray, n: int) -> None:
    """
    Perform bounds-checking for an indexer.

    -1 is allowed for indicating missing values.

    Parameters
    ----------
    indices : ndarray
    n : int
        Length of the array being indexed.

    Raises
    ------
    ValueError
        If any index is below -1.
    IndexError
        If any index is >= n.
    """
    if not len(indices):
        return
    # -1 is the "missing" sentinel; anything below it is always invalid.
    smallest = indices.min()
    if smallest < -1:
        raise ValueError(
            f"'indices' contains values less than allowed ({smallest} < -1)"
        )
    if indices.max() >= n:
        raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
    """
    Attempt to convert indices into valid, positive indices.

    If we have negative indices, translate to positive here.
    If we have indices that are out-of-bounds, raise an IndexError.

    Parameters
    ----------
    indices : array-like
        Array of indices that we are to convert.
    n : int
        Number of elements in the array that we are indexing.

    Returns
    -------
    array-like
        An array-like of positive indices that correspond to the ones
        that were passed in initially to this function.

    Raises
    ------
    IndexError
        One of the converted indices either exceeded the number of,
        elements (specified by `n`), or was still negative.
    """
    if isinstance(indices, list):
        indices = np.array(indices)
        if len(indices) == 0:
            # np.array([]) defaults to float64, which cannot be used for
            # indexing; return an explicitly typed empty intp array instead.
            return np.empty(0, dtype=np.intp)

    negative = indices < 0
    if negative.any():
        # Translate negative positions to their positive equivalents,
        # copying first so the caller's array is not mutated.
        indices = indices.copy()
        indices[negative] += n

    out_of_bounds = (indices >= n) | (indices < 0)
    if out_of_bounds.any():
        raise IndexError("indices are out-of-bounds")
    return indices
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
    """
    Return the expected length of target[indexer]

    Parameters
    ----------
    indexer : slice, list, ndarray, Series, Index, or scalar
    target : sequence, optional
        Only consulted when ``indexer`` is a slice, to resolve open or
        negative bounds.

    Returns
    -------
    int
    """
    if target is not None and isinstance(indexer, slice):
        target_len = len(target)
        start = indexer.start
        stop = indexer.stop
        step = indexer.step
        # Normalize an open or negative start to an absolute position.
        if start is None:
            start = 0
        elif start < 0:
            start += target_len
        # Clamp stop to the target length; resolve negative stops.
        if stop is None or stop > target_len:
            stop = target_len
        elif stop < 0:
            stop += target_len
        if step is None:
            step = 1
        elif step < 0:
            # Mirror the slice so the counting formula below (which assumes
            # a positive step) applies: [start:stop:-k] selects the same
            # number of elements as [stop+1:start+1:k].
            start, stop = stop + 1, start + 1
            step = -step
        # Ceiling division: how many steps fit in the half-open interval.
        return (stop - start + step - 1) // step
    elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
        if isinstance(indexer, list):
            indexer = np.array(indexer)
        if indexer.dtype == bool:
            # GH#25774: a boolean mask selects one row per True value.
            return indexer.sum()
        return len(indexer)
    elif not is_list_like_indexer(indexer):
        # A scalar indexer always selects exactly one element.
        return 1
    raise AssertionError("cannot find the length of the indexer")
def deprecate_ndim_indexing(result, stacklevel=3):
    """
    Helper function to raise the deprecation warning for multi-dimensional
    indexing on 1D Series/Index.

    GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
    and keep an index, so we currently return ndarray, which is deprecated
    (Deprecation GH#30588).
    """
    # Nothing to warn about for scalar or 1-d results.
    if np.ndim(result) <= 1:
        return
    warnings.warn(
        "Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
        "is deprecated and will be removed in a future "
        "version. Convert to a numpy array before indexing instead.",
        FutureWarning,
        stacklevel=stacklevel,
    )
def unpack_1tuple(tup):
    """
    If we have a length-1 tuple/list that contains a slice, unpack to just
    the slice.

    Notes
    -----
    The list case is deprecated.
    """
    # Anything other than a single-element container holding a slice is
    # returned unchanged.
    if len(tup) != 1 or not isinstance(tup[0], slice):
        return tup
    # if we don't have a MultiIndex, we may still be able to handle
    # a 1-tuple. see test_1tuple_without_multiindex
    if isinstance(tup, list):
        # GH#31299
        warnings.warn(
            "Indexing with a single-item list containing a "
            "slice is deprecated and will raise in a future "
            "version. Pass a tuple instead.",
            FutureWarning,
            stacklevel=3,
        )
    return tup[0]
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
    """
    Check if `indexer` is a valid array indexer for `array`.

    For a boolean mask, `array` and `indexer` are checked to have the same
    length. The dtype is validated, and if it is an integer or boolean
    ExtensionArray, it is checked if there are missing values present, and
    it is converted to the appropriate numpy array. Other dtypes will raise
    an error.

    Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
    through as is.

    .. versionadded:: 1.0.0

    Parameters
    ----------
    array : array-like
        The array that is being indexed (only used for the length).
    indexer : array-like or list-like
        The array-like that's used to index. List-like input that is not yet
        a numpy array or an ExtensionArray is converted to one. Other input
        types are passed through as is.

    Returns
    -------
    numpy.ndarray
        The validated indexer as a numpy array that can be used to index.

    Raises
    ------
    IndexError
        When the lengths don't match.
    ValueError
        When `indexer` cannot be converted to a numpy ndarray to index
        (e.g. presence of missing values).

    See Also
    --------
    api.types.is_bool_dtype : Check if `key` is of boolean dtype.

    Examples
    --------
    When checking a boolean mask, a boolean ndarray is returned when the
    arguments are all valid.

    >>> mask = pd.array([True, False])
    >>> arr = pd.array([1, 2])
    >>> pd.api.indexers.check_array_indexer(arr, mask)
    array([ True, False])
    """
    from pandas.core.construction import array as pd_array

    # whatever is not an array-like is returned as-is (possible valid array
    # indexers that are not array-like: integer, slice, Ellipsis, None)
    # In this context, tuples are not considered as array-like, as they have
    # a specific meaning in indexing (multi-dimensional indexing)
    if is_list_like(indexer):
        if isinstance(indexer, tuple):
            return indexer
    else:
        # BUG FIX: this branch was previously the `else` of the inner tuple
        # check, which returned *every* list-like indexer unvalidated and
        # made all of the validation below unreachable. Only non-list-like
        # indexers (scalars, slices, Ellipsis, None) pass through untouched.
        return indexer

    # convert list-likes to array
    if not is_array_like(indexer):
        indexer = pd_array(indexer)
        if len(indexer) == 0:
            # empty list is converted to float array by pd.array
            indexer = np.array([], dtype=np.intp)

    dtype = indexer.dtype
    if is_bool_dtype(dtype):
        if is_extension_array_dtype(dtype):
            # NA values in a boolean ExtensionArray mask are treated as False
            indexer = indexer.to_numpy(dtype=bool, na_value=False)
        else:
            indexer = np.asarray(indexer, dtype=bool)

        # GH26658
        if len(indexer) != len(array):
            raise IndexError(
                f"Boolean index has wrong length: "
                f"{len(indexer)} instead of {len(array)}"
            )
    elif is_integer_dtype(dtype):
        try:
            indexer = np.asarray(indexer, dtype=np.intp)
        except ValueError as err:
            # e.g. a masked Int64 array with pd.NA present
            raise ValueError(
                "Cannot index with an integer indexer containing NA values"
            ) from err
    else:
        raise IndexError("arrays used as indices must be of integer or boolean type")

    return indexer
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/command/install_clib.py | <gh_stars>1000+
from __future__ import division, absolute_import, print_function
import os
from distutils.core import Command
from distutils.ccompiler import new_compiler
from numpy.distutils.misc_util import get_cmd
class install_clib(Command):
    """distutils command that copies the C libraries built by ``build_clib``
    (listed in ``distribution.installed_libraries``) into the package
    install directory."""

    description = "Command to install installable C libraries"

    # No command-line options of our own; install_dir is derived from the
    # top-level ``install`` command.
    user_options = []

    def initialize_options(self):
        """Set default values for the options this command supports."""
        self.install_dir = None
        self.outfiles = []

    def finalize_options(self):
        """Derive ``install_dir`` from the ``install`` command's ``install_lib``."""
        self.set_undefined_options('install', ('install_lib', 'install_dir'))

    def run(self):
        """Copy each installed C library into its target directory."""
        build_clib_cmd = get_cmd("build_clib")
        if not build_clib_cmd.build_clib:
            # can happen if the user specified `--skip-build`
            build_clib_cmd.finalize_options()
        build_dir = build_clib_cmd.build_clib

        # We need the compiler to get the library name -> filename association
        if not build_clib_cmd.compiler:
            compiler = new_compiler(compiler=None)
            compiler.customize(self.distribution)
        else:
            compiler = build_clib_cmd.compiler

        for lib in self.distribution.installed_libraries:
            target_dir = os.path.join(self.install_dir, lib.target_dir)
            name = compiler.library_filename(lib.name)
            source = os.path.join(build_dir, name)
            self.mkpath(target_dir)
            self.outfiles.append(self.copy_file(source, target_dir)[0])

    def get_outputs(self):
        """Return the list of files installed by :meth:`run`."""
        return self.outfiles
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/annotation/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/scene/annotation/__init__.py
import sys

# Version-dependent import shim generated by the plotly codegen:
# on Python < 3.7 (no module-level __getattr__, PEP 562) every scene
# annotation validator is imported eagerly; otherwise they are loaded
# lazily on first attribute access.
if sys.version_info < (3, 7):
    from ._z import ZValidator
    from ._yshift import YshiftValidator
    from ._yanchor import YanchorValidator
    from ._y import YValidator
    from ._xshift import XshiftValidator
    from ._xanchor import XanchorValidator
    from ._x import XValidator
    from ._width import WidthValidator
    from ._visible import VisibleValidator
    from ._valign import ValignValidator
    from ._textangle import TextangleValidator
    from ._text import TextValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._startstandoff import StartstandoffValidator
    from ._startarrowsize import StartarrowsizeValidator
    from ._startarrowhead import StartarrowheadValidator
    from ._standoff import StandoffValidator
    from ._showarrow import ShowarrowValidator
    from ._opacity import OpacityValidator
    from ._name import NameValidator
    from ._hovertext import HovertextValidator
    from ._hoverlabel import HoverlabelValidator
    from ._height import HeightValidator
    from ._font import FontValidator
    from ._captureevents import CaptureeventsValidator
    from ._borderwidth import BorderwidthValidator
    from ._borderpad import BorderpadValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolor import BgcolorValidator
    from ._ay import AyValidator
    from ._ax import AxValidator
    from ._arrowwidth import ArrowwidthValidator
    from ._arrowsize import ArrowsizeValidator
    from ._arrowside import ArrowsideValidator
    from ._arrowhead import ArrowheadValidator
    from ._arrowcolor import ArrowcolorValidator
    from ._align import AlignValidator
else:
    # relative_import installs lazy __getattr__/__dir__ hooks (PEP 562).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._z.ZValidator",
            "._yshift.YshiftValidator",
            "._yanchor.YanchorValidator",
            "._y.YValidator",
            "._xshift.XshiftValidator",
            "._xanchor.XanchorValidator",
            "._x.XValidator",
            "._width.WidthValidator",
            "._visible.VisibleValidator",
            "._valign.ValignValidator",
            "._textangle.TextangleValidator",
            "._text.TextValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._startstandoff.StartstandoffValidator",
            "._startarrowsize.StartarrowsizeValidator",
            "._startarrowhead.StartarrowheadValidator",
            "._standoff.StandoffValidator",
            "._showarrow.ShowarrowValidator",
            "._opacity.OpacityValidator",
            "._name.NameValidator",
            "._hovertext.HovertextValidator",
            "._hoverlabel.HoverlabelValidator",
            "._height.HeightValidator",
            "._font.FontValidator",
            "._captureevents.CaptureeventsValidator",
            "._borderwidth.BorderwidthValidator",
            "._borderpad.BorderpadValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolor.BgcolorValidator",
            "._ay.AyValidator",
            "._ax.AxValidator",
            "._arrowwidth.ArrowwidthValidator",
            "._arrowsize.ArrowsizeValidator",
            "._arrowside.ArrowsideValidator",
            "._arrowhead.ArrowheadValidator",
            "._arrowcolor.ArrowcolorValidator",
            "._align.AlignValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/parcats/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/parcats/__init__.py
import sys

# Version-dependent import shim generated by the plotly codegen:
# on Python < 3.7 (no module-level __getattr__, PEP 562) every parcats
# validator is imported eagerly; otherwise they are loaded lazily on
# first attribute access.
if sys.version_info < (3, 7):
    from ._visible import VisibleValidator
    from ._uirevision import UirevisionValidator
    from ._uid import UidValidator
    from ._tickfont import TickfontValidator
    from ._stream import StreamValidator
    from ._sortpaths import SortpathsValidator
    from ._name import NameValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._line import LineValidator
    from ._labelfont import LabelfontValidator
    from ._hovertemplate import HovertemplateValidator
    from ._hoveron import HoveronValidator
    from ._hoverinfo import HoverinfoValidator
    from ._domain import DomainValidator
    from ._dimensiondefaults import DimensiondefaultsValidator
    from ._dimensions import DimensionsValidator
    from ._countssrc import CountssrcValidator
    from ._counts import CountsValidator
    from ._bundlecolors import BundlecolorsValidator
    from ._arrangement import ArrangementValidator
else:
    # relative_import installs lazy __getattr__/__dir__ hooks (PEP 562).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._uirevision.UirevisionValidator",
            "._uid.UidValidator",
            "._tickfont.TickfontValidator",
            "._stream.StreamValidator",
            "._sortpaths.SortpathsValidator",
            "._name.NameValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._line.LineValidator",
            "._labelfont.LabelfontValidator",
            "._hovertemplate.HovertemplateValidator",
            "._hoveron.HoveronValidator",
            "._hoverinfo.HoverinfoValidator",
            "._domain.DomainValidator",
            "._dimensiondefaults.DimensiondefaultsValidator",
            "._dimensions.DimensionsValidator",
            "._countssrc.CountssrcValidator",
            "._counts.CountsValidator",
            "._bundlecolors.BundlecolorsValidator",
            "._arrangement.ArrangementValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_choroplethmapbox.py | <gh_stars>1000+
from plotly.graph_objs import Choroplethmapbox
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scatterternary.py | from plotly.graph_objs import Scatterternary
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/figure_factory/_quiver.py | <filename>env/lib/python3.8/site-packages/plotly/figure_factory/_quiver.py
from __future__ import absolute_import
import math
from plotly import exceptions
from plotly.graph_objs import graph_objs
from plotly.figure_factory import utils
def create_quiver(
    x, y, u, v, scale=0.1, arrow_scale=0.3, angle=math.pi / 9, scaleratio=None, **kwargs
):
    """
    Returns data for a quiver plot.

    :param (list|ndarray) x: x coordinates of the arrow locations
    :param (list|ndarray) y: y coordinates of the arrow locations
    :param (list|ndarray) u: x components of the arrow vectors
    :param (list|ndarray) v: y components of the arrow vectors
    :param (float in [0,1]) scale: scales size of the arrows(ideally to
        avoid overlap). Default = .1
    :param (float in [0,1]) arrow_scale: value multiplied to length of barb
        to get length of arrowhead. Default = .3
    :param (angle in radians) angle: angle of arrowhead. Default = pi/9
    :param (positive float) scaleratio: the ratio between the scale of the
        y-axis and the scale of the x-axis (scale_y / scale_x). Default =
        None, the scale ratio is not fixed.
    :param kwargs: kwargs passed through plotly.graph_objs.Scatter
        for more information on valid kwargs call
        help(plotly.graph_objs.Scatter)

    :rtype (dict): returns a representation of quiver figure.

    Example: quiver plot using meshgrid

    >>> from plotly.figure_factory import create_quiver
    >>> import numpy as np
    >>> x, y = np.meshgrid(np.arange(0, 2, .2), np.arange(0, 2, .2))
    >>> fig = create_quiver(x, y, np.cos(x) * y, np.sin(x) * y)
    >>> fig.show()
    """
    utils.validate_equal_length(x, y, u, v)
    utils.validate_positive_scalars(arrow_scale=arrow_scale, scale=scale)

    # _Quiver defaults scaleratio to 1 when no fixed ratio is requested.
    if scaleratio is None:
        quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle)
    else:
        quiver_obj = _Quiver(x, y, u, v, scale, arrow_scale, angle, scaleratio)

    barb_x, barb_y = quiver_obj.get_barbs()
    arrow_x, arrow_y = quiver_obj.get_quiver_arrows()

    # Barbs and arrowheads are drawn as a single line trace; the None
    # separators inside the coordinate lists break the line between arrows.
    trace = graph_objs.Scatter(
        x=barb_x + arrow_x, y=barb_y + arrow_y, mode="lines", **kwargs
    )

    # Only pin the y/x axis scale ratio when the caller asked for one.
    if scaleratio is None:
        layout = graph_objs.Layout(hovermode="closest")
    else:
        layout = graph_objs.Layout(
            hovermode="closest", yaxis=dict(scaleratio=scaleratio, scaleanchor="x")
        )
    return graph_objs.Figure(data=[trace], layout=layout)
class _Quiver(object):
    """
    Refer to FigureFactory.create_quiver() for docstring
    """

    def __init__(self, x, y, u, v, scale, arrow_scale, angle, scaleratio=1, **kwargs):
        # Flatten any 2-d (e.g. meshgrid) inputs; non-flattenable inputs
        # (already 1-d) are passed through unchanged.
        try:
            x = utils.flatten(x)
        except exceptions.PlotlyError:
            pass
        try:
            y = utils.flatten(y)
        except exceptions.PlotlyError:
            pass
        try:
            u = utils.flatten(u)
        except exceptions.PlotlyError:
            pass
        try:
            v = utils.flatten(v)
        except exceptions.PlotlyError:
            pass

        self.x = x
        self.y = y
        self.u = u
        self.v = v
        self.scale = scale
        self.scaleratio = scaleratio
        self.arrow_scale = arrow_scale
        self.angle = angle
        self.end_x = []
        self.end_y = []
        self.scale_uv()
        # Populate self.end_x / self.end_y so get_quiver_arrows() works
        # even if the caller never calls get_barbs() first. (The original
        # code also called get_quiver_arrows() here and discarded the
        # result; that call was pure and has been dropped.)
        self.get_barbs()

    def scale_uv(self):
        """
        Scales u and v to avoid overlap of the arrows.

        u and v are added to x and y to get the
        endpoints of the arrows so a smaller scale value will
        result in less overlap of arrows.
        """
        self.u = [i * self.scale * self.scaleratio for i in self.u]
        self.v = [i * self.scale for i in self.v]

    def get_barbs(self):
        """
        Creates x and y startpoint and endpoint pairs

        After finding the endpoint of each barb this zips startpoint and
        endpoint pairs to create 2 lists: x_values for barbs and y values
        for barbs

        :rtype: (list, list) barb_x, barb_y: list of startpoint and endpoint
            x_value pairs separated by a None to create the barb of the
            arrow, and list of startpoint and endpoint y_value pairs
            separated by a None to create the barb of the arrow.
        """
        self.end_x = [i + j for i, j in zip(self.x, self.u)]
        self.end_y = [i + j for i, j in zip(self.y, self.v)]
        # None separators break the line trace between consecutive barbs.
        empty = [None] * len(self.x)
        barb_x = utils.flatten(zip(self.x, self.end_x, empty))
        barb_y = utils.flatten(zip(self.y, self.end_y, empty))
        return barb_x, barb_y

    def get_quiver_arrows(self):
        """
        Creates lists of x and y values to plot the arrows

        Gets length of each barb then calculates the length of each side of
        the arrow. Gets angle of barb and applies angle to each side of the
        arrowhead. Next uses arrow_scale to scale the length of arrowhead
        and creates x and y values for arrowhead point1 and point2. Finally
        x and y values for point1, endpoint and point2s for each arrowhead
        are separated by a None and zipped to create lists of x and y values
        for the arrows.

        :rtype: (list, list) arrow_x, arrow_y: list of point1, endpoint,
            point2 x_values separated by a None to create the arrowhead and
            list of point1, endpoint, point2 y_values separated by a None
            to create the barb of the arrow.
        """
        dif_x = [i - j for i, j in zip(self.end_x, self.x)]
        dif_y = [i - j for i, j in zip(self.end_y, self.y)]

        # Get barb lengths(default arrow length = 30% barb length); the x
        # component is de-scaled by scaleratio so lengths are isotropic.
        barb_len = [
            math.hypot(dx / self.scaleratio, dy) for dx, dy in zip(dif_x, dif_y)
        ]

        # Make arrow lengths
        arrow_len = [length * self.arrow_scale for length in barb_len]

        # Get barb angles
        barb_ang = [
            math.atan2(dy, dx / self.scaleratio) for dx, dy in zip(dif_x, dif_y)
        ]

        # Set angles to create arrow
        ang1 = [a + self.angle for a in barb_ang]
        ang2 = [a - self.angle for a in barb_ang]

        seg1_x = [length * math.cos(a) for length, a in zip(arrow_len, ang1)]
        seg1_y = [length * math.sin(a) for length, a in zip(arrow_len, ang1)]
        seg2_x = [length * math.cos(a) for length, a in zip(arrow_len, ang2)]
        seg2_y = [length * math.sin(a) for length, a in zip(arrow_len, ang2)]

        # Set coordinates to create arrow. NOTE: the original implementation
        # recomputed these four lists inside `for index in range(...)` whose
        # body never used `index` -- O(n^2) work for an identical result;
        # they are computed once here.
        point1_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg1_x)]
        point1_y = [i - j for i, j in zip(self.end_y, seg1_y)]
        point2_x = [i - j * self.scaleratio for i, j in zip(self.end_x, seg2_x)]
        point2_y = [i - j for i, j in zip(self.end_y, seg2_y)]

        # Combine lists to create arrow
        empty = [None] * len(self.end_x)
        arrow_x = utils.flatten(zip(point1_x, self.end_x, point2_x, empty))
        arrow_y = utils.flatten(zip(point1_y, self.end_y, point2_y, empty))
        return arrow_x, arrow_y
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_title.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    # Generated compound validator wiring the `layout.title` property to the
    # plotly.graph_objs.layout.Title data class; `data_docs` is the runtime
    # help text shown for the property's sub-attributes.
    def __init__(self, plotly_name="title", parent_name="layout", **kwargs):
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Title"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            font
                Sets the title font. Note that the title's font
                used to be customized by the now deprecated
                `titlefont` attribute.
            pad
                Sets the padding of the title. Each padding
                value only applies when the corresponding
                `xanchor`/`yanchor` value is set accordingly.
                E.g. for left padding to take effect, `xanchor`
                must be set to "left". The same rule applies if
                `xanchor`/`yanchor` is determined
                automatically. Padding is muted if the
                respective anchor value is "middle*/*center".
            text
                Sets the plot's title. Note that before the
                existence of `title.text`, the title's contents
                used to be defined as the `title` attribute
                itself. This behavior has been deprecated.
            x
                Sets the x position with respect to `xref` in
                normalized coordinates from 0 (left) to 1
                (right).
            xanchor
                Sets the title's horizontal alignment with
                respect to its x position. "left" means that
                the title starts at x, "right" means that the
                title ends at x and "center" means that the
                title's center is at x. "auto" divides `xref`
                by three and calculates the `xanchor` value
                automatically based on the value of `x`.
            xref
                Sets the container `x` refers to. "container"
                spans the entire `width` of the plot. "paper"
                refers to the width of the plotting area only.
            y
                Sets the y position with respect to `yref` in
                normalized coordinates from 0 (bottom) to 1
                (top). "auto" places the baseline of the title
                onto the vertical center of the top margin.
            yanchor
                Sets the title's vertical alignment with
                respect to its y position. "top" means that the
                title's cap line is at y, "bottom" means that
                the title's baseline is at y and "middle" means
                that the title's midline is at y. "auto"
                divides `yref` by three and calculates the
                `yanchor` value automatically based on the
                value of `y`.
            yref
                Sets the container `y` refers to. "container"
                spans the entire `height` of the plot. "paper"
                refers to the height of the plotting area only.
            """,
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotlywidget/__init__.py | def _jupyter_nbextension_paths():
return [
{
"section": "notebook",
"src": "static",
"dest": "plotlywidget",
"require": "plotlywidget/extension",
}
]
__frontend_version__ = "^0.1"
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/io/excel/test_odf.py | <filename>env/lib/python3.8/site-packages/pandas/tests/io/excel/test_odf.py
import functools
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
pytest.importorskip("odf")
@pytest.fixture(autouse=True)
def cd_and_set_engine(monkeypatch, datapath):
    # Force the "odf" engine for every pd.read_excel call in this module and
    # chdir into the excel test-data directory so bare filenames resolve.
    func = functools.partial(pd.read_excel, engine="odf")
    monkeypatch.setattr(pd, "read_excel", func)
    monkeypatch.chdir(datapath("io", "data", "excel"))
def test_read_invalid_types_raises():
    # An unknown cell value-type in the ODS XML should surface as ValueError.
    # the invalid_value_type.ods required manually editing
    # of the included content.xml file
    with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"):
        pd.read_excel("invalid_value_type.ods")
def test_read_writer_table():
    # Also test reading tables from an text OpenDocument file
    # (.odt)
    index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header")
    # The fixture's middle column is empty, so it reads back as an
    # all-NaN "Unnamed: 2" column.
    expected = pd.DataFrame(
        [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]],
        index=index,
        columns=["Column 1", "Unnamed: 2", "Column 3"],
    )
    result = pd.read_excel("writertable.odt", "Table1", index_col=0)
    tm.assert_frame_equal(result, expected)
def test_nonexistent_sheetname_raises(read_ext):
    # GH-27676
    # Specifying a non-existent sheet_name parameter should throw an error
    # with the sheet name.
    with pytest.raises(ValueError, match="sheet xyz not found"):
        pd.read_excel("blank.ods", sheet_name="xyz")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/util/test_util.py | import os
import pytest
import pandas.compat as compat
import pandas._testing as tm
def test_rands():
    # tm.rands(n) returns a random string of exactly n characters.
    r = tm.rands(10)
    assert len(r) == 10
def test_rands_array_1d():
    # A scalar size yields a 1-d array of random strings of the given width.
    arr = tm.rands_array(5, size=10)
    assert arr.shape == (10,)
    assert len(arr[0]) == 5
def test_rands_array_2d():
    # A tuple size yields an array of that shape; every element is a random
    # string of the requested width.
    arr = tm.rands_array(7, size=(10, 10))
    assert arr.shape == (10, 10)
    assert len(arr[1, 1]) == 7
def test_numpy_err_state_is_default():
    """Importing pandas must leave numpy's global FP error state untouched."""
    import numpy as np

    # The error state should be unchanged after that import.
    assert np.geterr() == {
        "over": "warn",
        "divide": "warn",
        "invalid": "warn",
        "under": "ignore",
    }
def test_convert_rows_list_to_csv_str():
    # Rows are joined with the platform-native line terminator, including a
    # trailing one after the last row.
    rows_list = ["aaa", "bbb", "ccc"]
    ret = tm.convert_rows_list_to_csv_str(rows_list)
    if compat.is_platform_windows():
        expected = "aaa\r\nbbb\r\nccc\r\n"
    else:
        expected = "aaa\nbbb\nccc\n"
    assert ret == expected
def test_create_temp_directory():
    # The directory exists while inside the context manager ...
    with tm.ensure_clean_dir() as path:
        assert os.path.exists(path)
        assert os.path.isdir(path)
    # ... and is removed again on exit.
    assert not os.path.exists(path)
@pytest.mark.parametrize("strict_data_files", [True, False])
def test_datapath_missing(datapath):
    # Regardless of the strict_data_files setting, asking the datapath
    # fixture for a nonexistent file raises ValueError.
    with pytest.raises(ValueError, match="Could not find file"):
        datapath("not_a_file")
def test_datapath(datapath):
    # datapath resolves file components relative to the pandas/tests package
    # directory (one level above this module).
    args = ("data", "iris.csv")
    result = datapath(*args)
    expected = os.path.join(os.path.dirname(os.path.dirname(__file__)), *args)
    assert result == expected
def test_rng_context():
    # tm.RNGContext seeds numpy's global RNG on entry and restores the
    # previous state on exit, so nested contexts each draw from their own
    # seed and the outer context resumes where it left off.
    import numpy as np

    expected0 = 1.764052345967664  # first randn() draw under seed 0
    expected1 = 1.6243453636632417  # first randn() draw under seed 1
    with tm.RNGContext(0):
        with tm.RNGContext(1):
            assert np.random.randn() == expected1
        assert np.random.randn() == expected0
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_angularaxis.py | import _plotly_utils.basevalidators
class AngularaxisValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Generated compound validator wiring the legacy `layout.angularaxis`
    # property to the AngularAxis data class; `data_docs` is the runtime
    # help text shown for the property's sub-attributes.
    def __init__(self, plotly_name="angularaxis", parent_name="layout", **kwargs):
        super(AngularaxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "AngularAxis"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            domain
                Polar chart subplots are not supported yet.
                This key has currently no effect.
            endpadding
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots.
            range
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Defines the start
                and end point of this angular axis.
            showline
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Determines whether
                or not the line bounding this angular axis will
                be shown on the figure.
            showticklabels
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Determines whether
                or not the angular axis ticks will feature tick
                labels.
            tickcolor
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Sets the color of
                the tick lines on this angular axis.
            ticklen
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Sets the length of
                the tick lines on this angular axis.
            tickorientation
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Sets the
                orientation (from the paper perspective) of the
                angular axis tick labels.
            ticksuffix
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Sets the length of
                the tick lines on this angular axis.
            visible
                Legacy polar charts are deprecated! Please
                switch to "polar" subplots. Determines whether
                or not this axis will be visible.
            """,
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/ternary/__init__.py | import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562), so all
    # submodules and classes must be imported eagerly.
    from ._aaxis import Aaxis
    from ._baxis import Baxis
    from ._caxis import Caxis
    from ._domain import Domain
    from . import aaxis
    from . import baxis
    from . import caxis
else:
    # On Python 3.7+, defer the submodule/class imports until first
    # attribute access to speed up importing plotly.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".aaxis", ".baxis", ".caxis"],
        ["._aaxis.Aaxis", "._baxis.Baxis", "._caxis.Caxis", "._domain.Domain"],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/base/test_factorize.py | <reponame>acrucetta/Chicago_COVI_WebApp
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize("sort", [True, False])
def test_factorize(index_or_series_obj, sort):
    obj = index_or_series_obj
    result_codes, result_uniques = obj.factorize(sort=sort)

    # MultiIndexes need the tuple-aware constructor to rebuild the uniques.
    if isinstance(obj, pd.MultiIndex):
        expected_uniques = pd.MultiIndex.from_tuples(obj.unique())
    else:
        expected_uniques = pd.Index(obj.unique())
    if sort:
        expected_uniques = expected_uniques.sort_values()

    # Map each unique value to its position so that
    # ``expected_uniques.take(expected_codes)`` reproduces ``obj``.
    positions = {val: i for i, val in enumerate(expected_uniques)}
    expected_codes = np.asarray([positions[val] for val in obj], dtype=np.intp)

    tm.assert_numpy_array_equal(result_codes, expected_codes)
    tm.assert_index_equal(result_uniques, expected_uniques)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/command/install.py | from __future__ import division, absolute_import, print_function
import sys
if 'setuptools' in sys.modules:
import setuptools.command.install as old_install_mod
have_setuptools = True
else:
import distutils.command.install as old_install_mod
have_setuptools = False
from distutils.file_util import write_file
old_install = old_install_mod.install
class install(old_install):
    """numpy's ``install`` command.

    Extends the distutils/setuptools install command so that installable
    C libraries (``install_clib``) are also run, and bridges the
    behavioral differences between setuptools and plain distutils.
    """
    # Always run install_clib - the command is cheap, so no need to bypass it;
    # but it's not run by setuptools -- so it's run again in install_data
    sub_commands = old_install.sub_commands + [
        ('install_clib', lambda x: True)
    ]
    def finalize_options (self):
        old_install.finalize_options(self)
        # Install everything (including pure-python modules) into the
        # platform-specific library base directory.
        self.install_lib = self.install_libbase
    def setuptools_run(self):
        """ The setuptools version of the .run() method.
        We must pull in the entire code so we can override the level used in the
        _getframe() call since we wrap this call by one more level.
        """
        from distutils.command.install import install as distutils_install
        # Explicit request for old-style install? Just do it
        if self.old_and_unmanageable or self.single_version_externally_managed:
            return distutils_install.run(self)
        # Attempt to detect whether we were called from setup() or by another
        # command. If we were called by setup(), our caller will be the
        # 'run_command' method in 'distutils.dist', and *its* caller will be
        # the 'run_commands' method. If we were called any other way, our
        # immediate caller *might* be 'run_command', but it won't have been
        # called by 'run_commands'. This is slightly kludgy, but seems to
        # work.
        #
        # NOTE: depth is 3 (not setuptools' usual 2) because this method is
        # itself called through one extra wrapper level (self.run below).
        caller = sys._getframe(3)
        caller_module = caller.f_globals.get('__name__', '')
        caller_name = caller.f_code.co_name
        if caller_module != 'distutils.dist' or caller_name!='run_commands':
            # We weren't called from the command line or setup(), so we
            # should run in backward-compatibility mode to support bdist_*
            # commands.
            distutils_install.run(self)
        else:
            self.do_egg_install()
    def run(self):
        # Dispatch to the setuptools- or distutils-flavored run, then
        # post-process the --record file if one was requested.
        if not have_setuptools:
            r = old_install.run(self)
        else:
            r = self.setuptools_run()
        if self.record:
            # bdist_rpm fails when INSTALLED_FILES contains
            # paths with spaces. Such paths must be enclosed
            # with double-quotes.
            with open(self.record, 'r') as f:
                lines = []
                need_rewrite = False
                for l in f:
                    l = l.rstrip()
                    if ' ' in l:
                        need_rewrite = True
                        l = '"%s"' % (l)
                    lines.append(l)
            if need_rewrite:
                self.execute(write_file,
                             (self.record, lines),
                             "re-writing list of installed files to '%s'" %
                             self.record)
        return r
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_densitymapbox.py | from plotly.graph_objs import Densitymapbox
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/_template.py | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Template(_BaseLayoutHierarchyType):
    """
    Default attributes to be applied to a figure: an optional template
    ``layout`` plus per-trace-type template traces in ``data``.  See the
    ``__init__`` docstring for the full application semantics.
    """
    # class properties
    # --------------------
    _parent_path_str = "layout"
    _path_str = "layout.template"
    _valid_props = {"data", "layout"}
    # data
    # ----
    @property
    def data(self):
        """
        The 'data' property is an instance of Data
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.layout.template.Data`
          - A dict of string/value properties that will be passed
            to the Data constructor
            Each supported property name is a trace type (``scatter``,
            ``bar``, ``heatmap``, ``pie``, ``surface``, ...) whose value
            is a tuple of trace instances of that type (or dicts with
            compatible properties) used cyclically as templates for
            traces of that type.
        Returns
        -------
        plotly.graph_objs.layout.template.Data
        """
        return self["data"]
    @data.setter
    def data(self, val):
        self["data"] = val
    # layout
    # ------
    @property
    def layout(self):
        """
        The 'layout' property is an instance of Layout
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.Layout`
          - A dict of string/value properties that will be passed
            to the Layout constructor
            Supported dict properties:
        Returns
        -------
        plotly.graph_objs.layout.template.Layout
        """
        return self["layout"]
    @layout.setter
    def layout(self, val):
        self["layout"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        data
            :class:`plotly.graph_objects.layout.template.Data`
            instance or dict with compatible properties
        layout
            :class:`plotly.graph_objects.Layout` instance or dict
            with compatible properties
        """
    def __init__(self, arg=None, data=None, layout=None, **kwargs):
        """
        Construct a new Template object
        Default attributes to be applied to the plot. This should be a
        dict with format: `{'layout': layoutTemplate, 'data':
        {trace_type: [traceTemplate, ...], ...}}` where
        `layoutTemplate` is a dict matching the structure of
        `figure.layout` and `traceTemplate` is a dict matching the
        structure of the trace with type `trace_type` (e.g. 'scatter').
        Alternatively, this may be specified as an instance of
        plotly.graph_objs.layout.Template. Trace templates are applied
        cyclically to traces of each type. Container arrays (eg
        `annotations`) have special handling: An object ending in
        `defaults` (eg `annotationdefaults`) is applied to each array
        item. But if an item has a `templateitemname` key we look in
        the template array for an item with matching `name` and apply
        that instead. If no matching `name` is found we mark the item
        invisible. Any named template item not referenced is appended
        to the end of the array, so this can be used to add a watermark
        annotation or a logo image, for example. To omit one of these
        items on the plot, make an item with matching
        `templateitemname` and `visible: false`.
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.Template`
        data
            :class:`plotly.graph_objects.layout.template.Data`
            instance or dict with compatible properties
        layout
            :class:`plotly.graph_objects.Layout` instance or dict
            with compatible properties
        Returns
        -------
        Template
        """
        super(Template, self).__init__("template")
        # Internal construction path: when created by a parent object we
        # only record the parent reference and skip validation entirely.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.Template
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Template`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in ``arg``.
        _v = arg.pop("data", None)
        _v = data if data is not None else _v
        if _v is not None:
            self["data"] = _v
        _v = arg.pop("layout", None)
        _v = layout if layout is not None else _v
        if _v is not None:
            self["layout"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/table/header/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562), so all
    # validator classes must be imported eagerly.
    from ._valuessrc import ValuessrcValidator
    from ._values import ValuesValidator
    from ._suffixsrc import SuffixsrcValidator
    from ._suffix import SuffixValidator
    from ._prefixsrc import PrefixsrcValidator
    from ._prefix import PrefixValidator
    from ._line import LineValidator
    from ._height import HeightValidator
    from ._formatsrc import FormatsrcValidator
    from ._format import FormatValidator
    from ._font import FontValidator
    from ._fill import FillValidator
    from ._alignsrc import AlignsrcValidator
    from ._align import AlignValidator
else:
    # On Python 3.7+, defer the validator imports until first attribute
    # access to speed up importing plotly.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._valuessrc.ValuessrcValidator",
            "._values.ValuesValidator",
            "._suffixsrc.SuffixsrcValidator",
            "._suffix.SuffixValidator",
            "._prefixsrc.PrefixsrcValidator",
            "._prefix.PrefixValidator",
            "._line.LineValidator",
            "._height.HeightValidator",
            "._formatsrc.FormatsrcValidator",
            "._format.FormatValidator",
            "._font.FontValidator",
            "._fill.FillValidator",
            "._alignsrc.AlignsrcValidator",
            "._align.AlignValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/widgets.py | <filename>env/lib/python3.8/site-packages/plotly/widgets.py<gh_stars>1000+
from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
# plotly.widgets was moved out of this package; importing this module now
# delegates to _chart_studio_error, which presumably raises an informative
# error pointing users at the separate chart-studio package -- confirm in
# _plotly_future_.
_chart_studio_error("widgets")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_modebar.py | import _plotly_utils.basevalidators
class ModebarValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator that coerces values assigned to
    # ``layout.modebar`` into Modebar objects.
    def __init__(self, plotly_name="modebar", parent_name="layout", **kwargs):
        super(ModebarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Modebar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            activecolor
                Sets the color of the active or hovered on
                icons in the modebar.
            bgcolor
                Sets the background color of the modebar.
            color
                Sets the color of the icons in the modebar.
            orientation
                Sets the orientation of the modebar.
            uirevision
                Controls persistence of user-driven changes
                related to the modebar, including `hovermode`,
                `dragmode`, and `showspikes` at both the root
                level and inside subplots. Defaults to
                `layout.uirevision`.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_to_timestamp.py | from datetime import timedelta
import pytest
from pandas import PeriodIndex, Series, Timedelta, date_range, period_range, to_datetime
import pandas._testing as tm
class TestToTimestamp:
    """Tests for Series.to_timestamp on PeriodIndex-backed Series."""
    def test_to_timestamp(self):
        # Annual periods with how="end" should land on the last nanosecond
        # of each period; how="start" on the first instant.
        index = period_range(freq="A", start="1/1/2001", end="12/1/2009")
        series = Series(1, index=index, name="foo")
        exp_index = date_range("1/1/2001", end="12/31/2009", freq="A-DEC")
        result = series.to_timestamp(how="end")
        # end-of-period = start of next period minus one nanosecond
        exp_index = exp_index + Timedelta(1, "D") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        assert result.name == "foo"
        exp_index = date_range("1/1/2001", end="1/1/2009", freq="AS-JAN")
        result = series.to_timestamp(how="start")
        tm.assert_index_equal(result.index, exp_index)
        def _get_with_delta(delta, freq="A-DEC"):
            # Helper: annual date range shifted by ``delta``.
            return date_range(
                to_datetime("1/1/2001") + delta,
                to_datetime("12/31/2009") + delta,
                freq=freq,
            )
        # Resampling the period end to coarser timestamp frequencies
        # ("H", "T", "S") keeps the last-instant convention at each unit.
        delta = timedelta(hours=23)
        result = series.to_timestamp("H", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "h") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        delta = timedelta(hours=23, minutes=59)
        result = series.to_timestamp("T", "end")
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "m") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        result = series.to_timestamp("S", "end")
        delta = timedelta(hours=23, minutes=59, seconds=59)
        exp_index = _get_with_delta(delta)
        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        # Hourly periods behave analogously.
        index = period_range(freq="H", start="1/1/2001", end="1/2/2001")
        series = Series(1, index=index, name="foo")
        exp_index = date_range("1/1/2001 00:59:59", end="1/2/2001 00:59:59", freq="H")
        result = series.to_timestamp(how="end")
        exp_index = exp_index + Timedelta(1, "s") - Timedelta(1, "ns")
        tm.assert_index_equal(result.index, exp_index)
        assert result.name == "foo"
    def test_to_timestamp_raises(self, index):
        # to_timestamp is only defined for PeriodIndex-backed Series.
        # https://github.com/pandas-dev/pandas/issues/33327
        ser = Series(index=index, dtype=object)
        if not isinstance(index, PeriodIndex):
            msg = f"unsupported Type {type(index).__name__}"
            with pytest.raises(TypeError, match=msg):
                ser.to_timestamp()
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_isin.py | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex, Series
import pandas._testing as tm
class TestDataFrameIsIn:
    """Tests for DataFrame.isin against lists, dicts, Series, DataFrames
    and MultiIndex-backed frames."""

    def test_isin(self):
        # GH#4211
        df = DataFrame(
            {
                "vals": [1, 2, 3, 4],
                "ids": ["a", "b", "f", "n"],
                "ids2": ["a", "n", "c", "n"],
            },
            index=["foo", "bar", "baz", "qux"],
        )
        other = ["a", "b", "c"]
        result = df.isin(other)
        # Row-wise Series.isin results stacked back into a frame.
        expected = DataFrame([df.loc[s].isin(other) for s in df.index])
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
    def test_isin_empty(self, empty):
        # GH#16991 - an empty collection matches nothing.
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        expected = DataFrame(False, df.index, df.columns)
        result = df.isin(empty)
        tm.assert_frame_equal(result, expected)

    def test_isin_dict(self):
        # A dict restricts matching to the named columns only.
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        d = {"A": ["a"]}
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, "A"] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)
        # non unique columns
        df = DataFrame({"A": ["a", "b", "c"], "B": ["a", "e", "f"]})
        df.columns = ["A", "A"]
        expected = DataFrame(False, df.index, df.columns)
        expected.loc[0, "A"] = True
        result = df.isin(d)
        tm.assert_frame_equal(result, expected)

    def test_isin_with_string_scalar(self):
        # GH#4763 - a bare string is not a valid collection of values.
        df = DataFrame(
            {
                "vals": [1, 2, 3, 4],
                "ids": ["a", "b", "f", "n"],
                "ids2": ["a", "n", "c", "n"],
            },
            index=["foo", "bar", "baz", "qux"],
        )
        with pytest.raises(TypeError):
            df.isin("a")
        with pytest.raises(TypeError):
            df.isin("aaa")

    def test_isin_df(self):
        # DataFrame vs DataFrame aligns on both index and columns.
        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
        df2 = DataFrame({"A": [0, 2, 12, 4], "B": [2, np.nan, 4, 5]})
        expected = DataFrame(False, df1.index, df1.columns)
        result = df1.isin(df2)
        # BUG FIX: use a single .loc[rows, col] call instead of chained
        # indexing (expected["A"].loc[...] = True), which is deprecated and
        # silently fails to propagate under copy-on-write semantics.
        expected.loc[[1, 3], "A"] = True
        expected.loc[[0, 2], "B"] = True
        tm.assert_frame_equal(result, expected)
        # partial overlapping columns
        df2.columns = ["A", "C"]
        result = df1.isin(df2)
        expected["B"] = False
        tm.assert_frame_equal(result, expected)

    def test_isin_tuples(self):
        # GH#16394 - tuples stored in an object column match element-wise.
        df = DataFrame({"A": [1, 2, 3], "B": ["a", "b", "f"]})
        df["C"] = list(zip(df["A"], df["B"]))
        result = df["C"].isin([(1, "a")])
        tm.assert_series_equal(result, Series([True, False, False], name="C"))

    def test_isin_df_dupe_values(self):
        # Duplicated labels on the other frame are ambiguous and must raise.
        df1 = DataFrame({"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]})
        # just cols duped
        df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]], columns=["B", "B"])
        with pytest.raises(ValueError):
            df1.isin(df2)
        # just index duped
        df2 = DataFrame(
            [[0, 2], [12, 4], [2, np.nan], [4, 5]],
            columns=["A", "B"],
            index=[0, 0, 1, 1],
        )
        with pytest.raises(ValueError):
            df1.isin(df2)
        # cols and index:
        df2.columns = ["B", "B"]
        with pytest.raises(ValueError):
            df1.isin(df2)

    def test_isin_dupe_self(self):
        # Duplicated columns on the *calling* frame are allowed.
        other = DataFrame({"A": [1, 0, 1, 0], "B": [1, 1, 0, 0]})
        df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=["A", "A"])
        result = df.isin(other)
        expected = DataFrame(False, index=df.index, columns=df.columns)
        expected.loc[0] = True
        expected.iloc[1, 1] = True
        tm.assert_frame_equal(result, expected)

    def test_isin_against_series(self):
        # Series comparison aligns on the index, broadcast across columns.
        df = DataFrame(
            {"A": [1, 2, 3, 4], "B": [2, np.nan, 4, 4]}, index=["a", "b", "c", "d"]
        )
        s = Series([1, 3, 11, 4], index=["a", "b", "c", "d"])
        expected = DataFrame(False, index=df.index, columns=df.columns)
        # BUG FIX: single .loc call instead of chained indexing (see above).
        expected.loc["a", "A"] = True
        expected.loc["d"] = True
        result = df.isin(s)
        tm.assert_frame_equal(result, expected)

    def test_isin_multiIndex(self):
        idx = MultiIndex.from_tuples(
            [
                (0, "a", "foo"),
                (0, "a", "bar"),
                (0, "b", "bar"),
                (0, "b", "baz"),
                (2, "a", "foo"),
                (2, "a", "bar"),
                (2, "c", "bar"),
                (2, "c", "baz"),
                (1, "b", "foo"),
                (1, "b", "bar"),
                (1, "c", "bar"),
                (1, "c", "baz"),
            ]
        )
        df1 = DataFrame({"A": np.ones(12), "B": np.zeros(12)}, index=idx)
        df2 = DataFrame(
            {
                "A": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                "B": [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1],
            }
        )
        # against regular index
        expected = DataFrame(False, index=df1.index, columns=df1.columns)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)
        df2.index = idx
        # BUG FIX: use the builtin ``bool`` -- the ``np.bool`` alias was
        # deprecated in NumPy 1.20 and removed in NumPy 1.24.
        expected = df2.values.astype(bool)
        expected[:, 1] = ~expected[:, 1]
        expected = DataFrame(expected, columns=["A", "B"], index=idx)
        result = df1.isin(df2)
        tm.assert_frame_equal(result, expected)

    def test_isin_empty_datetimelike(self):
        # GH#15473 - empty "other" against datetime/timedelta columns.
        df1_ts = DataFrame({"date": pd.to_datetime(["2014-01-01", "2014-01-02"])})
        df1_td = DataFrame({"date": [pd.Timedelta(1, "s"), pd.Timedelta(2, "s")]})
        df2 = DataFrame({"date": []})
        df3 = DataFrame()
        expected = DataFrame({"date": [False, False]})
        result = df1_ts.isin(df2)
        tm.assert_frame_equal(result, expected)
        result = df1_ts.isin(df3)
        tm.assert_frame_equal(result, expected)
        result = df1_td.isin(df2)
        tm.assert_frame_equal(result, expected)
        result = df1_td.isin(df3)
        tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_candlestick.py | from plotly.graph_objs import Candlestick
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/delta/_decreasing.py | import _plotly_utils.basevalidators
class DecreasingValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator that coerces values assigned to
    # ``indicator.delta.decreasing`` into Decreasing objects.
    def __init__(
        self, plotly_name="decreasing", parent_name="indicator.delta", **kwargs
    ):
        super(DecreasingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Decreasing"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the color for increasing value.
            symbol
                Sets the symbol to display for increasing value
""",
            ),
            # NOTE(review): the descriptions above say "increasing" although
            # this validator is for the *decreasing* delta -- looks like an
            # upstream plotly schema typo; the runtime string is kept verbatim.
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/matplotlylib/mplexporter/renderers/vega_renderer.py | import warnings
import json
import random
from .base import Renderer
from ..exporter import Exporter
class VegaRenderer(Renderer):
    """Renderer that accumulates matplotlib draw calls as a Vega spec.

    The Exporter walks a matplotlib figure and calls the ``open_*`` /
    ``draw_*`` hooks below; the resulting ``data``, ``scales``, ``axes``
    and ``marks`` lists form the Vega specification consumed by VegaHTML.
    """

    def open_figure(self, fig, props):
        # Convert the figure size from inches to pixels and reset the spec.
        self.props = props
        self.figwidth = int(props['figwidth'] * props['dpi'])
        self.figheight = int(props['figheight'] * props['dpi'])
        self.data = []
        self.scales = []
        self.axes = []
        self.marks = []

    def open_axes(self, ax, props):
        # Only a single axes is supported; a second call overwrites the first.
        if len(self.axes) > 0:
            warnings.warn("multiple axes not yet supported")
        self.axes = [dict(type="x", scale="x", ticks=10),
                     dict(type="y", scale="y", ticks=10)]
        self.scales = [dict(name="x",
                            domain=props['xlim'],
                            type="linear",
                            range="width",
                        ),
                       dict(name="y",
                            domain=props['ylim'],
                            type="linear",
                            range="height",
                        ),]

    def draw_line(self, data, coordinates, style, label, mplobj=None):
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
            # BUG FIX: previously execution fell through and the line was
            # drawn anyway, contradicting the warning message.
            return
        dataname = "table{0:03d}".format(len(self.data) + 1)
        # TODO: respect the other style settings
        self.data.append({'name': dataname,
                          'values': [dict(x=d[0], y=d[1]) for d in data]})
        self.marks.append({'type': 'line',
                           'from': {'data': dataname},
                           'properties': {
                               "enter": {
                                   "interpolate": {"value": "monotone"},
                                   "x": {"scale": "x", "field": "data.x"},
                                   "y": {"scale": "y", "field": "data.y"},
                                   "stroke": {"value": style['color']},
                                   "strokeOpacity": {"value": style['alpha']},
                                   "strokeWidth": {"value": style['linewidth']},
                               }
                           }
                           })

    def draw_markers(self, data, coordinates, style, label, mplobj=None):
        if coordinates != 'data':
            warnings.warn("Only data coordinates supported. Skipping this")
            # BUG FIX: previously execution fell through and the markers
            # were drawn anyway, contradicting the warning message.
            return
        dataname = "table{0:03d}".format(len(self.data) + 1)
        # TODO: respect the other style settings
        self.data.append({'name': dataname,
                          'values': [dict(x=d[0], y=d[1]) for d in data]})
        self.marks.append({'type': 'symbol',
                           'from': {'data': dataname},
                           'properties': {
                               "enter": {
                                   "interpolate": {"value": "monotone"},
                                   "x": {"scale": "x", "field": "data.x"},
                                   "y": {"scale": "y", "field": "data.y"},
                                   "fill": {"value": style['facecolor']},
                                   "fillOpacity": {"value": style['alpha']},
                                   "stroke": {"value": style['edgecolor']},
                                   "strokeOpacity": {"value": style['alpha']},
                                   "strokeWidth": {"value": style['edgewidth']},
                               }
                           }
                           })

    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        # Only axis labels are translated into the Vega spec.
        if text_type == 'xlabel':
            self.axes[0]['title'] = text
        elif text_type == 'ylabel':
            self.axes[1]['title'] = text
class VegaHTML(object):
    """IPython display wrapper around a completed VegaRenderer spec."""

    def __init__(self, renderer):
        # Snapshot the renderer's accumulated spec into a plain dict.
        self.specification = {
            "width": renderer.figwidth,
            "height": renderer.figheight,
            "data": renderer.data,
            "scales": renderer.scales,
            "axes": renderer.axes,
            "marks": renderer.marks,
        }

    def html(self):
        """Build the HTML representation for IPython."""
        vis_id = random.randint(0, 2 ** 16)
        pieces = [
            '<div id="vis%d"></div>' % vis_id,
            '<script>\n',
            VEGA_TEMPLATE % (json.dumps(self.specification), vis_id),
            '</script>\n',
        ]
        return "".join(pieces)

    def _repr_html_(self):
        return self.html()
def fig_to_vega(fig, notebook=False):
    """Convert a matplotlib figure to vega dictionary
    if notebook=True, then return an object which will display in a notebook
    otherwise, return an HTML string.
    """
    renderer = VegaRenderer()
    Exporter(renderer).run(fig)
    wrapped = VegaHTML(renderer)
    # In a notebook the wrapper displays itself via _repr_html_;
    # otherwise hand back the raw HTML string.
    return wrapped if notebook else wrapped.html()
VEGA_TEMPLATE = """
( function() {
var _do_plot = function() {
if ( (typeof vg == 'undefined') && (typeof IPython != 'undefined')) {
$([IPython.events]).on("vega_loaded.vincent", _do_plot);
return;
}
vg.parse.spec(%s, function(chart) {
chart({el: "#vis%d"}).update();
});
};
_do_plot();
})();
"""
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_astype.py | from pandas import Series, date_range
import pandas._testing as tm
class TestAstype:
    """Tests that astype(str) on datetime64 Series matches DatetimeIndex
    string formatting."""

    def test_astype_dt64_to_str(self):
        # GH#10442 : naive datetimes at midnight stringify as plain ISO dates.
        dti = date_range("2012-01-01", periods=3)
        converted = Series(dti).astype(str)
        expected = Series(
            ["2012-01-01", "2012-01-02", "2012-01-03"], dtype=object
        )
        tm.assert_series_equal(converted, expected)

    def test_astype_dt64tz_to_str(self):
        # GH#10442 : tz-aware values keep time and UTC offset in string form.
        dti_tz = date_range("2012-01-01", periods=3, tz="US/Eastern")
        converted = Series(dti_tz).astype(str)
        expected = Series(
            [
                "2012-01-01 00:00:00-05:00",
                "2012-01-02 00:00:00-05:00",
                "2012-01-03 00:00:00-05:00",
            ],
            dtype=object,
        )
        tm.assert_series_equal(converted, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/colorscale/__init__.py | import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562), so all
    # validator classes must be imported eagerly.
    from ._sequentialminus import SequentialminusValidator
    from ._sequential import SequentialValidator
    from ._diverging import DivergingValidator
else:
    # On Python 3.7+, defer the validator imports until first attribute
    # access to speed up importing plotly.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sequentialminus.SequentialminusValidator",
            "._sequential.SequentialValidator",
            "._diverging.DivergingValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/files.py | from __future__ import absolute_import
from _plotly_utils.files import *
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/figure_factory/_2d_density.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/figure_factory/_2d_density.py
from __future__ import absolute_import
from numbers import Number
import plotly.exceptions
import plotly.colors as clrs
from plotly.graph_objs import graph_objs
def make_linear_colorscale(colors):
    """
    Makes a list of colors into a colorscale-acceptable form
    For documentation regarding to the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale
    """
    step = 1.0 / (len(colors) - 1)
    colorscale = []
    for position, color in enumerate(colors):
        colorscale.append([position * step, color])
    return colorscale
def create_2d_density(
    x,
    y,
    colorscale="Earth",
    ncontours=20,
    hist_color=(0, 0, 0.5),
    point_color=(0, 0, 0.5),
    point_size=2,
    title="2D Density Plot",
    height=600,
    width=600,
):
    """
    **deprecated**, use instead
    :func:`plotly.express.density_heatmap`.

    Build a figure with a central scatter + 2D contour-histogram and
    marginal 1D histograms along the top and right edges.

    :param (list|array) x: x-axis data for plot generation
    :param (list|array) y: y-axis data for plot generation
    :param (str|tuple|list) colorscale: a plotly scale name, an rgb or hex
        color, a color tuple, or a list/tuple of such colors. An rgb color
        has the form 'rgb(x, y, z)' with x, y, z in [0, 255]; a color tuple
        is (a, b, c) with a, b, c in [0, 1].
    :param (int) ncontours: the number of 2D contours to draw on the plot
    :param (str) hist_color: the color of the plotted histograms
    :param (str) point_color: the color of the scatter points
    :param (str) point_size: the size of the scatter points
    :param (str) title: set the title for the plot
    :param (float) height: the height of the chart
    :param (float) width: the width of the chart
    """
    # Both coordinate sequences must contain only numbers.
    for sample in (x, y):
        if not all(isinstance(element, Number) for element in sample):
            raise plotly.exceptions.PlotlyError(
                "All elements of your 'x' and 'y' lists must be numbers."
            )
    # ... and must be paired one-to-one.
    if len(x) != len(y):
        raise plotly.exceptions.PlotlyError(
            "Both lists 'x' and 'y' must be the same length."
        )
    # Normalize every color spec to 'rgb(...)' strings before building traces.
    colorscale = make_linear_colorscale(clrs.validate_colors(colorscale, "rgb"))
    hist_color = clrs.validate_colors(hist_color, "rgb")
    point_color = clrs.validate_colors(point_color, "rgb")
    scatter_points = graph_objs.Scatter(
        x=x,
        y=y,
        mode="markers",
        name="points",
        marker=dict(color=point_color[0], size=point_size, opacity=0.4),
    )
    density_contour = graph_objs.Histogram2dContour(
        x=x,
        y=y,
        name="density",
        ncontours=ncontours,
        colorscale=colorscale,
        reversescale=True,
        showscale=False,
    )
    # Marginal histograms live on the secondary axes (y2 top, x2 right).
    top_hist = graph_objs.Histogram(
        x=x, name="x density", marker=dict(color=hist_color[0]), yaxis="y2"
    )
    side_hist = graph_objs.Histogram(
        y=y, name="y density", marker=dict(color=hist_color[0]), xaxis="x2"
    )
    # Main axes take 85% of each dimension; the marginals get the rest.
    layout = graph_objs.Layout(
        showlegend=False,
        autosize=False,
        title=title,
        height=height,
        width=width,
        xaxis=dict(domain=[0, 0.85], showgrid=False, zeroline=False),
        yaxis=dict(domain=[0, 0.85], showgrid=False, zeroline=False),
        margin=dict(t=50),
        hovermode="closest",
        bargap=0,
        xaxis2=dict(domain=[0.85, 1], showgrid=False, zeroline=False),
        yaxis2=dict(domain=[0.85, 1], showgrid=False, zeroline=False),
    )
    return graph_objs.Figure(
        data=[scatter_points, density_contour, top_hist, side_hist],
        layout=layout,
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/compat.py | """Small modules to cope with python 2 vs 3 incompatibilities inside
numpy.distutils
"""
from __future__ import division, absolute_import, print_function
import sys
def get_exception():
    """Return the exception instance currently being handled (or None)."""
    _, exc_value, _ = sys.exc_info()
    return exc_value
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_updatemenus.py | <gh_stars>1000+
import _plotly_utils.basevalidators
class UpdatemenusValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    # Validator for the ``layout.updatemenus`` array property; delegates the
    # per-element schema to the generated ``Updatemenu`` data class.
    def __init__(self, plotly_name="updatemenus", parent_name="layout", **kwargs):
        # data_docs is user-facing help text surfaced in plotly validation
        # error messages; it mirrors the plotly.js layout.updatemenu schema.
        super(UpdatemenusValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Updatemenu"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            active
                Determines which button (by index starting from
                0) is considered active.
            bgcolor
                Sets the background color of the update menu
                buttons.
            bordercolor
                Sets the color of the border enclosing the
                update menu.
            borderwidth
                Sets the width (in px) of the border enclosing
                the update menu.
            buttons
                A tuple of :class:`plotly.graph_objects.layout.
                updatemenu.Button` instances or dicts with
                compatible properties
            buttondefaults
                When used in a template (as layout.template.lay
                out.updatemenu.buttondefaults), sets the
                default property values to use for elements of
                layout.updatemenu.buttons
            direction
                Determines the direction in which the buttons
                are laid out, whether in a dropdown menu or a
                row/column of buttons. For `left` and `up`, the
                buttons will still appear in left-to-right or
                top-to-bottom order respectively.
            font
                Sets the font of the update menu button text.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            pad
                Sets the padding around the buttons or dropdown
                menu.
            showactive
                Highlights active dropdown item or active
                button if true.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            type
                Determines whether the buttons are accessible
                via a dropdown menu or whether the buttons are
                stacked horizontally or vertically
            visible
                Determines whether or not the update menu is
                visible.
            x
                Sets the x position (in normalized coordinates)
                of the update menu.
            xanchor
                Sets the update menu's horizontal position
                anchor. This anchor binds the `x` position to
                the "left", "center" or "right" of the range
                selector.
            y
                Sets the y position (in normalized coordinates)
                of the update menu.
            yanchor
                Sets the update menu's vertical position anchor
                This anchor binds the `y` position to the
                "top", "middle" or "bottom" of the range
                selector.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_violin.py | <gh_stars>1000+
from plotly.graph_objs import Violin
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/window/numba_.py | from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
check_kwargs_and_nopython,
get_jit_arguments,
jit_user_function,
)
def generate_numba_apply_func(
    args: Tuple,
    kwargs: Dict[str, Any],
    func: Callable[..., Scalar],
    engine_kwargs: Optional[Dict[str, bool]],
):
    """
    Generate a numba jitted apply function specified by values from engine_kwargs.
    1. jit the user's function
    2. Return a rolling apply function with the jitted function inline
    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the rolling apply function.
    Parameters
    ----------
    args : tuple
        *args to be passed into the function
    kwargs : dict
        **kwargs to be passed into the function
    func : function
        function to be applied to each window and will be JITed
    engine_kwargs : dict
        dictionary of arguments to be passed into numba.jit
    Returns
    -------
    Numba function
    """
    nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
    # numba cannot forward **kwargs into a nopython-jitted function; this
    # raises early if the caller supplied kwargs together with nopython.
    check_kwargs_and_nopython(kwargs, nopython)
    numba_func = jit_user_function(func, nopython, nogil, parallel)
    numba = import_optional_dependency("numba")
    if parallel:
        # prange lets numba parallelize independent window evaluations.
        loop_range = numba.prange
    else:
        loop_range = range
    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def roll_apply(
        values: np.ndarray, begin: np.ndarray, end: np.ndarray, minimum_periods: int,
    ) -> np.ndarray:
        # begin/end hold the half-open [start, stop) bounds of each window.
        result = np.empty(len(begin))
        for i in loop_range(len(result)):
            start = begin[i]
            stop = end[i]
            window = values[start:stop]
            count_nan = np.sum(np.isnan(window))
            if len(window) - count_nan >= minimum_periods:
                result[i] = numba_func(window, *args)
            else:
                # Too few valid observations in this window -> missing.
                result[i] = np.nan
        return result
    return roll_apply
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexes/multi/test_contains.py | import numpy as np
import pytest
from pandas.compat import PYPY
import pandas as pd
from pandas import MultiIndex
import pandas._testing as tm
def test_contains_top_level():
    # Scalar membership matches labels of the first level, even though the
    # underlying engine only indexes complete tuples.
    outer = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 1), ("B", 2)])
    assert "A" in outer
    assert "A" not in outer._engine
def test_contains_with_nat():
    # A MultiIndex holding a missing datetime (code -1 -> NaT) must still
    # answer membership for explicit tuples and for each of its own rows.
    dates = pd.date_range("2012-01-01", periods=5)
    mi = MultiIndex(
        levels=[["C"], dates],
        codes=[[0] * 6, [-1, 0, 1, 2, 3, 4]],
        names=[None, "B"],
    )
    assert ("C", pd.Timestamp("2012-01-01")) in mi
    for entry in mi.values:
        assert entry in mi
def test_contains(idx):
assert ("foo", "two") in idx
assert ("bar", "two") not in idx
assert None not in idx
@pytest.mark.skipif(not PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_pypy():
    # PyPy compares tuple elements recursively, so a NaN inside a query
    # tuple matches a NaN label regardless of which NaN spelling is used.
    mi = MultiIndex.from_arrays([["foo", "bar"], [1.0, np.nan]])
    for nan in (np.nan, float("nan")):
        tm.assert_numpy_array_equal(
            mi.isin([("bar", nan)]), np.array([False, True])
        )
def test_isin():
    targets = [("foo", 2), ("bar", 3), ("quux", 4)]
    mi = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)])
    tm.assert_numpy_array_equal(
        mi.isin(targets), np.array([False, False, True, True])
    )
    # An empty MultiIndex yields an empty boolean mask, still dtype bool.
    empty = MultiIndex.from_arrays([[], []])
    mask = empty.isin(targets)
    assert len(mask) == 0
    assert mask.dtype == np.bool_
@pytest.mark.skipif(PYPY, reason="tuples cmp recursively on PyPy")
def test_isin_nan_not_pypy():
    # On CPython a NaN inside a tuple never compares equal, so neither NaN
    # spelling finds the ("bar", nan) entry.
    mi = MultiIndex.from_arrays([["foo", "bar"], [1.0, np.nan]])
    for nan in (np.nan, float("nan")):
        tm.assert_numpy_array_equal(
            mi.isin([("bar", nan)]), np.array([False, False])
        )
def test_isin_level_kwarg():
    mi = MultiIndex.from_arrays([["qux", "baz", "foo", "bar"], np.arange(4)])
    level0_vals = ["foo", "bar", "quux"]
    level1_vals = [2, 3, 10]
    expected = np.array([False, False, True, True])
    # Positive and negative level numbers address the same level.
    for level in (0, -2):
        tm.assert_numpy_array_equal(expected, mi.isin(level0_vals, level=level))
    for level in (1, -1):
        tm.assert_numpy_array_equal(expected, mi.isin(level1_vals, level=level))
    # Out-of-range integer levels raise IndexError...
    msg = "Too many levels: Index has only 2 levels, not 6"
    with pytest.raises(IndexError, match=msg):
        mi.isin(level0_vals, level=5)
    msg = "Too many levels: Index has only 2 levels, -5 is not a valid level number"
    with pytest.raises(IndexError, match=msg):
        mi.isin(level0_vals, level=-5)
    # ...while non-integer or unknown level labels raise KeyError.
    with pytest.raises(KeyError, match=r"'Level 1\.0 not found'"):
        mi.isin(level0_vals, level=1.0)
    with pytest.raises(KeyError, match=r"'Level -1\.0 not found'"):
        mi.isin(level1_vals, level=-1.0)
    with pytest.raises(KeyError, match="'Level A not found'"):
        mi.isin(level1_vals, level="A")
    mi.names = ["A", "B"]
    tm.assert_numpy_array_equal(expected, mi.isin(level0_vals, level="A"))
    tm.assert_numpy_array_equal(expected, mi.isin(level1_vals, level="B"))
    with pytest.raises(KeyError, match="'Level C not found'"):
        mi.isin(level1_vals, level="C")
def test_contains_with_missing_value():
    # GH 19132
    # NaN is a member of a single-level index that carries it, but for a
    # multi-level index only the full tuple containing NaN matches.
    assert np.nan in MultiIndex.from_arrays([[1, np.nan, 2]])
    mi = MultiIndex.from_arrays([[1, 2], [np.nan, 3]])
    assert np.nan not in mi
    assert (1, np.nan) in mi
@pytest.mark.parametrize(
    "labels,expected,level",
    [
        ([("b", np.nan)], np.array([False, False, True]), None,),
        ([np.nan, "a"], np.array([True, True, False]), 0),
        (["d", np.nan], np.array([False, True, True]), 1),
    ],
)
def test_isin_multi_index_with_missing_value(labels, expected, level):
    # GH 19132: isin must treat NaN labels as matchable, both for whole
    # tuples (level=None) and within a single level.
    mi = MultiIndex.from_arrays([[np.nan, "a", "b"], ["c", "d", np.nan]])
    tm.assert_numpy_array_equal(mi.isin(labels, level=level), expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_table.py | from plotly.graph_objs import Table
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/random/tests/test_extending.py | <filename>env/lib/python3.8/site-packages/numpy/random/tests/test_extending.py
import os, sys
import pytest
import warnings
import shutil
import subprocess
# Each optional tool (cffi, numba, cython) is probed at import time; the
# module-level name is set to None whenever the tool is missing or unusable,
# so the tests below can skip themselves via pytest.mark.skipif.
try:
    import cffi
except ImportError:
    cffi = None
if sys.flags.optimize > 1:
    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
    # cffi cannot succeed
    cffi = None
try:
    with warnings.catch_warnings(record=True) as w:
        # numba issue gh-4733
        warnings.filterwarnings('always', '', DeprecationWarning)
        import numba
except ImportError:
    numba = None
try:
    import cython
    from Cython.Compiler.Version import version as cython_version
except ImportError:
    cython = None
else:
    from distutils.version import LooseVersion
    # Cython 0.29.14 is required for Python 3.8 and there are
    # other fixes in the 0.29 series that are needed even for earlier
    # Python versions.
    # Note: keep in sync with the one in pyproject.toml
    required_version = LooseVersion('0.29.14')
    if LooseVersion(cython_version) < required_version:
        # too old or wrong cython, skip the test
        cython = None
@pytest.mark.skipif(cython is None, reason="requires cython")
@pytest.mark.slow
def test_cython(tmp_path):
    # Build the bundled Cython example project in a temp dir to prove the
    # public numpy.random Cython API can be cimported and compiled.
    examples = os.path.join(os.path.dirname(__file__), '..', '_examples')
    # CPython 3.5 and below does not handle __fspath__ well: see bpo-26027
    shutil.copytree(examples, str(tmp_path / '_examples'))
    subprocess.check_call([sys.executable, 'setup.py', 'build'],
                          cwd=str(tmp_path / '_examples' / 'cython'))
@pytest.mark.skipif(numba is None or cffi is None,
                    reason="requires numba and cffi")
def test_numba():
    # Importing the example module executes numba-jitted code that consumes
    # the numpy.random cffi interface; a clean import is the assertion.
    from numpy.random._examples.numba import extending
@pytest.mark.skipif(cffi is None, reason="requires cffi")
def test_cffi():
    # Importing the example exercises the exported C ABI through cffi;
    # a clean import is the assertion.
    from numpy.random._examples.cffi import extending
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/core/reshape/melt.py | <filename>env/lib/python3.8/site-packages/pandas/core/reshape/melt.py<gh_stars>1000+
import re
from typing import List
import numpy as np
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import is_extension_array_dtype, is_list_like
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.generic import ABCMultiIndex
from pandas.core.dtypes.missing import notna
from pandas.core.arrays import Categorical
import pandas.core.common as com
from pandas.core.frame import DataFrame, _shared_docs
from pandas.core.indexes.base import Index
from pandas.core.reshape.concat import concat
from pandas.core.tools.numeric import to_numeric
@Appender(
    _shared_docs["melt"]
    % dict(caller="pd.melt(df, ", versionadded="", other="DataFrame.melt")
)
def melt(
    frame: DataFrame,
    id_vars=None,
    value_vars=None,
    var_name=None,
    value_name="value",
    col_level=None,
) -> DataFrame:
    # TODO: what about the existing index?
    # With a MultiIndex, collect the labels of every level so membership of
    # `id_vars`/`value_vars` can be checked against any of them.
    if isinstance(frame.columns, ABCMultiIndex):
        known_labels = [lbl for tup in frame.columns for lbl in tup]
    else:
        known_labels = list(frame.columns)
    if id_vars is None:
        id_vars = []
    elif not is_list_like(id_vars):
        id_vars = [id_vars]
    elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(id_vars, list):
        raise ValueError(
            "id_vars must be a list of tuples when columns are a MultiIndex"
        )
    else:
        # Check that `id_vars` are in frame
        id_vars = list(id_vars)
        missing = Index(com.flatten(id_vars)).difference(known_labels)
        if not missing.empty:
            raise KeyError(
                "The following 'id_vars' are not present "
                "in the DataFrame: {missing}"
                "".format(missing=list(missing))
            )
    if value_vars is not None:
        if not is_list_like(value_vars):
            value_vars = [value_vars]
        elif isinstance(frame.columns, ABCMultiIndex) and not isinstance(
            value_vars, list
        ):
            raise ValueError(
                "value_vars must be a list of tuples when columns are a MultiIndex"
            )
        else:
            # Check that `value_vars` are in frame
            value_vars = list(value_vars)
            missing = Index(com.flatten(value_vars)).difference(known_labels)
            if not missing.empty:
                raise KeyError(
                    "The following 'value_vars' are not present in "
                    "the DataFrame: {missing}"
                    "".format(missing=list(missing))
                )
        frame = frame.loc[:, id_vars + value_vars]
    else:
        frame = frame.copy()
    if col_level is not None:  # allow list or other?
        # frame is a copy
        frame.columns = frame.columns.get_level_values(col_level)
    if var_name is None:
        if isinstance(frame.columns, ABCMultiIndex):
            if len(frame.columns.names) == len(set(frame.columns.names)):
                var_name = frame.columns.names
            else:
                var_name = [
                    "variable_{i}".format(i=i) for i in range(len(frame.columns.names))
                ]
        else:
            var_name = [
                frame.columns.name if frame.columns.name is not None else "variable"
            ]
    if isinstance(var_name, str):
        var_name = [var_name]
    num_rows, num_value_cols = frame.shape
    num_value_cols -= len(id_vars)
    mdata = {}
    for col in id_vars:
        id_data = frame.pop(col)
        if is_extension_array_dtype(id_data):
            # Extension arrays cannot be tiled with numpy; concatenate copies.
            id_data = concat([id_data] * num_value_cols, ignore_index=True)
        else:
            id_data = np.tile(id_data.values, num_value_cols)
        mdata[col] = id_data
    mcolumns = id_vars + var_name + [value_name]
    mdata[value_name] = frame.values.ravel("F")
    for i, col in enumerate(var_name):
        # asanyarray will keep the columns as an Index
        mdata[col] = np.asanyarray(frame.columns._get_level_values(i)).repeat(num_rows)
    return frame._constructor(mdata, columns=mcolumns)
@deprecate_kwarg(old_arg_name="label", new_arg_name=None)
def lreshape(data: DataFrame, groups, dropna: bool = True, label=None) -> DataFrame:
    """
    Reshape long-format data to wide. Generalized inverse of DataFrame.pivot

    Parameters
    ----------
    data : DataFrame
    groups : dict
        {new_name : list_of_columns}; may also be an iterable of
        (new_name, list_of_columns) pairs.
    dropna : boolean, default True
        Drop any output row in which one of the stacked columns is NA.

    Returns
    -------
    reshaped : DataFrame
    """
    # Accept either a mapping or an iterable of (target, columns) pairs.
    if isinstance(groups, dict):
        targets = list(groups.keys())
        column_lists = list(groups.values())
    else:
        targets, column_lists = zip(*groups)
    stacked_cols = list(set.union(*[set(cols) for cols in column_lists]))
    id_cols = list(data.columns.difference(stacked_cols))
    group_len = len(column_lists[0])
    for cols in column_lists:
        if len(cols) != group_len:
            raise ValueError("All column lists must be same length")
    mdata = {}
    pivot_cols = []
    for target, cols in zip(targets, column_lists):
        # Stack each group of source columns into one long output column.
        mdata[target] = concat_compat([data[col].values for col in cols])
        pivot_cols.append(target)
    for col in id_cols:
        # Identifier columns repeat once per stacked source column.
        mdata[col] = np.tile(data[col].values, group_len)
    if dropna:
        mask = np.ones(len(mdata[pivot_cols[0]]), dtype=bool)
        for col in pivot_cols:
            mask &= notna(mdata[col])
        if not mask.all():
            mdata = {key: values[mask] for key, values in mdata.items()}
    return data._constructor(mdata, columns=id_cols + pivot_cols)
def wide_to_long(
    df: DataFrame, stubnames, i, j, sep: str = "", suffix: str = r"\d+"
) -> DataFrame:
    r"""
    Wide panel to long format. Less flexible but more user-friendly than melt.

    With stubnames ['A', 'B'], this function expects to find one or more
    group of columns with format A-suffix1, A-suffix2,..., B-suffix1,
    B-suffix2,... You specify what you want to call this suffix in the
    resulting long format with `j` (for example `j='year'`). Each row of
    these wide variables are assumed to be uniquely identified by `i` (can
    be a single column name or a list of column names). All remaining
    variables in the data frame are left intact.

    Parameters
    ----------
    df : DataFrame
        The wide-format DataFrame.
    stubnames : str or list-like
        The stub name(s). The wide format variables are assumed to
        start with the stub names.
    i : str or list-like
        Column(s) to use as id variable(s).
    j : str
        The name of the sub-observation variable. What you wish to name your
        suffix in the long format.
    sep : str, default ""
        A character indicating the separation of the variable names
        in the wide format, to be stripped from the names in the long format.
        For example, with columns A-suffix1, A-suffix2 pass `sep='-'`.
    suffix : str, default '\\d+'
        A regular expression capturing the wanted suffixes. '\\d+' captures
        numeric suffixes; suffixes with no numbers could be specified with
        the negated character class '\\D+'.

    Returns
    -------
    DataFrame
        A DataFrame that contains each stub name as a variable, with new
        index (i, j). When all suffixes are numeric, they are cast to
        int64/float64.

    Notes
    -----
    All extra variables are left untouched. This simply uses `pandas.melt`
    under the hood, but is hard-coded to "do the right thing" in a typical
    case.
    """
    def _stub_columns(df, stub: str, sep: str, suffix: str) -> List[str]:
        # Columns belonging to `stub`: exact prefix, separator, then suffix.
        pattern = re.compile(
            r"^{stub}{sep}{suffix}$".format(
                stub=re.escape(stub), sep=re.escape(sep), suffix=suffix
            )
        )
        return [col for col in df.columns if pattern.match(col)]

    def _melt_stub(df, stub: str, i, j, value_vars, sep: str):
        # Melt one stub's columns, then reduce the variable labels to the
        # bare suffix so they can become the `j` index level.
        long_df = melt(
            df,
            id_vars=i,
            value_vars=value_vars,
            value_name=stub.rstrip(sep),
            var_name=j,
        )
        long_df[j] = Categorical(long_df[j])
        long_df[j] = long_df[j].str.replace(re.escape(stub + sep), "")
        # GH17627 Cast numerics suffixes to int/float
        long_df[j] = to_numeric(long_df[j], errors="ignore")
        return long_df.set_index(i + [j])

    stubnames = [stubnames] if not is_list_like(stubnames) else list(stubnames)
    if any(col in stubnames for col in df.columns):
        raise ValueError("stubname can't be identical to a column name")
    i = [i] if not is_list_like(i) else list(i)
    if df[i].duplicated().any():
        raise ValueError("the id variables need to uniquely identify each row")

    value_vars = [_stub_columns(df, stub, sep, suffix) for stub in stubnames]
    flattened = [col for cols in value_vars for col in cols]
    id_vars = list(set(df.columns.tolist()).difference(flattened))

    _melted = [_melt_stub(df, s, i, j, v, sep) for s, v in zip(stubnames, value_vars)]
    melted = _melted[0].join(_melted[1:], how="outer")

    if len(i) == 1:
        return df[id_vars].set_index(i).join(melted)
    return df[id_vars].merge(melted.reset_index(), on=i).set_index(i + [j])
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_isosurface.py | <reponame>acrucetta/Chicago_COVI_WebApp
from plotly.graph_objs import Isosurface
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test__exceptions.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/numpy/core/tests/test__exceptions.py
"""
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
"""
import numpy as np
_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
class TestArrayMemoryError:
    """Exercise ``_ArrayMemoryError.__str__`` and its helper properties."""

    def test_str(self):
        # Rendering the full error message must not raise.
        str(_ArrayMemoryError((1023,), np.dtype(np.uint8)))

    # testing these properties is easier than testing the full string repr
    def test__size_to_string(self):
        """ Test e._size_to_string """
        fmt = _ArrayMemoryError._size_to_string
        Ki = 1024
        cases = [
            (0, '0 bytes'),
            (1, '1 bytes'),
            (1023, '1023 bytes'),
            (Ki, '1.00 KiB'),
            (Ki + 1, '1.00 KiB'),
            (10 * Ki, '10.0 KiB'),
            (int(999.4 * Ki), '999. KiB'),
            (int(1023.4 * Ki), '1023. KiB'),
            (int(1023.5 * Ki), '1.00 MiB'),
            (Ki * Ki, '1.00 MiB'),
            # 1023.9999 Mib should round to 1 GiB
            (int(Ki ** 3 * 0.9999), '1.00 GiB'),
            (Ki ** 6, '1.00 EiB'),
            # larger than sys.maxsize; adding larger prefixes isn't going to
            # help anyway, so the count just keeps growing.
            (Ki ** 6 * 123456, '123456. EiB'),
        ]
        for nbytes, expected in cases:
            assert fmt(nbytes) == expected

    def test__total_size(self):
        """ Test e._total_size """
        assert _ArrayMemoryError((1,), np.dtype(np.uint8))._total_size == 1
        # Sub-array dtype: 2*4 elements * (8-byte uint64 * 16) = 1024 bytes.
        big = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
        assert big._total_size == 1024
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_area.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_area.py
from plotly.graph_objs import Area
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/dashboard_objs.py | <filename>env/lib/python3.8/site-packages/plotly/dashboard_objs.py
from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error
_chart_studio_error("dashboard_objs")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/scalar/period/test_period.py | <filename>env/lib/python3.8/site-packages/pandas/tests/scalar/period/test_period.py
from datetime import date, datetime, timedelta
from distutils.version import StrictVersion
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import iNaT, period as libperiod
from pandas._libs.tslibs.ccalendar import DAYS, MONTHS
from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
from pandas._libs.tslibs.parsing import DateParseError
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas._libs.tslibs.timezones import dateutil_gettz, maybe_get_tz
from pandas.compat.numpy import np_datetime64_compat
import pandas as pd
from pandas import NaT, Period, Timedelta, Timestamp, offsets
import pandas._testing as tm
class TestPeriodConstruction:
    def test_construction(self):
        # Month: explicit freq and "Mon YYYY" inference agree.
        i1 = Period("1/1/2005", freq="M")
        i2 = Period("Jan 2005")
        assert i1 == i2
        # Year: "A" and lowercase "a" are the same annual freq.
        i1 = Period("2005", freq="A")
        i2 = Period("2005")
        i3 = Period("2005", freq="a")
        assert i1 == i2
        assert i1 == i3
        i4 = Period("2005", freq="M")
        i5 = Period("2005", freq="m")
        # Comparing periods of different freqs raises rather than returning
        # False, even for !=.
        msg = r"Input has different freq=M from Period\(freq=A-DEC\)"
        with pytest.raises(IncompatibleFrequency, match=msg):
            i1 != i4
        assert i4 == i5
        # Period.now is equivalent to constructing from datetime.now().
        i1 = Period.now("Q")
        i2 = Period(datetime.now(), freq="Q")
        i3 = Period.now("q")
        assert i1 == i2
        assert i1 == i3
        # Minutes: alias case-insensitivity and (alias, multiplier) tuples.
        i1 = Period("1982", freq="min")
        i2 = Period("1982", freq="MIN")
        assert i1 == i2
        i2 = Period("1982", freq=("Min", 1))
        assert i1 == i2
        # Daily: keyword construction matches string parsing.
        i1 = Period(year=2005, month=3, day=1, freq="D")
        i2 = Period("3/1/2005", freq="D")
        assert i1 == i2
        i3 = Period(year=2005, month=3, day=1, freq="d")
        assert i1 == i3
        # Sub-second resolution infers millisecond ("L") ...
        i1 = Period("2007-01-01 09:00:00.001")
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
        assert i1 == expected
        expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
        assert i1 == expected
        # ... or microsecond ("U") freq from the string's precision.
        i1 = Period("2007-01-01 09:00:00.00101")
        expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
        assert i1 == expected
        expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
        assert i1 == expected
        # Invalid constructions: ordinal without freq, unknown freq alias.
        msg = "Must supply freq for ordinal value"
        with pytest.raises(ValueError, match=msg):
            Period(ordinal=200701)
        with pytest.raises(ValueError, match="Invalid frequency: X"):
            Period("2007-1-1", freq="X")
def test_construction_bday(self):
# Biz day construction, roll forward if non-weekday
i1 = Period("3/10/12", freq="B")
i2 = Period("3/10/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/11/12", freq="D")
assert i1 == i2.asfreq("B")
i2 = Period("3/12/12", freq="D")
assert i1 == i2.asfreq("B")
i3 = Period("3/10/12", freq="b")
assert i1 == i3
i1 = Period(year=2012, month=3, day=10, freq="B")
i2 = Period("3/12/12", freq="B")
assert i1 == i2
def test_construction_quarter(self):
i1 = Period(year=2005, quarter=1, freq="Q")
i2 = Period("1/1/2005", freq="Q")
assert i1 == i2
i1 = Period(year=2005, quarter=3, freq="Q")
i2 = Period("9/1/2005", freq="Q")
assert i1 == i2
i1 = Period("2005Q1")
i2 = Period(year=2005, quarter=1, freq="Q")
i3 = Period("2005q1")
assert i1 == i2
assert i1 == i3
i1 = Period("05Q1")
assert i1 == i2
lower = Period("05q1")
assert i1 == lower
i1 = Period("1Q2005")
assert i1 == i2
lower = Period("1q2005")
assert i1 == lower
i1 = Period("1Q05")
assert i1 == i2
lower = Period("1q05")
assert i1 == lower
i1 = Period("4Q1984")
assert i1.year == 1984
lower = Period("4q1984")
assert i1 == lower
def test_construction_month(self):
expected = Period("2007-01", freq="M")
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period("200701", freq="M")
assert i1 == expected
i1 = Period(200701, freq="M")
assert i1 == expected
i1 = Period(ordinal=200701, freq="M")
assert i1.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
def test_period_constructor_offsets(self):
assert Period("1/1/2005", freq=offsets.MonthEnd()) == Period(
"1/1/2005", freq="M"
)
assert Period("2005", freq=offsets.YearEnd()) == Period("2005", freq="A")
assert Period("2005", freq=offsets.MonthEnd()) == Period("2005", freq="M")
assert Period("3/10/12", freq=offsets.BusinessDay()) == Period(
"3/10/12", freq="B"
)
assert Period("3/10/12", freq=offsets.Day()) == Period("3/10/12", freq="D")
assert Period(
year=2005, quarter=1, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=1, freq="Q")
assert Period(
year=2005, quarter=2, freq=offsets.QuarterEnd(startingMonth=12)
) == Period(year=2005, quarter=2, freq="Q")
assert Period(year=2005, month=3, day=1, freq=offsets.Day()) == Period(
year=2005, month=3, day=1, freq="D"
)
assert Period(year=2012, month=3, day=10, freq=offsets.BDay()) == Period(
year=2012, month=3, day=10, freq="B"
)
expected = Period("2005-03-01", freq="3D")
assert Period(year=2005, month=3, day=1, freq=offsets.Day(3)) == expected
assert Period(year=2005, month=3, day=1, freq="3D") == expected
assert Period(year=2012, month=3, day=10, freq=offsets.BDay(3)) == Period(
year=2012, month=3, day=10, freq="3B"
)
assert Period(200701, freq=offsets.MonthEnd()) == Period(200701, freq="M")
i1 = Period(ordinal=200701, freq=offsets.MonthEnd())
i2 = Period(ordinal=200701, freq="M")
assert i1 == i2
assert i1.year == 18695
assert i2.year == 18695
i1 = Period(datetime(2007, 1, 1), freq="M")
i2 = Period("200701", freq="M")
assert i1 == i2
i1 = Period(date(2007, 1, 1), freq="M")
i2 = Period(datetime(2007, 1, 1), freq="M")
i3 = Period(np.datetime64("2007-01-01"), freq="M")
i4 = Period(np_datetime64_compat("2007-01-01 00:00:00Z"), freq="M")
i5 = Period(np_datetime64_compat("2007-01-01 00:00:00.000Z"), freq="M")
assert i1 == i2
assert i1 == i3
assert i1 == i4
assert i1 == i5
i1 = Period("2007-01-01 09:00:00.001")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1000), freq="L")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.001Z"), freq="L")
assert i1 == expected
i1 = Period("2007-01-01 09:00:00.00101")
expected = Period(datetime(2007, 1, 1, 9, 0, 0, 1010), freq="U")
assert i1 == expected
expected = Period(np_datetime64_compat("2007-01-01 09:00:00.00101Z"), freq="U")
assert i1 == expected
def test_invalid_arguments(self):
with pytest.raises(ValueError):
Period(datetime.now())
with pytest.raises(ValueError):
Period(datetime.now().date())
with pytest.raises(ValueError):
Period(1.6, freq="D")
with pytest.raises(ValueError):
Period(ordinal=1.6, freq="D")
with pytest.raises(ValueError):
Period(ordinal=2, value=1, freq="D")
with pytest.raises(ValueError):
Period(month=1)
with pytest.raises(ValueError):
Period("-2000", "A")
with pytest.raises(DateParseError):
Period("0", "A")
with pytest.raises(DateParseError):
Period("1/1/-2000", "A")
def test_constructor_corner(self):
expected = Period("2007-01", freq="2M")
assert Period(year=2007, month=1, freq="2M") == expected
assert Period(None) is NaT
p = Period("2007-01-01", freq="D")
result = Period(p, freq="A")
exp = Period("2007", freq="A")
assert result == exp
def test_constructor_infer_freq(self):
p = Period("2007-01-01")
assert p.freq == "D"
p = Period("2007-01-01 07")
assert p.freq == "H"
p = Period("2007-01-01 07:10")
assert p.freq == "T"
p = Period("2007-01-01 07:10:15")
assert p.freq == "S"
p = Period("2007-01-01 07:10:15.123")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123000")
assert p.freq == "L"
p = Period("2007-01-01 07:10:15.123400")
assert p.freq == "U"
def test_multiples(self):
result1 = Period("1989", freq="2A")
result2 = Period("1989", freq="A")
assert result1.ordinal == result2.ordinal
assert result1.freqstr == "2A-DEC"
assert result2.freqstr == "A-DEC"
assert result1.freq == offsets.YearEnd(2)
assert result2.freq == offsets.YearEnd()
assert (result1 + 1).ordinal == result1.ordinal + 2
assert (1 + result1).ordinal == result1.ordinal + 2
assert (result1 - 1).ordinal == result2.ordinal - 2
assert (-1 + result1).ordinal == result2.ordinal - 2
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_quarterly(self, month):
# bugs in scikits.timeseries
freq = "Q-{month}".format(month=month)
exp = Period("1989Q3", freq=freq)
assert "1989Q3" in str(exp)
stamp = exp.to_timestamp("D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
stamp = exp.to_timestamp("3D", how="end")
p = Period(stamp, freq=freq)
assert p == exp
@pytest.mark.parametrize("month", MONTHS)
def test_period_cons_annual(self, month):
# bugs in scikits.timeseries
freq = "A-{month}".format(month=month)
exp = Period("1989", freq=freq)
stamp = exp.to_timestamp("D", how="end") + timedelta(days=30)
p = Period(stamp, freq=freq)
assert p == exp + 1
assert isinstance(p, Period)
@pytest.mark.parametrize("day", DAYS)
@pytest.mark.parametrize("num", range(10, 17))
def test_period_cons_weekly(self, num, day):
daystr = "2011-02-{num}".format(num=num)
freq = "W-{day}".format(day=day)
result = Period(daystr, freq=freq)
expected = Period(daystr, freq="D").asfreq(freq)
assert result == expected
assert isinstance(result, Period)
def test_period_from_ordinal(self):
p = Period("2011-01", freq="M")
res = Period._from_ordinal(p.ordinal, freq="M")
assert p == res
assert isinstance(res, Period)
def test_period_cons_nat(self):
p = Period("NaT", freq="M")
assert p is NaT
p = Period("nat", freq="W-SUN")
assert p is NaT
p = Period(iNaT, freq="D")
assert p is NaT
p = Period(iNaT, freq="3D")
assert p is NaT
p = Period(iNaT, freq="1D1H")
assert p is NaT
p = Period("NaT")
assert p is NaT
p = Period(iNaT)
assert p is NaT
def test_period_cons_mult(self):
p1 = Period("2011-01", freq="3M")
p2 = Period("2011-01", freq="M")
assert p1.ordinal == p2.ordinal
assert p1.freq == offsets.MonthEnd(3)
assert p1.freqstr == "3M"
assert p2.freq == offsets.MonthEnd()
assert p2.freqstr == "M"
result = p1 + 1
assert result.ordinal == (p2 + 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
result = p1 - 1
assert result.ordinal == (p2 - 3).ordinal
assert result.freq == p1.freq
assert result.freqstr == "3M"
msg = "Frequency must be positive, because it represents span: -3M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-3M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0M")
def test_period_cons_combined(self):
p = [
(
Period("2011-01", freq="1D1H"),
Period("2011-01", freq="1H1D"),
Period("2011-01", freq="H"),
),
(
Period(ordinal=1, freq="1D1H"),
Period(ordinal=1, freq="1H1D"),
Period(ordinal=1, freq="H"),
),
]
for p1, p2, p3 in p:
assert p1.ordinal == p3.ordinal
assert p2.ordinal == p3.ordinal
assert p1.freq == offsets.Hour(25)
assert p1.freqstr == "25H"
assert p2.freq == offsets.Hour(25)
assert p2.freqstr == "25H"
assert p3.freq == offsets.Hour()
assert p3.freqstr == "H"
result = p1 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 + 1
assert result.ordinal == (p3 + 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
result = p1 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p1.freq
assert result.freqstr == "25H"
result = p2 - 1
assert result.ordinal == (p3 - 25).ordinal
assert result.freq == p2.freq
assert result.freqstr == "25H"
msg = "Frequency must be positive, because it represents span: -25H"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="-1H1D")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1D1H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="-1H1D")
msg = "Frequency must be positive, because it represents span: 0D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="0D0H")
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq="0D0H")
# You can only combine together day and intraday offsets
msg = "Invalid frequency: 1W1D"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1W1D")
msg = "Invalid frequency: 1D1W"
with pytest.raises(ValueError, match=msg):
Period("2011-01", freq="1D1W")
class TestPeriodMethods:
    """Tests for Period instance methods: pickling, hashing,
    ``to_timestamp`` (including tz handling) and rendering."""

    def test_round_trip(self):
        # Pickle round-trip must preserve equality.
        p = Period("2000Q1")
        new_p = tm.round_trip_pickle(p)
        assert new_p == p

    def test_hash(self):
        # Equal (ordinal, freq incl. multiple) -> equal hash; differing
        # freq, freq multiple, or month -> differing hash.
        assert hash(Period("2011-01", freq="M")) == hash(Period("2011-01", freq="M"))
        assert hash(Period("2011-01-01", freq="D")) != hash(Period("2011-01", freq="M"))
        assert hash(Period("2011-01", freq="3M")) != hash(Period("2011-01", freq="2M"))
        assert hash(Period("2011-01", freq="M")) != hash(Period("2011-02", freq="M"))

    # --------------------------------------------------------------
    # to_timestamp

    @pytest.mark.parametrize("tzstr", ["Europe/Brussels", "Asia/Tokyo", "US/Pacific"])
    def test_to_timestamp_tz_arg(self, tzstr):
        """to_timestamp(tz=...) localizes the resulting Timestamp to the
        requested pytz zone.  NOTE: ``p`` is rebound to a Timestamp after
        each to_timestamp call below."""
        p = Period("1/1/2005", freq="M").to_timestamp(tz=tzstr)
        exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)
        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz
        p = Period("1/1/2005", freq="3H").to_timestamp(tz=tzstr)
        exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)
        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz
        p = Period("1/1/2005", freq="A").to_timestamp(freq="A", tz=tzstr)
        exp = Timestamp("31/12/2005", tz="UTC").tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)
        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz
        p = Period("1/1/2005", freq="A").to_timestamp(freq="3H", tz=tzstr)
        exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
        exp_zone = pytz.timezone(tzstr).normalize(p)
        assert p == exp
        assert p.tz == exp_zone.tzinfo
        assert p.tz == exp.tz

    @pytest.mark.parametrize(
        "tzstr",
        ["dateutil/Europe/Brussels", "dateutil/Asia/Tokyo", "dateutil/US/Pacific"],
    )
    def test_to_timestamp_tz_arg_dateutil(self, tzstr):
        """Same as above but with dateutil-backed timezone objects."""
        tz = maybe_get_tz(tzstr)
        p = Period("1/1/2005", freq="M").to_timestamp(tz=tz)
        exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
        assert p == exp
        assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
        assert p.tz == exp.tz
        p = Period("1/1/2005", freq="M").to_timestamp(freq="3H", tz=tz)
        exp = Timestamp("1/1/2005", tz="UTC").tz_convert(tzstr)
        assert p == exp
        assert p.tz == dateutil_gettz(tzstr.split("/", 1)[1])
        assert p.tz == exp.tz

    def test_to_timestamp_tz_arg_dateutil_from_string(self):
        # A "dateutil/..." tz string resolves to a dateutil tzinfo.
        p = Period("1/1/2005", freq="M").to_timestamp(tz="dateutil/Europe/Brussels")
        assert p.tz == dateutil_gettz("Europe/Brussels")

    def test_to_timestamp_mult(self):
        """With a multiplied freq, how="E" is one nanosecond before the end
        of the full multiple (3 months), not of one base unit."""
        p = Period("2011-01", freq="M")
        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
        expected = Timestamp("2011-02-01") - Timedelta(1, "ns")
        assert p.to_timestamp(how="E") == expected
        p = Period("2011-01", freq="3M")
        assert p.to_timestamp(how="S") == Timestamp("2011-01-01")
        expected = Timestamp("2011-04-01") - Timedelta(1, "ns")
        assert p.to_timestamp(how="E") == expected

    def test_to_timestamp(self):
        """to_timestamp honors case-insensitive how= aliases, round-trips
        through to_period, and "end" means one ns before the next period."""
        p = Period("1982", freq="A")
        start_ts = p.to_timestamp(how="S")
        aliases = ["s", "StarT", "BEGIn"]
        for a in aliases:
            assert start_ts == p.to_timestamp("D", how=a)
            # freq with mult should not affect to the result
            assert start_ts == p.to_timestamp("3D", how=a)
        end_ts = p.to_timestamp(how="E")
        aliases = ["e", "end", "FINIsH"]
        for a in aliases:
            assert end_ts == p.to_timestamp("D", how=a)
            assert end_ts == p.to_timestamp("3D", how=a)
        from_lst = ["A", "Q", "M", "W", "B", "D", "H", "Min", "S"]

        def _ex(p):
            # Last representable instant of ``p``: one ns before the start
            # of the following period.
            return Timestamp((p + p.freq).start_time.value - 1)

        for i, fcode in enumerate(from_lst):
            p = Period("1982", freq=fcode)
            result = p.to_timestamp().to_period(fcode)
            assert result == p
            assert p.start_time == p.to_timestamp(how="S")
            assert p.end_time == _ex(p)
        # Frequency other than daily
        p = Period("1985", freq="A")
        result = p.to_timestamp("H", how="end")
        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
        assert result == expected
        result = p.to_timestamp("3H", how="end")
        assert result == expected
        result = p.to_timestamp("T", how="end")
        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
        assert result == expected
        result = p.to_timestamp("2T", how="end")
        assert result == expected
        result = p.to_timestamp(how="end")
        expected = Timestamp(1986, 1, 1) - Timedelta(1, "ns")
        assert result == expected
        expected = datetime(1985, 1, 1)
        result = p.to_timestamp("H", how="start")
        assert result == expected
        result = p.to_timestamp("T", how="start")
        assert result == expected
        result = p.to_timestamp("S", how="start")
        assert result == expected
        result = p.to_timestamp("3H", how="start")
        assert result == expected
        result = p.to_timestamp("5S", how="start")
        assert result == expected

    # --------------------------------------------------------------
    # Rendering: __repr__, strftime, etc

    def test_repr(self):
        p = Period("Jan-2000")
        assert "2000-01" in repr(p)
        p = Period("2000-12-15")
        assert "2000-12-15" in repr(p)

    def test_repr_nat(self):
        # A NaT Period reprs like NaT itself.
        p = Period("nat", freq="M")
        assert repr(NaT) in repr(p)

    def test_millisecond_repr(self):
        p = Period("2000-01-01 12:15:02.123")
        assert repr(p) == "Period('2000-01-01 12:15:02.123', 'L')"

    def test_microsecond_repr(self):
        p = Period("2000-01-01 12:15:02.123567")
        assert repr(p) == "Period('2000-01-01 12:15:02.123567', 'U')"

    def test_strftime(self):
        # GH#3363
        p = Period("2000-1-1 12:34:12", freq="S")
        res = p.strftime("%Y-%m-%d %H:%M:%S")
        assert res == "2000-01-01 12:34:12"
        assert isinstance(res, str)
class TestPeriodProperties:
"Test properties such as year, month, weekday, etc...."
@pytest.mark.parametrize("freq", ["A", "M", "D", "H"])
def test_is_leap_year(self, freq):
# GH 13727
p = Period("2000-01-01 00:00:00", freq=freq)
assert p.is_leap_year
assert isinstance(p.is_leap_year, bool)
p = Period("1999-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
p = Period("2004-01-01 00:00:00", freq=freq)
assert p.is_leap_year
p = Period("2100-01-01 00:00:00", freq=freq)
assert not p.is_leap_year
def test_quarterly_negative_ordinals(self):
p = Period(ordinal=-1, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 4
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="Q-DEC")
assert p.year == 1969
assert p.quarter == 3
assert isinstance(p, Period)
p = Period(ordinal=-2, freq="M")
assert p.year == 1969
assert p.month == 11
assert isinstance(p, Period)
def test_freq_str(self):
i1 = Period("1982", freq="Min")
assert i1.freq == offsets.Minute()
assert i1.freqstr == "T"
def test_period_deprecated_freq(self):
cases = {
"M": ["MTH", "MONTH", "MONTHLY", "Mth", "month", "monthly"],
"B": ["BUS", "BUSINESS", "BUSINESSLY", "WEEKDAY", "bus"],
"D": ["DAY", "DLY", "DAILY", "Day", "Dly", "Daily"],
"H": ["HR", "HOUR", "HRLY", "HOURLY", "hr", "Hour", "HRly"],
"T": ["minute", "MINUTE", "MINUTELY", "minutely"],
"S": ["sec", "SEC", "SECOND", "SECONDLY", "second"],
"L": ["MILLISECOND", "MILLISECONDLY", "millisecond"],
"U": ["MICROSECOND", "MICROSECONDLY", "microsecond"],
"N": ["NANOSECOND", "NANOSECONDLY", "nanosecond"],
}
msg = INVALID_FREQ_ERR_MSG
for exp, freqs in cases.items():
for freq in freqs:
with pytest.raises(ValueError, match=msg):
Period("2016-03-01 09:00", freq=freq)
with pytest.raises(ValueError, match=msg):
Period(ordinal=1, freq=freq)
# check supported freq-aliases still works
p1 = Period("2016-03-01 09:00", freq=exp)
p2 = Period(ordinal=1, freq=exp)
assert isinstance(p1, Period)
assert isinstance(p2, Period)
def test_start_time(self):
freq_lst = ["A", "Q", "M", "D", "H", "T", "S"]
xp = datetime(2012, 1, 1)
for f in freq_lst:
p = Period("2012", freq=f)
assert p.start_time == xp
assert Period("2012", freq="B").start_time == datetime(2012, 1, 2)
assert Period("2012", freq="W").start_time == datetime(2011, 12, 26)
def test_end_time(self):
p = Period("2012", freq="A")
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
xp = _ex(2013, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="Q")
xp = _ex(2012, 4, 1)
assert xp == p.end_time
p = Period("2012", freq="M")
xp = _ex(2012, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="D")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
p = Period("2012", freq="H")
xp = _ex(2012, 1, 1, 1)
assert xp == p.end_time
p = Period("2012", freq="B")
xp = _ex(2012, 1, 3)
assert xp == p.end_time
p = Period("2012", freq="W")
xp = _ex(2012, 1, 2)
assert xp == p.end_time
# Test for GH 11738
p = Period("2012", freq="15D")
xp = _ex(2012, 1, 16)
assert xp == p.end_time
p = Period("2012", freq="1D1H")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
p = Period("2012", freq="1H1D")
xp = _ex(2012, 1, 2, 1)
assert xp == p.end_time
def test_anchor_week_end_time(self):
def _ex(*args):
return Timestamp(Timestamp(datetime(*args)).value - 1)
p = Period("2013-1-1", "W-SAT")
xp = _ex(2013, 1, 6)
assert p.end_time == xp
def test_properties_annually(self):
# Test properties on Periods with annually frequency.
a_date = Period(freq="A", year=2007)
assert a_date.year == 2007
def test_properties_quarterly(self):
# Test properties on Periods with daily frequency.
qedec_date = Period(freq="Q-DEC", year=2007, quarter=1)
qejan_date = Period(freq="Q-JAN", year=2007, quarter=1)
qejun_date = Period(freq="Q-JUN", year=2007, quarter=1)
#
for x in range(3):
for qd in (qedec_date, qejan_date, qejun_date):
assert (qd + x).qyear == 2007
assert (qd + x).quarter == x + 1
def test_properties_monthly(self):
# Test properties on Periods with daily frequency.
m_date = Period(freq="M", year=2007, month=1)
for x in range(11):
m_ival_x = m_date + x
assert m_ival_x.year == 2007
if 1 <= x + 1 <= 3:
assert m_ival_x.quarter == 1
elif 4 <= x + 1 <= 6:
assert m_ival_x.quarter == 2
elif 7 <= x + 1 <= 9:
assert m_ival_x.quarter == 3
elif 10 <= x + 1 <= 12:
assert m_ival_x.quarter == 4
assert m_ival_x.month == x + 1
def test_properties_weekly(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
#
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
assert Period(freq="W", year=2012, month=2, day=1).days_in_month == 29
def test_properties_weekly_legacy(self):
# Test properties on Periods with daily frequency.
w_date = Period(freq="W", year=2007, month=1, day=7)
assert w_date.year == 2007
assert w_date.quarter == 1
assert w_date.month == 1
assert w_date.week == 1
assert (w_date - 1).week == 52
assert w_date.days_in_month == 31
exp = Period(freq="W", year=2012, month=2, day=1)
assert exp.days_in_month == 29
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
Period(freq="WK", year=2007, month=1, day=7)
def test_properties_daily(self):
# Test properties on Periods with daily frequency.
b_date = Period(freq="B", year=2007, month=1, day=1)
#
assert b_date.year == 2007
assert b_date.quarter == 1
assert b_date.month == 1
assert b_date.day == 1
assert b_date.weekday == 0
assert b_date.dayofyear == 1
assert b_date.days_in_month == 31
assert Period(freq="B", year=2012, month=2, day=1).days_in_month == 29
d_date = Period(freq="D", year=2007, month=1, day=1)
assert d_date.year == 2007
assert d_date.quarter == 1
assert d_date.month == 1
assert d_date.day == 1
assert d_date.weekday == 0
assert d_date.dayofyear == 1
assert d_date.days_in_month == 31
assert Period(freq="D", year=2012, month=2, day=1).days_in_month == 29
def test_properties_hourly(self):
# Test properties on Periods with hourly frequency.
h_date1 = Period(freq="H", year=2007, month=1, day=1, hour=0)
h_date2 = Period(freq="2H", year=2007, month=1, day=1, hour=0)
for h_date in [h_date1, h_date2]:
assert h_date.year == 2007
assert h_date.quarter == 1
assert h_date.month == 1
assert h_date.day == 1
assert h_date.weekday == 0
assert h_date.dayofyear == 1
assert h_date.hour == 0
assert h_date.days_in_month == 31
assert (
Period(freq="H", year=2012, month=2, day=1, hour=0).days_in_month == 29
)
def test_properties_minutely(self):
# Test properties on Periods with minutely frequency.
t_date = Period(freq="Min", year=2007, month=1, day=1, hour=0, minute=0)
#
assert t_date.quarter == 1
assert t_date.month == 1
assert t_date.day == 1
assert t_date.weekday == 0
assert t_date.dayofyear == 1
assert t_date.hour == 0
assert t_date.minute == 0
assert t_date.days_in_month == 31
assert (
Period(freq="D", year=2012, month=2, day=1, hour=0, minute=0).days_in_month
== 29
)
def test_properties_secondly(self):
# Test properties on Periods with secondly frequency.
s_date = Period(
freq="Min", year=2007, month=1, day=1, hour=0, minute=0, second=0
)
#
assert s_date.year == 2007
assert s_date.quarter == 1
assert s_date.month == 1
assert s_date.day == 1
assert s_date.weekday == 0
assert s_date.dayofyear == 1
assert s_date.hour == 0
assert s_date.minute == 0
assert s_date.second == 0
assert s_date.days_in_month == 31
assert (
Period(
freq="Min", year=2012, month=2, day=1, hour=0, minute=0, second=0
).days_in_month
== 29
)
class TestPeriodField:
    """Guard test for the low-level cython period field helper."""

    def test_get_period_field_array_raises_on_out_of_range(self):
        # A float64 buffer (paired with a bogus field code) must be
        # rejected by the cython layer rather than silently coerced.
        err = "Buffer dtype mismatch, expected 'int64_t' but got 'double'"
        with pytest.raises(ValueError, match=err):
            libperiod.get_period_field_arr(-1, np.empty(1), 0)
class TestComparisons:
def setup_method(self, method):
self.january1 = Period("2000-01", "M")
self.january2 = Period("2000-01", "M")
self.february = Period("2000-02", "M")
self.march = Period("2000-03", "M")
self.day = Period("2012-01-01", "D")
def test_equal(self):
assert self.january1 == self.january2
def test_equal_Raises_Value(self):
with pytest.raises(IncompatibleFrequency):
self.january1 == self.day
def test_notEqual(self):
assert self.january1 != 1
assert self.january1 != self.february
def test_greater(self):
assert self.february > self.january1
def test_greater_Raises_Value(self):
with pytest.raises(IncompatibleFrequency):
self.january1 > self.day
def test_greater_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 > 1
def test_greaterEqual(self):
assert self.january1 >= self.january2
def test_greaterEqual_Raises_Value(self):
with pytest.raises(IncompatibleFrequency):
self.january1 >= self.day
with pytest.raises(TypeError):
print(self.january1 >= 1)
def test_smallerEqual(self):
assert self.january1 <= self.january2
def test_smallerEqual_Raises_Value(self):
with pytest.raises(IncompatibleFrequency):
self.january1 <= self.day
def test_smallerEqual_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 <= 1
def test_smaller(self):
assert self.january1 < self.february
def test_smaller_Raises_Value(self):
with pytest.raises(IncompatibleFrequency):
self.january1 < self.day
def test_smaller_Raises_Type(self):
with pytest.raises(TypeError):
self.january1 < 1
def test_sort(self):
periods = [self.march, self.january1, self.february]
correctPeriods = [self.january1, self.february, self.march]
assert sorted(periods) == correctPeriods
def test_period_nat_comp(self):
p_nat = Period("NaT", freq="D")
p = Period("2011-01-01", freq="D")
nat = Timestamp("NaT")
t = Timestamp("2011-01-01")
# confirm Period('NaT') work identical with Timestamp('NaT')
for left, right in [
(p_nat, p),
(p, p_nat),
(p_nat, p_nat),
(nat, t),
(t, nat),
(nat, nat),
]:
assert not left < right
assert not left > right
assert not left == right
assert left != right
assert not left <= right
assert not left >= right
class TestArithmetic:
def test_sub_delta(self):
left, right = Period("2011", freq="A"), Period("2007", freq="A")
result = left - right
assert result == 4 * right.freq
with pytest.raises(IncompatibleFrequency):
left - Period("2007-01", freq="M")
def test_add_integer(self):
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
assert per1 + 1 == per2
assert 1 + per1 == per2
def test_add_sub_nat(self):
# GH#13071
p = Period("2011-01", freq="M")
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
p = Period("NaT", freq="M")
assert p is NaT
assert p + NaT is NaT
assert NaT + p is NaT
assert p - NaT is NaT
assert NaT - p is NaT
def test_add_invalid(self):
# GH#4731
per1 = Period(freq="D", year=2008, month=1, day=1)
per2 = Period(freq="D", year=2008, month=1, day=2)
msg = r"unsupported operand type\(s\)"
with pytest.raises(TypeError, match=msg):
per1 + "str"
with pytest.raises(TypeError, match=msg):
"str" + per1
with pytest.raises(TypeError, match=msg):
per1 + per2
boxes = [lambda x: x, lambda x: pd.Series([x]), lambda x: pd.Index([x])]
ids = ["identity", "Series", "Index"]
@pytest.mark.parametrize("lbox", boxes, ids=ids)
@pytest.mark.parametrize("rbox", boxes, ids=ids)
def test_add_timestamp_raises(self, rbox, lbox):
# GH#17983
ts = Timestamp("2017")
per = Period("2017", freq="M")
# We may get a different message depending on which class raises
# the error.
msg = (
r"cannot add|unsupported operand|"
r"can only operate on a|incompatible type|"
r"ufunc add cannot use operands"
)
with pytest.raises(TypeError, match=msg):
lbox(ts) + rbox(per)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(ts)
with pytest.raises(TypeError, match=msg):
lbox(per) + rbox(per)
def test_sub(self):
per1 = Period("2011-01-01", freq="D")
per2 = Period("2011-01-15", freq="D")
off = per1.freq
assert per1 - per2 == -14 * off
assert per2 - per1 == 14 * off
msg = r"Input has different freq=M from Period\(freq=D\)"
with pytest.raises(IncompatibleFrequency, match=msg):
per1 - Period("2011-02", freq="M")
@pytest.mark.parametrize("n", [1, 2, 3, 4])
def test_sub_n_gt_1_ticks(self, tick_classes, n):
# GH 23878
p1 = Period("19910905", freq=tick_classes(n))
p2 = Period("19920406", freq=tick_classes(n))
expected = Period(str(p2), freq=p2.freq.base) - Period(
str(p1), freq=p1.freq.base
)
assert (p2 - p1) == expected
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [1, 2, 3, 4])
@pytest.mark.parametrize(
"offset, kwd_name",
[
(offsets.YearEnd, "month"),
(offsets.QuarterEnd, "startingMonth"),
(offsets.MonthEnd, None),
(offsets.Week, "weekday"),
],
)
def test_sub_n_gt_1_offsets(self, offset, kwd_name, n, normalize):
# GH 23878
kwds = {kwd_name: 3} if kwd_name is not None else {}
p1_d = "19910905"
p2_d = "19920406"
p1 = Period(p1_d, freq=offset(n, normalize, **kwds))
p2 = Period(p2_d, freq=offset(n, normalize, **kwds))
expected = Period(p2_d, freq=p2.freq.base) - Period(p1_d, freq=p1.freq.base)
assert (p2 - p1) == expected
    def test_add_offset(self):
        """Period + offset is allowed only when the offset matches the
        Period's frequency (or, for Tick freqs, is an exact multiple of
        it); anything else raises IncompatibleFrequency.  Reflected
        addition of np.timedelta64 raises TypeError instead, because
        numpy's __add__ rejects the Period before the reflected op runs."""
        # freq is DateOffset
        for freq in ["A", "2A", "3A"]:
            p = Period("2011", freq=freq)
            exp = Period("2013", freq=freq)
            assert p + offsets.YearEnd(2) == exp
            assert offsets.YearEnd(2) + p == exp
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                with pytest.raises(IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(IncompatibleFrequency):
                        o + p
        for freq in ["M", "2M", "3M"]:
            p = Period("2011-03", freq=freq)
            exp = Period("2011-05", freq=freq)
            assert p + offsets.MonthEnd(2) == exp
            assert offsets.MonthEnd(2) + p == exp
            exp = Period("2012-03", freq=freq)
            assert p + offsets.MonthEnd(12) == exp
            assert offsets.MonthEnd(12) + p == exp
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                with pytest.raises(IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(IncompatibleFrequency):
                        o + p
        # freq is Tick: timedelta-likes that are whole multiples of the
        # freq are accepted; fractional ones raise.
        for freq in ["D", "2D", "3D"]:
            p = Period("2011-04-01", freq=freq)
            exp = Period("2011-04-06", freq=freq)
            assert p + offsets.Day(5) == exp
            assert offsets.Day(5) + p == exp
            exp = Period("2011-04-02", freq=freq)
            assert p + offsets.Hour(24) == exp
            assert offsets.Hour(24) + p == exp
            exp = Period("2011-04-03", freq=freq)
            assert p + np.timedelta64(2, "D") == exp
            with pytest.raises(TypeError):
                np.timedelta64(2, "D") + p
            exp = Period("2011-04-02", freq=freq)
            assert p + np.timedelta64(3600 * 24, "s") == exp
            with pytest.raises(TypeError):
                np.timedelta64(3600 * 24, "s") + p
            exp = Period("2011-03-30", freq=freq)
            assert p + timedelta(-2) == exp
            assert timedelta(-2) + p == exp
            exp = Period("2011-04-03", freq=freq)
            assert p + timedelta(hours=48) == exp
            assert timedelta(hours=48) + p == exp
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(4, "h"),
                timedelta(hours=23),
            ]:
                with pytest.raises(IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(IncompatibleFrequency):
                        o + p
        for freq in ["H", "2H", "3H"]:
            p = Period("2011-04-01 09:00", freq=freq)
            exp = Period("2011-04-03 09:00", freq=freq)
            assert p + offsets.Day(2) == exp
            assert offsets.Day(2) + p == exp
            exp = Period("2011-04-01 12:00", freq=freq)
            assert p + offsets.Hour(3) == exp
            assert offsets.Hour(3) + p == exp
            exp = Period("2011-04-01 12:00", freq=freq)
            assert p + np.timedelta64(3, "h") == exp
            with pytest.raises(TypeError):
                np.timedelta64(3, "h") + p
            exp = Period("2011-04-01 10:00", freq=freq)
            assert p + np.timedelta64(3600, "s") == exp
            with pytest.raises(TypeError):
                np.timedelta64(3600, "s") + p
            exp = Period("2011-04-01 11:00", freq=freq)
            assert p + timedelta(minutes=120) == exp
            assert timedelta(minutes=120) + p == exp
            exp = Period("2011-04-05 12:00", freq=freq)
            assert p + timedelta(days=4, minutes=180) == exp
            assert timedelta(days=4, minutes=180) + p == exp
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(3200, "s"),
                timedelta(hours=23, minutes=30),
            ]:
                with pytest.raises(IncompatibleFrequency):
                    p + o
                if isinstance(o, np.timedelta64):
                    with pytest.raises(TypeError):
                        o + p
                else:
                    with pytest.raises(IncompatibleFrequency):
                        o + p
    def test_add_offset_nat(self):
        """A NaT-valued Period absorbs any offset: p + o and o + p are NaT
        whether or not the offset would have been frequency-compatible."""
        # freq is DateOffset
        for freq in ["A", "2A", "3A"]:
            p = Period("NaT", freq=freq)
            assert p is NaT
            # compatible offsets ...
            for o in [offsets.YearEnd(2)]:
                assert p + o is NaT
                assert o + p is NaT
            # ... and incompatible ones behave identically with NaT.
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                assert p + o is NaT
                assert o + p is NaT
        for freq in ["M", "2M", "3M"]:
            p = Period("NaT", freq=freq)
            assert p is NaT
            for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
                assert p + o is NaT
                assert o + p is NaT
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(365, "D"),
                timedelta(365),
            ]:
                assert p + o is NaT
                assert o + p is NaT
        # freq is Tick
        for freq in ["D", "2D", "3D"]:
            p = Period("NaT", freq=freq)
            assert p is NaT
            for o in [
                offsets.Day(5),
                offsets.Hour(24),
                np.timedelta64(2, "D"),
                np.timedelta64(3600 * 24, "s"),
                timedelta(-2),
                timedelta(hours=48),
            ]:
                assert p + o is NaT
                assert o + p is NaT
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(4, "h"),
                timedelta(hours=23),
            ]:
                assert p + o is NaT
                assert o + p is NaT
        for freq in ["H", "2H", "3H"]:
            p = Period("NaT", freq=freq)
            assert p is NaT
            for o in [
                offsets.Day(2),
                offsets.Hour(3),
                np.timedelta64(3, "h"),
                np.timedelta64(3600, "s"),
                timedelta(minutes=120),
                timedelta(days=4, minutes=180),
            ]:
                assert p + o is NaT
                assert o + p is NaT
            for o in [
                offsets.YearBegin(2),
                offsets.MonthBegin(1),
                offsets.Minute(),
                np.timedelta64(3200, "s"),
                timedelta(hours=23, minutes=30),
            ]:
                assert p + o is NaT
                assert o + p is NaT
def test_sub_offset(self):
    # Subtracting an aligned offset shifts the period; a misaligned
    # offset raises IncompatibleFrequency.
    # Annual (DateOffset-based) frequencies.
    for freq in ("A", "2A", "3A"):
        per = Period("2011", freq=freq)
        assert per - offsets.YearEnd(2) == Period("2009", freq=freq)
        for bad in (
            offsets.YearBegin(2),
            offsets.MonthBegin(1),
            offsets.Minute(),
            np.timedelta64(365, "D"),
            timedelta(365),
        ):
            with pytest.raises(IncompatibleFrequency):
                per - bad
    # Monthly frequencies.
    for freq in ("M", "2M", "3M"):
        per = Period("2011-03", freq=freq)
        assert per - offsets.MonthEnd(2) == Period("2011-01", freq=freq)
        assert per - offsets.MonthEnd(12) == Period("2010-03", freq=freq)
        for bad in (
            offsets.YearBegin(2),
            offsets.MonthBegin(1),
            offsets.Minute(),
            np.timedelta64(365, "D"),
            timedelta(365),
        ):
            with pytest.raises(IncompatibleFrequency):
                per - bad
    # Daily (Tick-based) frequencies.
    for freq in ("D", "2D", "3D"):
        per = Period("2011-04-01", freq=freq)
        shifts = [
            (offsets.Day(5), "2011-03-27"),
            (offsets.Hour(24), "2011-03-31"),
            (np.timedelta64(2, "D"), "2011-03-30"),
            (np.timedelta64(3600 * 24, "s"), "2011-03-31"),
            (timedelta(-2), "2011-04-03"),
            (timedelta(hours=48), "2011-03-30"),
        ]
        for off, expected in shifts:
            assert per - off == Period(expected, freq=freq)
        for bad in (
            offsets.YearBegin(2),
            offsets.MonthBegin(1),
            offsets.Minute(),
            np.timedelta64(4, "h"),
            timedelta(hours=23),
        ):
            with pytest.raises(IncompatibleFrequency):
                per - bad
    # Hourly (Tick-based) frequencies.
    for freq in ("H", "2H", "3H"):
        per = Period("2011-04-01 09:00", freq=freq)
        shifts = [
            (offsets.Day(2), "2011-03-30 09:00"),
            (offsets.Hour(3), "2011-04-01 06:00"),
            (np.timedelta64(3, "h"), "2011-04-01 06:00"),
            (np.timedelta64(3600, "s"), "2011-04-01 08:00"),
            (timedelta(minutes=120), "2011-04-01 07:00"),
            (timedelta(days=4, minutes=180), "2011-03-28 06:00"),
        ]
        for off, expected in shifts:
            assert per - off == Period(expected, freq=freq)
        for bad in (
            offsets.YearBegin(2),
            offsets.MonthBegin(1),
            offsets.Minute(),
            np.timedelta64(3200, "s"),
            timedelta(hours=23, minutes=30),
        ):
            with pytest.raises(IncompatibleFrequency):
                per - bad
def test_sub_offset_nat(self):
# freq is DateOffset
for freq in ["A", "2A", "3A"]:
p = Period("NaT", freq=freq)
assert p is NaT
for o in [offsets.YearEnd(2)]:
assert p - o is NaT
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
assert p - o is NaT
for freq in ["M", "2M", "3M"]:
p = Period("NaT", freq=freq)
assert p is NaT
for o in [offsets.MonthEnd(2), offsets.MonthEnd(12)]:
assert p - o is NaT
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(365, "D"),
timedelta(365),
]:
assert p - o is NaT
# freq is Tick
for freq in ["D", "2D", "3D"]:
p = Period("NaT", freq=freq)
assert p is NaT
for o in [
offsets.Day(5),
offsets.Hour(24),
np.timedelta64(2, "D"),
np.timedelta64(3600 * 24, "s"),
timedelta(-2),
timedelta(hours=48),
]:
assert p - o is NaT
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(4, "h"),
timedelta(hours=23),
]:
assert p - o is NaT
for freq in ["H", "2H", "3H"]:
p = Period("NaT", freq=freq)
assert p is NaT
for o in [
offsets.Day(2),
offsets.Hour(3),
np.timedelta64(3, "h"),
np.timedelta64(3600, "s"),
timedelta(minutes=120),
timedelta(days=4, minutes=180),
]:
assert p - o is NaT
for o in [
offsets.YearBegin(2),
offsets.MonthBegin(1),
offsets.Minute(),
np.timedelta64(3200, "s"),
timedelta(hours=23, minutes=30),
]:
assert p - o is NaT
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_nat_ops(self, freq):
    # NaT is absorbing for all Period arithmetic.
    nat = Period("NaT", freq=freq)
    assert nat is NaT
    other = Period("2011-01", freq=freq)
    for result in (nat + 1, 1 + nat, nat - 1, nat - other, other - nat):
        assert result is NaT
def test_period_ops_offset(self):
    # Day offsets shift a daily period; sub-daily offsets are rejected.
    per = Period("2011-04-01", freq="D")
    assert per + offsets.Day() == Period("2011-04-02", freq="D")
    assert per - offsets.Day(2) == Period("2011-03-30", freq="D")

    msg = r"Input cannot be converted to Period\(freq=D\)"
    with pytest.raises(IncompatibleFrequency, match=msg):
        per + offsets.Hour(2)
    with pytest.raises(IncompatibleFrequency, match=msg):
        per - offsets.Hour(2)
def test_period_immutable():
    # see gh-17116: Period attributes must not be assignable.
    per = Period("2014Q1")
    for attr, value in (("ordinal", 14), ("freq", 2 * per.freq)):
        with pytest.raises(AttributeError):
            setattr(per, attr, value)
@pytest.mark.xfail(
    StrictVersion(dateutil.__version__.split(".dev")[0]) < StrictVersion("2.7.0"),
    reason="Bug in dateutil < 2.7.0 when parsing old dates: Period('0001-01-07', 'D')",
    strict=False,
)
def test_small_year_parsing():
    # Years < 100 must not be mangled by two-digit-year heuristics.
    per1 = Period("0001-01-07", "D")
    assert per1.year == 1
    assert per1.day == 7
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/compat/py3k.py | """
Python 3.X compatibility tools.
While this file was originally intented for Python 2 -> 3 transition,
it is now used to create a compatibility layer between different
minor versions of Python 3.
While the active version of numpy may not support a given version of python, we
allow downstream libraries to continue to use these shims for forward
compatibility with numpy while they transition their code to newer versions of
Python.
"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
import os
try:
from pathlib import Path, PurePath
except ImportError:
Path = PurePath = None
if sys.version_info[0] >= 3:
    import io

    # Prefer the pickle5 backport (PEP 574, protocol 5) when installed.
    try:
        import pickle5 as pickle
    except ImportError:
        import pickle

    long = int
    integer_types = (int,)
    basestring = str
    unicode = str
    bytes = bytes

    def asunicode(s):
        """Coerce *s* to ``str``; bytes are decoded as latin1."""
        if isinstance(s, bytes):
            return s.decode('latin1')
        return str(s)

    def asbytes(s):
        """Coerce *s* to ``bytes``; text is encoded as latin1."""
        if isinstance(s, bytes):
            return s
        return str(s).encode('latin1')

    def asstr(s):
        """Coerce *s* to the native ``str`` type."""
        if isinstance(s, bytes):
            return s.decode('latin1')
        return str(s)

    def isfileobj(f):
        """Return True if *f* is a real buffered file object."""
        return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))

    def open_latin1(filename, mode='r'):
        """Open a text file using the latin1 (iso-8859-1) codec."""
        return open(filename, mode=mode, encoding='iso-8859-1')

    def sixu(s):
        """Identity on Python 3; mirrors six's ``u()`` helper."""
        return s

    strchar = 'U'
else:
    # BUG FIX: the Python 2 pickle accelerator module is named ``cPickle``;
    # ``import cpickle`` raised ImportError on every Python 2 interpreter.
    import cPickle as pickle

    bytes = str
    long = long
    basestring = basestring
    unicode = unicode
    integer_types = (int, long)
    asbytes = str
    asstr = str
    strchar = 'S'

    def isfileobj(f):
        return isinstance(f, file)

    def asunicode(s):
        if isinstance(s, unicode):
            return s
        return str(s).decode('ascii')

    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode)

    def sixu(s):
        return unicode(s, 'unicode_escape')
def getexception():
    """Return the exception instance currently being handled."""
    _, exc_value, _ = sys.exc_info()
    return exc_value
def asbytes_nested(x):
    """Recursively convert *x* (scalar or nested iterable) to bytes."""
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
        return [asbytes_nested(item) for item in x]
    return asbytes(x)
def asunicode_nested(x):
    """Recursively convert *x* (scalar or nested iterable) to unicode."""
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
        return [asunicode_nested(item) for item in x]
    return asunicode(x)
def is_pathlib_path(obj):
    """
    Check whether *obj* is a :class:`pathlib.Path` instance.

    Prefer using `isinstance(obj, os_PathLike)` instead of this function.
    """
    if Path is None:
        # pathlib was unavailable at import time; nothing can match.
        return False
    return isinstance(obj, Path)
# from Python 3.7
class contextlib_nullcontext(object):
    """Backport of :class:`contextlib.nullcontext` (added in Python 3.7).

    A do-nothing context manager: entering yields ``enter_result`` and
    exiting never suppresses exceptions. Useful when a context manager is
    only sometimes required::

        cm = optional_cm if condition else contextlib_nullcontext()
        with cm:
            # Perform operation, using optional_cm if condition is True
    """

    def __init__(self, enter_result=None):
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *exc_info):
        return None
if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
    def npy_load_module(name, fn, info=None):
        """
        Load a module from a source file using importlib.machinery.

        .. versionadded:: 1.11.2

        Parameters
        ----------
        name : str
            Full module name.
        fn : str
            Path to module file.
        info : tuple, optional
            Only here for backward compatibility with Python 2.*.

        Returns
        -------
        mod : module

        """
        import importlib.machinery
        # NOTE(review): SourceFileLoader.load_module() is deprecated in
        # newer Pythons; kept as-is for exact backward compatibility.
        return importlib.machinery.SourceFileLoader(name, fn).load_module()
else:
    def npy_load_module(name, fn, info=None):
        """
        Load a module using the legacy ``imp`` machinery (Python < 3.4).

        .. versionadded:: 1.11.2

        Parameters
        ----------
        name : str
            Full module name.
        fn : str
            Path to module file.
        info : tuple, optional
            Information as returned by `imp.find_module`
            (suffix, mode, type).

        Returns
        -------
        mod : module

        """
        import imp
        if info is None:
            # Locate the module next to the given file path.
            path = os.path.dirname(fn)
            fo, fn, info = imp.find_module(name, [path])
        else:
            fo = open(fn, info[1])
        try:
            mod = imp.load_module(name, fo, fn, info)
        finally:
            # Always release the file handle, even if loading fails.
            fo.close()
        return mod
# backport abc.ABC
import abc

# ``abc.ABC`` only exists from Python 3.4; synthesize it via ABCMeta otherwise.
abc_ABC = (
    abc.ABC
    if sys.version_info[:2] >= (3, 4)
    else abc.ABCMeta('ABC', (object,), {'__slots__': ()})
)
# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
if sys.version_info[:2] >= (3, 6):
    # The real implementations exist; simply alias them.
    os_fspath = os.fspath
    os_PathLike = os.PathLike
else:
    def _PurePath__fspath__(self):
        # Stand-in for PurePath.__fspath__ on Pythons where it is missing.
        return str(self)

    class os_PathLike(abc_ABC):
        """Abstract base class for implementing the file system path protocol."""

        @abc.abstractmethod
        def __fspath__(self):
            """Return the file system path representation of the object."""
            raise NotImplementedError

        @classmethod
        def __subclasshook__(cls, subclass):
            # PurePath predates __fspath__; treat its subclasses as PathLike.
            if PurePath is not None and issubclass(subclass, PurePath):
                return True
            return hasattr(subclass, '__fspath__')

    def os_fspath(path):
        """Return the path representation of a path-like object.

        If str or bytes is passed in, it is returned unchanged. Otherwise the
        os.PathLike interface is used to get the path representation. If the
        path representation is not str or bytes, TypeError is raised. If the
        provided path is not str, bytes, or os.PathLike, TypeError is raised.
        """
        if isinstance(path, (str, bytes)):
            return path

        # Work from the object's type to match method resolution of other magic
        # methods.
        path_type = type(path)
        try:
            path_repr = path_type.__fspath__(path)
        except AttributeError:
            if hasattr(path_type, '__fspath__'):
                # __fspath__ exists but raised AttributeError itself.
                raise
            elif PurePath is not None and issubclass(path_type, PurePath):
                # Older pathlib objects lack __fspath__; emulate it.
                return _PurePath__fspath__(path)
            else:
                raise TypeError("expected str, bytes or os.PathLike object, "
                                "not " + path_type.__name__)
        if isinstance(path_repr, (str, bytes)):
            return path_repr
        else:
            raise TypeError("expected {}.__fspath__() to return str or bytes, "
                            "not {}".format(path_type.__name__,
                                            type(path_repr).__name__))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/heatmap/_ytype.py | import _plotly_utils.basevalidators
class YtypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``heatmap.ytype`` attribute."""

    def __init__(self, plotly_name="ytype", parent_name="heatmap", **kwargs):
        # Fall back to generated defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", ["array", "scaled"])
        super(YtypeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_mapbox.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/validators/layout/_mapbox.py<gh_stars>10-100
import _plotly_utils.basevalidators
class MapboxValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``layout.mapbox`` subplot object."""

    def __init__(self, plotly_name="mapbox", parent_name="layout", **kwargs):
        super(MapboxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated graph_objects class this validator builds.
            data_class_str=kwargs.pop("data_class_str", "Mapbox"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            accesstoken
                Sets the mapbox access token to be used for
                this mapbox map. Alternatively, the mapbox
                access token can be set in the configuration
                options under `mapboxAccessToken`. Note that
                accessToken are only required when `style` (e.g
                with values : basic, streets, outdoors, light,
                dark, satellite, satellite-streets ) and/or a
                layout layer references the Mapbox server.
            bearing
                Sets the bearing angle of the map in degrees
                counter-clockwise from North (mapbox.bearing).
            center
                :class:`plotly.graph_objects.layout.mapbox.Cent
                er` instance or dict with compatible properties
            domain
                :class:`plotly.graph_objects.layout.mapbox.Doma
                in` instance or dict with compatible properties
            layers
                A tuple of :class:`plotly.graph_objects.layout.
                mapbox.Layer` instances or dicts with
                compatible properties
            layerdefaults
                When used in a template (as
                layout.template.layout.mapbox.layerdefaults),
                sets the default property values to use for
                elements of layout.mapbox.layers
            pitch
                Sets the pitch angle of the map (in degrees,
                where 0 means perpendicular to the surface of
                the map) (mapbox.pitch).
            style
                Defines the map layers that are rendered by
                default below the trace layers defined in
                `data`, which are themselves by default
                rendered below the layers defined in
                `layout.mapbox.layers`. These layers can be
                defined either explicitly as a Mapbox Style
                object which can contain multiple layer
                definitions that load data from any public or
                private Tile Map Service (TMS or XYZ) or Web
                Map Service (WMS) or implicitly by using one of
                the built-in style objects which use WMSes
                which do not require any access tokens, or by
                using a default Mapbox style or custom Mapbox
                style URL, both of which require a Mapbox
                access token Note that Mapbox access token can
                be set in the `accesstoken` attribute or in the
                `mapboxAccessToken` config option. Mapbox
                Style objects are of the form described in the
                Mapbox GL JS documentation available at
                https://docs.mapbox.com/mapbox-gl-js/style-spec
                The built-in plotly.js styles objects are:
                open-street-map, white-bg, carto-positron,
                carto-darkmatter, stamen-terrain, stamen-toner,
                stamen-watercolor The built-in Mapbox styles
                are: basic, streets, outdoors, light, dark,
                satellite, satellite-streets Mapbox style URLs
                are of the form:
                mapbox://mapbox.mapbox-<name>-<version>
            uirevision
                Controls persistence of user-driven changes in
                the view: `center`, `zoom`, `bearing`, `pitch`.
                Defaults to `layout.uirevision`.
            zoom
                Sets the zoom level of the map (mapbox.zoom).
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/angularaxis/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/angularaxis/__init__.py
import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562): import eagerly.
    from ._visible import VisibleValidator
    from ._ticksuffix import TicksuffixValidator
    from ._tickorientation import TickorientationValidator
    from ._ticklen import TicklenValidator
    from ._tickcolor import TickcolorValidator
    from ._showticklabels import ShowticklabelsValidator
    from ._showline import ShowlineValidator
    from ._range import RangeValidator
    from ._endpadding import EndpaddingValidator
    from ._domain import DomainValidator
else:
    # Defer submodule imports until first attribute access (faster import).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._ticksuffix.TicksuffixValidator",
            "._tickorientation.TickorientationValidator",
            "._ticklen.TicklenValidator",
            "._tickcolor.TickcolorValidator",
            "._showticklabels.ShowticklabelsValidator",
            "._showline.ShowlineValidator",
            "._range.RangeValidator",
            "._endpadding.EndpaddingValidator",
            "._domain.DomainValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/_hovermode.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/validators/layout/scene/_hovermode.py<gh_stars>10-100
import _plotly_utils.basevalidators
class HovermodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``layout.scene.hovermode`` attribute."""

    def __init__(self, plotly_name="hovermode", parent_name="layout.scene", **kwargs):
        # Fall back to generated defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "modebar")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", ["closest", False])
        super(HovermodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/plotly/io/kaleido.py | <filename>.venv/lib/python3.8/site-packages/plotly/io/kaleido.py
from ._kaleido import to_image, write_image, scope
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/table/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562): import eagerly.
    from ._visible import VisibleValidator
    from ._uirevision import UirevisionValidator
    from ._uid import UidValidator
    from ._stream import StreamValidator
    from ._name import NameValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._idssrc import IdssrcValidator
    from ._ids import IdsValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfosrc import HoverinfosrcValidator
    from ._hoverinfo import HoverinfoValidator
    from ._header import HeaderValidator
    from ._domain import DomainValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._columnwidthsrc import ColumnwidthsrcValidator
    from ._columnwidth import ColumnwidthValidator
    from ._columnordersrc import ColumnordersrcValidator
    from ._columnorder import ColumnorderValidator
    from ._cells import CellsValidator
else:
    # Defer submodule imports until first attribute access (faster import).
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._uirevision.UirevisionValidator",
            "._uid.UidValidator",
            "._stream.StreamValidator",
            "._name.NameValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._idssrc.IdssrcValidator",
            "._ids.IdsValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfosrc.HoverinfosrcValidator",
            "._hoverinfo.HoverinfoValidator",
            "._header.HeaderValidator",
            "._domain.DomainValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._columnwidthsrc.ColumnwidthsrcValidator",
            "._columnwidth.ColumnwidthValidator",
            "._columnordersrc.ColumnordersrcValidator",
            "._columnorder.ColumnorderValidator",
            "._cells.CellsValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/arrays/sparse/accessor.py | <gh_stars>100-1000
"""Sparse accessor"""
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import find_common_type
from pandas.core.accessor import PandasDelegate, delegate_names
from pandas.core.arrays.sparse.array import SparseArray
from pandas.core.arrays.sparse.dtype import SparseDtype
class BaseAccessor:
    """Shared plumbing for the Series/DataFrame ``.sparse`` accessors."""

    # Error message raised by subclasses when the data is not sparse.
    _validation_msg = "Can only use the '.sparse' accessor with Sparse data."

    def __init__(self, data=None):
        self._parent = data
        self._validate(data)

    def _validate(self, data):
        # Subclasses must verify that ``data`` really holds sparse values.
        raise NotImplementedError
@delegate_names(
    SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
    """
    Accessor for sparse-dtype Series data (exposed as ``Series.sparse``).
    """

    def _validate(self, data):
        # The accessor is only valid for Series backed by a SparseDtype.
        if not isinstance(data.dtype, SparseDtype):
            raise AttributeError(self._validation_msg)

    def _delegate_property_get(self, name, *args, **kwargs):
        # npoints / density / fill_value / sp_values are read straight
        # off the underlying SparseArray.
        return getattr(self._parent.array, name)

    def _delegate_method(self, name, *args, **kwargs):
        if name == "from_coo":
            return self.from_coo(*args, **kwargs)
        elif name == "to_coo":
            return self.to_coo(*args, **kwargs)
        else:
            raise ValueError

    @classmethod
    def from_coo(cls, A, dense_index=False):
        """
        Create a Series with sparse values from a scipy.sparse.coo_matrix.

        Parameters
        ----------
        A : scipy.sparse.coo_matrix
        dense_index : bool, default False
            If False (default), the SparseSeries index consists of only the
            coords of the non-null entries of the original coo_matrix.
            If True, the SparseSeries index consists of the full sorted
            (row, col) coordinates of the coo_matrix.

        Returns
        -------
        s : Series
            A Series with sparse values.

        Examples
        --------
        >>> from scipy import sparse

        >>> A = sparse.coo_matrix(
        ...     ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>

        >>> A.todense()
        matrix([[0., 0., 1., 2.],
                [3., 0., 0., 0.],
                [0., 0., 0., 0.]])

        >>> ss = pd.Series.sparse.from_coo(A)
        >>> ss
        0  2    1.0
           3    2.0
        1  0    3.0
        dtype: Sparse[float64, nan]
        """
        from pandas.core.arrays.sparse.scipy_sparse import _coo_to_sparse_series
        from pandas import Series

        result = _coo_to_sparse_series(A, dense_index=dense_index)
        # Re-wrap to drop the intermediate object returned by the helper.
        result = Series(result.array, index=result.index, copy=False)

        return result

    def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels=False):
        """
        Create a scipy.sparse.coo_matrix from a Series with MultiIndex.

        Use row_levels and column_levels to determine the row and column
        coordinates respectively. row_levels and column_levels are the names
        (labels) or numbers of the levels. {row_levels, column_levels} must be
        a partition of the MultiIndex level names (or numbers).

        Parameters
        ----------
        row_levels : tuple/list
        column_levels : tuple/list
        sort_labels : bool, default False
            Sort the row and column labels before forming the sparse matrix.

        Returns
        -------
        y : scipy.sparse.coo_matrix
        rows : list (row labels)
        columns : list (column labels)

        Examples
        --------
        >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
        >>> s.index = pd.MultiIndex.from_tuples(
        ...     [
        ...         (1, 2, "a", 0),
        ...         (1, 2, "a", 1),
        ...         (1, 1, "b", 0),
        ...         (1, 1, "b", 1),
        ...         (2, 1, "b", 0),
        ...         (2, 1, "b", 1)
        ...     ],
        ...     names=["A", "B", "C", "D"],
        ... )
        >>> ss = s.astype("Sparse")

        >>> A, rows, columns = ss.sparse.to_coo(
        ...     row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>
        >>> rows
        [(1, 1), (1, 2), (2, 1)]
        >>> columns
        [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
        """
        from pandas.core.arrays.sparse.scipy_sparse import _sparse_series_to_coo

        A, rows, columns = _sparse_series_to_coo(
            self._parent, row_levels, column_levels, sort_labels=sort_labels
        )
        return A, rows, columns

    def to_dense(self):
        """
        Convert a Series from sparse values to dense.

        .. versionadded:: 0.25.0

        Returns
        -------
        Series:
            A Series with the same values, stored as a dense array.

        Examples
        --------
        >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
        >>> series.sparse.to_dense()
        0    0
        1    1
        2    0
        dtype: int64
        """
        from pandas import Series

        return Series(
            self._parent.array.to_dense(),
            index=self._parent.index,
            name=self._parent.name,
        )
class SparseFrameAccessor(BaseAccessor, PandasDelegate):
    """
    DataFrame accessor for sparse data.

    .. versionadded:: 0.25.0
    """

    def _validate(self, data):
        # Every column must be sparse for the accessor to be usable.
        dtypes = data.dtypes
        if not all(isinstance(t, SparseDtype) for t in dtypes):
            raise AttributeError(self._validation_msg)

    @classmethod
    def from_spmatrix(cls, data, index=None, columns=None):
        """
        Create a new DataFrame from a scipy sparse matrix.

        .. versionadded:: 0.25.0

        Parameters
        ----------
        data : scipy.sparse.spmatrix
            Must be convertible to csc format.
        index, columns : Index, optional
            Row and column labels to use for the resulting DataFrame.
            Defaults to a RangeIndex.

        Returns
        -------
        DataFrame
            Each column of the DataFrame is stored as a
            :class:`arrays.SparseArray`.

        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.eye(3)
        >>> pd.DataFrame.sparse.from_spmatrix(mat)
             0    1    2
        0  1.0  0.0  0.0
        1  0.0  1.0  0.0
        2  0.0  0.0  1.0
        """
        from pandas import DataFrame
        from pandas._libs.sparse import IntIndex

        data = data.tocsc()
        index, columns = cls._prep_index(data, index, columns)
        n_rows, n_columns = data.shape
        # We need to make sure indices are sorted, as we create
        # IntIndex with no input validation (i.e. check_integrity=False ).
        # Indices may already be sorted in scipy in which case this adds
        # a small overhead.
        data.sort_indices()
        indices = data.indices
        indptr = data.indptr
        array_data = data.data
        dtype = SparseDtype(array_data.dtype, 0)
        arrays = []
        for i in range(n_columns):
            # Slice out the i-th CSC column and wrap it as a SparseArray.
            sl = slice(indptr[i], indptr[i + 1])
            idx = IntIndex(n_rows, indices[sl], check_integrity=False)
            arr = SparseArray._simple_new(array_data[sl], idx, dtype)
            arrays.append(arr)
        return DataFrame._from_arrays(
            arrays, columns=columns, index=index, verify_integrity=False
        )

    def to_dense(self):
        """
        Convert a DataFrame with sparse values to dense.

        .. versionadded:: 0.25.0

        Returns
        -------
        DataFrame
            A DataFrame with the same values stored as dense arrays.

        Examples
        --------
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
        >>> df.sparse.to_dense()
           A
        0  0
        1  1
        2  0
        """
        from pandas import DataFrame

        data = {k: v.array.to_dense() for k, v in self._parent.items()}
        return DataFrame(data, index=self._parent.index, columns=self._parent.columns)

    def to_coo(self):
        """
        Return the contents of the frame as a sparse SciPy COO matrix.

        .. versionadded:: 0.25.0

        Returns
        -------
        coo_matrix : scipy.sparse.spmatrix
            If the caller is heterogeneous and contains booleans or objects,
            the result will be of dtype=object. See Notes.

        Notes
        -----
        The dtype will be the lowest-common-denominator type (implicit
        upcasting); that is to say if the dtypes (even of numeric types)
        are mixed, the one that accommodates all will be chosen.

        e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.
        """
        import_optional_dependency("scipy")
        from scipy.sparse import coo_matrix

        dtype = find_common_type(self._parent.dtypes)
        if isinstance(dtype, SparseDtype):
            dtype = dtype.subtype

        cols, rows, datas = [], [], []
        for col, name in enumerate(self._parent):
            s = self._parent[name]
            # Positions of the stored (non-fill) values in this column.
            row = s.array.sp_index.to_int_index().indices
            cols.append(np.repeat(col, len(row)))
            rows.append(row)
            datas.append(s.array.sp_values.astype(dtype, copy=False))

        cols = np.concatenate(cols)
        rows = np.concatenate(rows)
        datas = np.concatenate(datas)
        return coo_matrix((datas, (rows, cols)), shape=self._parent.shape)

    @property
    def density(self) -> float:
        """
        Ratio of non-sparse points to total (dense) data points.
        """
        return np.mean([column.array.density for _, column in self._parent.items()])

    @staticmethod
    def _prep_index(data, index, columns):
        # Normalize the index/columns arguments, defaulting to RangeIndex,
        # and check that their lengths match the matrix shape.
        import pandas.core.indexes.base as ibase
        from pandas.core.indexes.api import ensure_index

        N, K = data.shape
        if index is None:
            index = ibase.default_index(N)
        else:
            index = ensure_index(index)
        if columns is None:
            columns = ibase.default_index(K)
        else:
            columns = ensure_index(columns)

        if len(columns) != K:
            raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
        if len(index) != N:
            raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
        return index, columns
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/test_dtypes.py | from datetime import datetime, timedelta
from importlib import reload
import string
import sys
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Series,
Timedelta,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestSeriesDtypes:
def test_dt64_series_astype_object(self):
dt64ser = Series(date_range("20130101", periods=3))
result = dt64ser.astype(object)
assert isinstance(result.iloc[0], datetime)
assert result.dtype == np.object_
def test_td64_series_astype_object(self):
tdser = Series(["59 Days", "59 Days", "NaT"], dtype="timedelta64[ns]")
result = tdser.astype(object)
assert isinstance(result.iloc[0], timedelta)
assert result.dtype == np.object_
@pytest.mark.parametrize("dtype", ["float32", "float64", "int64", "int32"])
def test_astype(self, dtype):
    # astype should convert the dtype while preserving the name.
    ser = Series(np.random.randn(5), name="foo")
    converted = ser.astype(dtype)
    assert converted.dtype == dtype
    assert converted.name == ser.name
def test_dtype(self, datetime_series):
assert datetime_series.dtype == np.dtype("float64")
assert datetime_series.dtypes == np.dtype("float64")
@pytest.mark.parametrize("value", [np.nan, np.inf])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
def test_astype_cast_nan_inf_int(self, dtype, value):
    # gh-14265: converting non-finite floats to int must raise, not wrap.
    msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
    with pytest.raises(ValueError, match=msg):
        Series([value]).astype(dtype)
@pytest.mark.parametrize("dtype", [int, np.int8, np.int64])
def test_astype_cast_object_int_fail(self, dtype):
    # Non-numeric strings cannot be cast to any integer dtype.
    words = Series(["car", "house", "tree", "1"])
    msg = r"invalid literal for int\(\) with base 10: 'car'"
    with pytest.raises(ValueError, match=msg):
        words.astype(dtype)
def test_astype_cast_object_int(self):
arr = Series(["1", "2", "3", "4"], dtype=object)
result = arr.astype(int)
tm.assert_series_equal(result, Series(np.arange(1, 5)))
def test_astype_datetime(self):
s = Series(iNaT, dtype="M8[ns]", index=range(5))
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0)])
s = s.astype("O")
assert s.dtype == np.object_
s = Series([datetime(2001, 1, 2, 0, 0) for i in range(3)])
s[1] = np.nan
assert s.dtype == "M8[ns]"
s = s.astype("O")
assert s.dtype == np.object_
def test_astype_datetime64tz(self):
    # Round-trips between tz-aware datetime64 and object preserve data.
    ser = Series(date_range("20130101", periods=3, tz="US/Eastern"))

    # astype to object
    as_obj = ser.astype(object)
    tm.assert_series_equal(as_obj, Series(ser.astype(object), dtype=object))

    # re-localizing the naive values recovers the original
    relocalized = Series(ser.values).dt.tz_localize("UTC").dt.tz_convert(ser.dt.tz)
    tm.assert_series_equal(relocalized, ser)

    # object dtype is preserved on construction
    tm.assert_series_equal(Series(ser.astype(object)), ser.astype(object))

    # astype naive values back to the tz-aware dtype
    tm.assert_series_equal(
        Series(ser.values).astype("datetime64[ns, US/Eastern]"), ser
    )
    tm.assert_series_equal(Series(ser.values).astype(ser.dtype), ser)

    # astype to a different timezone shifts the wall times
    converted = ser.astype("datetime64[ns, CET]")
    tm.assert_series_equal(
        converted, Series(date_range("20130101 06:00:00", periods=3, tz="CET"))
    )
@pytest.mark.parametrize("dtype", [str, np.str_])
@pytest.mark.parametrize(
    "series",
    [
        Series([string.digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
        Series([string.digits * 10, tm.rands(63), tm.rands(64), np.nan, 1.0]),
    ],
)
def test_astype_str_map(self, dtype, series):
    # see gh-4405: astype(str) should match an element-wise str() mapping.
    tm.assert_series_equal(series.astype(dtype), series.map(str))
def test_astype_str_cast_dt64(self):
# see gh-9757
ts = Series([Timestamp("2010-01-04 00:00:00")])
s = ts.astype(str)
expected = Series([str("2010-01-04")])
tm.assert_series_equal(s, expected)
ts = Series([Timestamp("2010-01-04 00:00:00", tz="US/Eastern")])
s = ts.astype(str)
expected = Series([str("2010-01-04 00:00:00-05:00")])
tm.assert_series_equal(s, expected)
def test_astype_str_cast_td64(self):
# see gh-9757
td = Series([Timedelta(1, unit="d")])
ser = td.astype(str)
expected = Series([str("1 days")])
tm.assert_series_equal(ser, expected)
def test_astype_unicode(self):
    # see gh-7758: astype("unicode") should behave like mapping str.
    digits = string.digits
    samples = [
        Series([digits * 10, tm.rands(63), tm.rands(64), tm.rands(1000)]),
        Series(["データーサイエンス、お前はもう死んでいる"]),
    ]

    former_encoding = None
    if sys.getdefaultencoding() == "utf-8":
        samples.append(Series(["野菜食べないとやばい".encode("utf-8")]))

    for sample in samples:
        tm.assert_series_equal(sample.astype("unicode"), sample.map(str))

    # Restore the former default encoding (Python 2 era relic).
    if former_encoding is not None and former_encoding != "utf-8":
        reload(sys)
        sys.setdefaultencoding(former_encoding)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
    # see gh-7271: dict-like dtype mappings keyed by the Series name.
    ser = Series(range(0, 10, 2), name="abc")

    result = ser.astype(dtype_class({"abc": str}))
    tm.assert_series_equal(result, Series(["0", "2", "4", "6", "8"], name="abc"))

    result = ser.astype(dtype_class({"abc": "float64"}))
    tm.assert_series_equal(
        result, Series([0.0, 2.0, 4.0, 6.0, 8.0], dtype="float64", name="abc")
    )

    msg = (
        "Only the Series name can be used for the key in Series dtype "
        r"mappings\."
    )
    # Mappings with keys other than (exactly) the Series name must raise.
    for bad_mapping in ({"abc": str, "def": str}, {0: str}):
        with pytest.raises(KeyError, match=msg):
            ser.astype(dtype_class(bad_mapping))

    # GH16717: an empty mapping should also raise.
    if dtype_class is Series:
        empty = dtype_class({}, dtype=object)
    else:
        empty = dtype_class({})
    with pytest.raises(KeyError, match=msg):
        ser.astype(empty)
def test_astype_categories_raises(self):
    # The categories/ordered keywords to astype("category") were
    # deprecated in GH-17636 and removed in GH-27141; passing them now
    # raises TypeError like any unexpected keyword.
    ser = Series(["a", "b", "a"])
    with pytest.raises(TypeError, match="got an unexpected"):
        ser.astype("category", categories=["a", "b"], ordered=True)
def test_astype_from_categorical(self):
    # astype("category") on a Series (and a DataFrame column) must match
    # wrapping the same values in a Categorical directly.
    items = ["a", "b", "c", "a"]
    s = Series(items)
    exp = Series(Categorical(items))
    res = s.astype("category")
    tm.assert_series_equal(res, exp)
    items = [1, 2, 3, 1]
    s = Series(items)
    exp = Series(Categorical(items))
    res = s.astype("category")
    tm.assert_series_equal(res, exp)
    # same check through a DataFrame column assignment
    df = DataFrame({"cats": [1, 2, 3, 4, 5, 6], "vals": [1, 2, 3, 4, 5, 6]})
    cats = Categorical([1, 2, 3, 4, 5, 6])
    exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
    df["cats"] = df["cats"].astype("category")
    tm.assert_frame_equal(exp_df, df)
    df = DataFrame(
        {"cats": ["a", "b", "b", "a", "a", "d"], "vals": [1, 2, 3, 4, 5, 6]}
    )
    cats = Categorical(["a", "b", "b", "a", "a", "d"])
    exp_df = DataFrame({"cats": cats, "vals": [1, 2, 3, 4, 5, 6]})
    df["cats"] = df["cats"].astype("category")
    tm.assert_frame_equal(exp_df, df)
    # with keywords
    # passing a CategoricalDtype carries ordered/categories through
    lst = ["a", "b", "c", "a"]
    s = Series(lst)
    exp = Series(Categorical(lst, ordered=True))
    res = s.astype(CategoricalDtype(None, ordered=True))
    tm.assert_series_equal(res, exp)
    exp = Series(Categorical(lst, categories=list("abcdef"), ordered=True))
    res = s.astype(CategoricalDtype(list("abcdef"), ordered=True))
    tm.assert_series_equal(res, exp)
def test_astype_categorical_to_other(self):
    # Casting a categorical Series to non-categorical dtypes: identity
    # for "category", str/int element casts, ValueError for impossible
    # numeric casts, and object round-trips.
    value = np.random.RandomState(0).randint(0, 10000, 100)
    df = DataFrame({"value": value})
    labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]
    cat_labels = Categorical(labels, labels)
    df = df.sort_values(by=["value"], ascending=True)
    df["value_group"] = pd.cut(
        df.value, range(0, 10500, 500), right=False, labels=cat_labels
    )
    s = df["value_group"]
    # casting to category (or an unconstrained CategoricalDtype) is a no-op
    expected = s
    tm.assert_series_equal(s.astype("category"), expected)
    tm.assert_series_equal(s.astype(CategoricalDtype()), expected)
    # string labels cannot become floats
    msg = r"could not convert string to float|invalid literal for float\(\)"
    with pytest.raises(ValueError, match=msg):
        s.astype("float64")
    cat = Series(Categorical(["a", "b", "b", "a", "a", "c", "c", "c"]))
    exp = Series(["a", "b", "b", "a", "a", "c", "c", "c"])
    tm.assert_series_equal(cat.astype("str"), exp)
    s2 = Series(Categorical(["1", "2", "3", "4"]))
    exp2 = Series([1, 2, 3, 4]).astype(int)
    tm.assert_series_equal(s2.astype("int"), exp2)
    # object don't sort correctly, so just compare that we have the same
    # values
    def cmp(a, b):
        # order-insensitive comparison of the distinct values
        tm.assert_almost_equal(np.sort(np.unique(a)), np.sort(np.unique(b)))
    expected = Series(np.array(s.values), name="value_group")
    cmp(s.astype("object"), expected)
    cmp(s.astype(np.object_), expected)
    # array conversion
    tm.assert_almost_equal(np.array(s), np.array(s.values))
    tm.assert_series_equal(s.astype("category"), s)
    tm.assert_series_equal(s.astype(CategoricalDtype()), s)
    # object -> category re-infers categories: sorted and unused dropped
    roundtrip_expected = s.cat.set_categories(
        s.cat.categories.sort_values()
    ).cat.remove_unused_categories()
    tm.assert_series_equal(
        s.astype("object").astype("category"), roundtrip_expected
    )
    tm.assert_series_equal(
        s.astype("object").astype(CategoricalDtype()), roundtrip_expected
    )
    # invalid conversion (these are NOT a dtype)
    msg = (
        "dtype '<class 'pandas.core.arrays.categorical.Categorical'>' "
        "not understood"
    )
    for invalid in [
        lambda x: x.astype(Categorical),
        lambda x: x.astype("object").astype(Categorical),
    ]:
        with pytest.raises(TypeError, match=msg):
            invalid(s)
@pytest.mark.parametrize("name", [None, "foo"])
@pytest.mark.parametrize("dtype_ordered", [True, False])
@pytest.mark.parametrize("series_ordered", [True, False])
def test_astype_categorical_to_categorical(
    self, name, dtype_ordered, series_ordered
):
    # GH 10696/18593
    # categorical -> categorical casts: the target dtype's ordered flag
    # always wins; unspecified categories keep the original ones.
    s_data = list("abcaacbab")
    s_dtype = CategoricalDtype(list("bac"), ordered=series_ordered)
    s = Series(s_data, dtype=s_dtype, name=name)
    # unspecified categories
    dtype = CategoricalDtype(ordered=dtype_ordered)
    result = s.astype(dtype)
    exp_dtype = CategoricalDtype(s_dtype.categories, dtype_ordered)
    expected = Series(s_data, name=name, dtype=exp_dtype)
    tm.assert_series_equal(result, expected)
    # different categories
    dtype = CategoricalDtype(list("adc"), dtype_ordered)
    result = s.astype(dtype)
    expected = Series(s_data, name=name, dtype=dtype)
    tm.assert_series_equal(result, expected)
    if dtype_ordered is False:
        # not specifying ordered, so only test once
        # plain "category" keeps the original dtype untouched
        expected = s
        result = s.astype("category")
        tm.assert_series_equal(result, expected)
def test_astype_bool_missing_to_categorical(self):
# GH-19182
s = Series([True, False, np.nan])
assert s.dtypes == np.object_
result = s.astype(CategoricalDtype(categories=[True, False]))
expected = Series(Categorical([True, False, np.nan], categories=[True, False]))
tm.assert_series_equal(result, expected)
def test_astype_categoricaldtype(self):
s = Series(["a", "b", "a"])
result = s.astype(CategoricalDtype(["a", "b"], ordered=True))
expected = Series(Categorical(["a", "b", "a"], ordered=True))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b"], ordered=False))
expected = Series(Categorical(["a", "b", "a"], ordered=False))
tm.assert_series_equal(result, expected)
result = s.astype(CategoricalDtype(["a", "b", "c"], ordered=False))
expected = Series(
Categorical(["a", "b", "a"], categories=["a", "b", "c"], ordered=False)
)
tm.assert_series_equal(result, expected)
tm.assert_index_equal(result.cat.categories, Index(["a", "b", "c"]))
@pytest.mark.parametrize("dtype", [np.datetime64, np.timedelta64])
def test_astype_generic_timestamp_no_frequency(self, dtype, request):
    # see gh-15524, gh-15987
    # Casting to a unit-less datetime64/timedelta64 must raise and point
    # the user at the explicit "[ns]" spelling.
    data = [1]
    s = Series(data)
    if np.dtype(dtype).name not in ["timedelta64", "datetime64"]:
        # platform-aliased dtypes already carry a unit; expected failure
        mark = pytest.mark.xfail(reason="GH#33890 Is assigned ns unit")
        request.node.add_marker(mark)
    msg = (
        fr"The '{dtype.__name__}' dtype has no unit\. "
        fr"Please pass in '{dtype.__name__}\[ns\]' instead."
    )
    with pytest.raises(ValueError, match=msg):
        s.astype(dtype)
@pytest.mark.parametrize("dtype", np.typecodes["All"])
def test_astype_empty_constructor_equality(self, dtype):
    # see gh-15524
    # Constructing an empty Series with dtype directly must equal
    # constructing empty then casting via astype.
    if dtype not in (
        "S",
        "V",  # poor support (if any) currently
        "M",
        "m",  # Generic timestamps raise a ValueError. Already tested.
    ):
        init_empty = Series([], dtype=dtype)
        # Series([]) without dtype emits a DeprecationWarning (object default)
        with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
            as_type_empty = Series([]).astype(dtype)
        tm.assert_series_equal(init_empty, as_type_empty)
def test_arg_for_errors_in_astype(self):
    # see gh-14878: the errors= keyword only accepts 'raise' or 'ignore';
    # anything else raises ValueError naming the bad value.
    ser = Series([1, 2, 3])
    msg = (
        r"Expected value of kwarg 'errors' to be one of \['raise', "
        r"'ignore'\]\. Supplied value is 'False'"
    )
    with pytest.raises(ValueError, match=msg):
        ser.astype(np.float64, errors=False)
    # a valid value is accepted without complaint
    ser.astype(np.int8, errors="raise")
def test_intercept_astype_object(self):
series = Series(date_range("1/1/2000", periods=10))
# This test no longer makes sense, as
# Series is by default already M8[ns].
expected = series.astype("object")
df = DataFrame({"a": series, "b": np.random.randn(len(series))})
exp_dtypes = Series(
[np.dtype("datetime64[ns]"), np.dtype("float64")], index=["a", "b"]
)
tm.assert_series_equal(df.dtypes, exp_dtypes)
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
df = DataFrame({"a": series, "b": ["foo"] * len(series)})
result = df.values.squeeze()
assert (result[:, 0] == expected.values).all()
def test_series_to_categorical(self):
# see gh-16524: test conversion of Series to Categorical
series = Series(["a", "b", "c"])
result = Series(series, dtype="category")
expected = Series(["a", "b", "c"], dtype="category")
tm.assert_series_equal(result, expected)
def test_infer_objects_series(self):
# GH 11221
actual = Series(np.array([1, 2, 3], dtype="O")).infer_objects()
expected = Series([1, 2, 3])
tm.assert_series_equal(actual, expected)
actual = Series(np.array([1, 2, 3, None], dtype="O")).infer_objects()
expected = Series([1.0, 2.0, 3.0, np.nan])
tm.assert_series_equal(actual, expected)
# only soft conversions, unconvertable pass thru unchanged
actual = Series(np.array([1, 2, 3, None, "a"], dtype="O")).infer_objects()
expected = Series([1, 2, 3, None, "a"])
assert actual.dtype == "object"
tm.assert_series_equal(actual, expected)
@pytest.mark.parametrize(
    "data",
    [
        pd.period_range("2000", periods=4),
        pd.IntervalIndex.from_breaks([1, 2, 3, 4]),
    ],
)
def test_values_compatibility(self, data):
    # https://github.com/pandas-dev/pandas/issues/23995
    # .values on extension-backed Series must match an object-dtype
    # ndarray of the same data.
    result = pd.Series(data).values
    expected = np.array(data.astype(object))
    tm.assert_numpy_array_equal(result, expected)
def test_reindex_astype_order_consistency(self):
# GH 17444
s = Series([1, 2, 3], index=[2, 0, 1])
new_index = [0, 1, 2]
temp_dtype = "category"
new_dtype = str
s1 = s.reindex(new_index).astype(temp_dtype).astype(new_dtype)
s2 = s.astype(temp_dtype).reindex(new_index).astype(new_dtype)
tm.assert_series_equal(s1, s2)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_longdouble.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import division, absolute_import, print_function
import warnings
import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
# Characteristics of the platform's long double, used by the skip
# conditions throughout this module.
LD_INFO = np.finfo(np.longdouble)
# True when long double genuinely has more precision than double.
longdouble_longer_than_double = (LD_INFO.eps < np.finfo(np.double).eps)
_o = 1 + LD_INFO.eps
# True when repr -> longdouble loses precision (platform lacks strtold_l).
string_to_longdouble_inaccurate = (_o != np.longdouble(repr(_o)))
del _o
def test_scalar_extraction():
    """Confirm that extracting a value doesn't convert to python float"""
    # 1 + eps is only representable as a longdouble; indexing must keep it.
    o = 1 + LD_INFO.eps
    a = np.array([o, o, o])
    assert_equal(a[1], o)
# Conversions string -> long double
# 0.1 not exactly representable in base 2 floating point.
repr_precision = len(repr(np.longdouble(0.1)))
# +2 from macro block starting around line 842 in scalartypes.c.src.
@pytest.mark.skipif(LD_INFO.precision + 2 >= repr_precision,
                    reason="repr precision not enough to show eps")
def test_repr_roundtrip():
    # We will only see eps in repr if within printing precision.
    # repr(longdouble) -> longdouble must be lossless when it can be shown.
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o)), o, "repr was %s" % repr(o))
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_repr_roundtrip_bytes():
    # The same repr round-trip must also work from an ASCII bytes object.
    o = 1 + LD_INFO.eps
    assert_equal(np.longdouble(repr(o).encode("ascii")), o)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
@pytest.mark.parametrize("strtype", (np.str_, np.bytes_, str, bytes))
def test_array_and_stringlike_roundtrip(strtype):
    """
    Test that string representations of long-double roundtrip both
    for array casting and scalar coercion, see also gh-15608.
    """
    o = 1 + LD_INFO.eps
    if strtype in (np.bytes_, bytes):
        o_str = strtype(repr(o).encode("ascii"))
    else:
        o_str = strtype(repr(o))
    # Test that `o` is correctly coerced from the string-like
    assert o == np.longdouble(o_str)
    # Test that arrays also roundtrip correctly:
    o_strarr = np.asarray([o] * 3, dtype=strtype)
    assert (o == o_strarr.astype(np.longdouble)).all()
    # And array coercion and casting to string give the same as scalar repr:
    assert (o_strarr == o_str).all()
    assert (np.asarray([o] * 3).astype(strtype) == o_str).all()
def test_bogus_string():
    """Non-numeric strings must raise ValueError when coerced to longdouble."""
    for bad_input in ("spam", "1.0 flub"):
        assert_raises(ValueError, np.longdouble, bad_input)
@pytest.mark.skipif(string_to_longdouble_inaccurate, reason="Need strtold_l")
def test_fromstring():
    # np.fromstring must parse repeated longdouble reprs without losing eps.
    o = 1 + LD_INFO.eps
    s = (" " + repr(o))*5
    a = np.array([o]*5)
    assert_equal(np.fromstring(s, sep=" ", dtype=np.longdouble), a,
                 err_msg="reading '%s'" % s)
def test_fromstring_complex():
    # Parsing complex values with sep= : well-formed values parse fully;
    # malformed ones parse only the leading valid prefix and emit a
    # DeprecationWarning.
    for ctype in ["complex", "cdouble", "cfloat"]:
        # Check spacing between separator
        assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
                     np.array([1., 2., 3., 4.]))
        # Real component not specified
        assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
                     np.array([1.j, -2.j, 3.j, 40.j]))
        # Both components specified
        assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
                     np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
        # Spaces at wrong places
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
                         np.array([1.]))
        # Incomplete imaginary/trailing-sign forms
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
                         np.array([1.]))
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
                         np.array([1j]))
def test_fromstring_bogus():
    # A junk token stops parsing at the last valid value and warns.
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
                     np.array([1., 2., 3.]))
def test_fromstring_empty():
    # A string with no parseable values yields an empty array and warns.
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("xxxxx", sep="x"),
                     np.array([]))
def test_fromstring_missing():
    # Consecutive separators stop parsing after the first value and warn.
    with assert_warns(DeprecationWarning):
        assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
                     np.array([1]))
class TestFileBased(object):
    """File-based round-trips of long-double text (fromfile/loadtxt/etc.)."""

    # A target array of identical long-double values whose repr carries
    # eps, plus its on-disk text form (one value per line).
    ldbl = 1 + LD_INFO.eps
    tgt = np.array([ldbl]*5)
    out = ''.join([repr(t) + '\n' for t in tgt])

    def _assert_fromfile(self, dtype, content, expected, sep=",",
                         deprecated=False):
        # Write `content` to a temp file and check that np.fromfile parses
        # it as `expected`.  `deprecated=True` marks malformed input that
        # currently parses a valid prefix under a DeprecationWarning.
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(content)
            if deprecated:
                with assert_warns(DeprecationWarning):
                    res = np.fromfile(path, dtype=dtype, sep=sep)
            else:
                res = np.fromfile(path, dtype=dtype, sep=sep)
        assert_equal(res, expected)

    def test_fromfile_bogus(self):
        # A junk token stops parsing at the last valid value.
        self._assert_fromfile(float, "1. 2. 3. flop 4.\n",
                              np.array([1., 2., 3.]), sep=" ",
                              deprecated=True)

    def test_fromfile_complex(self):
        for ctype in ["complex", "cdouble", "cfloat"]:
            # Check spacing between separator; only real components.
            self._assert_fromfile(ctype, "1, 2 , 3 ,4\n",
                                  np.array([1., 2., 3., 4.]))
            # Real component not specified.
            self._assert_fromfile(ctype, "1j, -2j, 3j, 4e1j\n",
                                  np.array([1.j, -2.j, 3.j, 40.j]))
            # Both components specified.
            self._assert_fromfile(
                ctype, "1+1j,2-2j, -3+3j, -4e1+4j\n",
                np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
            # Malformed values (spaces inside a number, dangling signs):
            # only the leading valid prefix parses, with a warning.
            for content, expected in [
                ("1+2 j,3\n", np.array([1.])),
                ("1+ 2j,3\n", np.array([1.])),
                ("1 +2j,3\n", np.array([1.])),
                ("1+j\n", np.array([1.])),
                ("1+\n", np.array([1.])),
                ("1j+1\n", np.array([1.j])),
            ]:
                self._assert_fromfile(ctype, content, expected,
                                      deprecated=True)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_fromfile(self):
        self._assert_fromfile(np.longdouble, self.out, self.tgt, sep="\n")

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_genfromtxt(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.genfromtxt(path, dtype=np.longdouble)
        assert_equal(res, self.tgt)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_loadtxt(self):
        with temppath() as path:
            with open(path, 'wt') as f:
                f.write(self.out)
            res = np.loadtxt(path, dtype=np.longdouble)
        assert_equal(res, self.tgt)

    @pytest.mark.skipif(string_to_longdouble_inaccurate,
                        reason="Need strtold_l")
    def test_tofile_roundtrip(self):
        # tofile -> fromfile must reproduce the array exactly.
        with temppath() as path:
            self.tgt.tofile(path, sep=" ")
            res = np.fromfile(path, dtype=np.longdouble, sep=" ")
        assert_equal(res, self.tgt)
# Conversions long double -> string
def test_repr_exact():
    # repr of 1+eps must not collapse to '1'.
    o = 1 + LD_INFO.eps
    assert_(repr(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_format():
    # str.format with enough digits must show eps rather than plain '1'.
    o = 1 + LD_INFO.eps
    assert_("{0:.40g}".format(o) != '1')
@pytest.mark.skipif(longdouble_longer_than_double, reason="BUG #2376")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_percent():
    # %-formatting with enough digits must likewise show eps.
    o = 1 + LD_INFO.eps
    assert_("%.40g" % o != '1')
@pytest.mark.skipif(longdouble_longer_than_double,
                    reason="array repr problem")
@pytest.mark.skipif(string_to_longdouble_inaccurate,
                    reason="Need strtold_l")
def test_array_repr():
    # Array repr must distinguish [1+eps] from [1].
    o = 1 + LD_INFO.eps
    a = np.array([o])
    b = np.array([1], dtype=np.longdouble)
    # sanity check: the construction itself must not have lost eps
    if not np.all(a != b):
        raise ValueError("precision loss creating arrays")
    assert_(repr(a) != repr(b))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
    # The base class runs each test under a locale whose decimal point is
    # a comma; parsing/printing must still use '.' regardless.

    def test_repr_roundtrip_foreign(self):
        o = 1.5
        assert_equal(o, np.longdouble(repr(o)))

    def test_fromstring_foreign_repr(self):
        f = 1.234
        a = np.fromstring(repr(f), dtype=float, sep=" ")
        assert_equal(a[0], f)

    def test_fromstring_best_effort_float(self):
        # "1,234" parses only "1" under a C-style decimal point, and warns.
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
                         np.array([1.]))

    def test_fromstring_best_effort(self):
        with assert_warns(DeprecationWarning):
            assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
                         np.array([1.]))

    def test_fromstring_foreign(self):
        s = "1.234"
        a = np.fromstring(s, dtype=np.longdouble, sep=" ")
        assert_equal(a[0], np.longdouble(s))

    def test_fromstring_foreign_sep(self):
        a = np.array([1, 2, 3, 4])
        b = np.fromstring("1,2,3,4,", dtype=np.longdouble, sep=",")
        assert_array_equal(a, b)

    def test_fromstring_foreign_value(self):
        with assert_warns(DeprecationWarning):
            b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
            assert_array_equal(b[0], 1)
@pytest.mark.parametrize("int_val", [
    # cases discussed in gh-10723
    # and gh-9968
    2 ** 1024, 0])
def test_longdouble_from_int(int_val):
    # for issue gh-9968
    # int -> longdouble and str -> longdouble must agree, even for values
    # that overflow a plain double.
    str_val = str(int_val)
    # we'll expect a RuntimeWarning on platforms
    # with np.longdouble equivalent to np.double
    # for large integer input
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', RuntimeWarning)
        # can be inf==inf on some platforms
        assert np.longdouble(int_val) == np.longdouble(str_val)
        # we can't directly compare the int and
        # max longdouble value on all platforms
        if np.allclose(np.finfo(np.longdouble).max,
                       np.finfo(np.double).max) and w:
            # longdouble == double here, so the overflow warning is expected
            assert w[0].category is RuntimeWarning
@pytest.mark.parametrize("bool_val", [
    True, False])
def test_longdouble_from_bool(bool_val):
    # bool coercion must match coercing the equivalent int (1/0).
    assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/fcompiler/pathf95.py | <filename>env/lib/python3.8/site-packages/numpy/distutils/fcompiler/pathf95.py<gh_stars>1000+
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
compilers = ['PathScaleFCompiler']
class PathScaleFCompiler(FCompiler):
    """numpy.distutils wrapper for the PathScale pathf95 Fortran compiler."""

    compiler_type = 'pathf95'
    description = 'PathScale Fortran Compiler'
    # Matched against the output of `pathf95 -version`.
    version_pattern = r'PathScale\(TM\) Compiler Suite: Version (?P<version>[\d.]+)'

    # Command lines for each build role; -fixedform selects fixed-format
    # Fortran for f77/fix sources.
    executables = {
        'version_cmd'  : ["pathf95", "-version"],
        'compiler_f77' : ["pathf95", "-fixedform"],
        'compiler_fix' : ["pathf95", "-fixedform"],
        'compiler_f90' : ["pathf95"],
        'linker_so'    : ["pathf95", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-fPIC']
    module_dir_switch = '-module '  # Don't remove ending space!
    module_include_switch = '-I'

    def get_flags_opt(self):
        # Optimized build flags.
        return ['-O3']

    def get_flags_debug(self):
        # Debug build flags.
        return ['-g']
if __name__ == '__main__':
    # Manual smoke test: print the detected pathf95 version.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    print(customized_fcompiler(compiler='pathf95').get_version())
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/frame/test_cumulative.py | """
Tests for DataFrame cumulative operations
See also
--------
tests.series.test_cumulative
"""
import numpy as np
import pytest
from pandas import DataFrame, Series, _is_numpy_dev
import pandas._testing as tm
class TestDataFrameCumulativeOps:
    # ---------------------------------------------------------------------
    # Cumulative Operations - cumsum, cummax, ...
    # Each op is checked along both axes against applying the Series
    # equivalent column-/row-wise; `datetime_frame` is a pytest fixture
    # (a float DataFrame with a DatetimeIndex) into which NaNs are seeded.

    def test_cumsum_corner(self):
        dm = DataFrame(np.arange(20).reshape(4, 5), index=range(4), columns=range(5))
        # TODO(wesm): do something with this?
        result = dm.cumsum()  # noqa

    def test_cumsum(self, datetime_frame):
        datetime_frame.loc[5:10, 0] = np.nan
        datetime_frame.loc[10:15, 1] = np.nan
        datetime_frame.loc[15:, 2] = np.nan
        # axis = 0
        cumsum = datetime_frame.cumsum()
        expected = datetime_frame.apply(Series.cumsum)
        tm.assert_frame_equal(cumsum, expected)
        # axis = 1
        cumsum = datetime_frame.cumsum(axis=1)
        expected = datetime_frame.apply(Series.cumsum, axis=1)
        tm.assert_frame_equal(cumsum, expected)
        # works
        df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
        df.cumsum()
        # fix issue: result shape must match the input shape
        cumsum_xs = datetime_frame.cumsum(axis=1)
        assert np.shape(cumsum_xs) == np.shape(datetime_frame)

    def test_cumprod(self, datetime_frame):
        datetime_frame.loc[5:10, 0] = np.nan
        datetime_frame.loc[10:15, 1] = np.nan
        datetime_frame.loc[15:, 2] = np.nan
        # axis = 0
        cumprod = datetime_frame.cumprod()
        expected = datetime_frame.apply(Series.cumprod)
        tm.assert_frame_equal(cumprod, expected)
        # axis = 1
        cumprod = datetime_frame.cumprod(axis=1)
        expected = datetime_frame.apply(Series.cumprod, axis=1)
        tm.assert_frame_equal(cumprod, expected)
        # fix issue: result shape must match the input shape
        cumprod_xs = datetime_frame.cumprod(axis=1)
        assert np.shape(cumprod_xs) == np.shape(datetime_frame)
        # ints: cumprod must also work on integer dtypes
        df = datetime_frame.fillna(0).astype(int)
        df.cumprod(0)
        df.cumprod(1)
        # ints32
        df = datetime_frame.fillna(0).astype(np.int32)
        df.cumprod(0)
        df.cumprod(1)

    @pytest.mark.xfail(
        _is_numpy_dev,
        reason="https://github.com/pandas-dev/pandas/issues/31992",
        strict=False,
    )
    def test_cummin(self, datetime_frame):
        datetime_frame.loc[5:10, 0] = np.nan
        datetime_frame.loc[10:15, 1] = np.nan
        datetime_frame.loc[15:, 2] = np.nan
        # axis = 0
        cummin = datetime_frame.cummin()
        expected = datetime_frame.apply(Series.cummin)
        tm.assert_frame_equal(cummin, expected)
        # axis = 1
        cummin = datetime_frame.cummin(axis=1)
        expected = datetime_frame.apply(Series.cummin, axis=1)
        tm.assert_frame_equal(cummin, expected)
        # it works
        df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
        df.cummin()
        # fix issue: result shape must match the input shape
        cummin_xs = datetime_frame.cummin(axis=1)
        assert np.shape(cummin_xs) == np.shape(datetime_frame)

    @pytest.mark.xfail(
        _is_numpy_dev,
        reason="https://github.com/pandas-dev/pandas/issues/31992",
        strict=False,
    )
    def test_cummax(self, datetime_frame):
        datetime_frame.loc[5:10, 0] = np.nan
        datetime_frame.loc[10:15, 1] = np.nan
        datetime_frame.loc[15:, 2] = np.nan
        # axis = 0
        cummax = datetime_frame.cummax()
        expected = datetime_frame.apply(Series.cummax)
        tm.assert_frame_equal(cummax, expected)
        # axis = 1
        cummax = datetime_frame.cummax(axis=1)
        expected = datetime_frame.apply(Series.cummax, axis=1)
        tm.assert_frame_equal(cummax, expected)
        # it works
        df = DataFrame({"A": np.arange(20)}, index=np.arange(20))
        df.cummax()
        # fix issue: result shape must match the input shape
        cummax_xs = datetime_frame.cummax(axis=1)
        assert np.shape(cummax_xs) == np.shape(datetime_frame)

    def test_cumulative_ops_preserve_dtypes(self):
        # GH#19296 dont incorrectly upcast to object
        df = DataFrame({"A": [1, 2, 3], "B": [1, 2, 3.0], "C": [True, False, False]})
        result = df.cumsum()
        expected = DataFrame(
            {
                "A": Series([1, 3, 6], dtype=np.int64),
                "B": Series([1, 3, 6], dtype=np.float64),
                "C": df["C"].cumsum(),
            }
        )
        tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/layer/_fill.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `layout.mapbox.layer.fill` compound
    # property; `data_docs` is user-facing help text rendered at runtime.
    def __init__(self, plotly_name="fill", parent_name="layout.mapbox.layer", **kwargs):
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Fill"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            outlinecolor
                Sets the fill outline color
                (mapbox.layer.paint.fill-outline-color). Has an
                effect only when `type` is set to "fill".
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py | <filename>env/lib/python3.8/site-packages/numpy/distutils/tests/test_fcompiler_nagfor.py
from __future__ import division, absolute_import, print_function
from numpy.testing import assert_
import numpy.distutils.fcompiler
# (compiler key, raw version banner, expected parsed version) triples
# covering both the modern "nagfor" and the legacy "nag" compilers.
nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
                       '6.2(Chiyoda) Build 6200', '6.2'),
                      ('nagfor', 'NAG Fortran Compiler Release '
                       '6.1(Tozai) Build 6136', '6.1'),
                      ('nagfor', 'NAG Fortran Compiler Release '
                       '6.0(Hibiya) Build 1021', '6.0'),
                      ('nagfor', 'NAG Fortran Compiler Release '
                       '5.3.2(971)', '5.3.2'),
                      ('nag', 'NAGWare Fortran 95 compiler Release 5.1'
                       '(347,355-367,375,380-383,389,394,399,401-402,407,'
                       '431,435,437,446,459-460,463,472,494,496,503,508,'
                       '511,517,529,555,557,565)', '5.1')]
class TestNagFCompilerVersions(object):
    """Check version parsing against known NAG compiler banners."""

    def test_version_match(self):
        for compiler_name, banner, expected_version in nag_version_strings:
            compiler = numpy.distutils.fcompiler.new_fcompiler(
                compiler=compiler_name)
            parsed = compiler.version_match(banner)
            assert_(parsed == expected_version)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/layer/_coordinates.py | import _plotly_utils.basevalidators
class CoordinatesValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(
self, plotly_name="coordinates", parent_name="layout.mapbox.layer", **kwargs
):
super(CoordinatesValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_abc.py | from __future__ import division, absolute_import, print_function
from numpy.testing import assert_
import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
class TestABC(object):
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
assert_(issubclass(np.inexact, numbers.Complex))
assert_(issubclass(np.complexfloating, numbers.Complex))
assert_(issubclass(np.floating, numbers.Real))
assert_(issubclass(np.integer, numbers.Integral))
assert_(issubclass(np.signedinteger, numbers.Integral))
assert_(issubclass(np.unsignedinteger, numbers.Integral))
def test_floats(self):
for t in sctypes['float']:
assert_(isinstance(t(), numbers.Real),
"{0} is not instance of Real".format(t.__name__))
assert_(issubclass(t, numbers.Real),
"{0} is not subclass of Real".format(t.__name__))
assert_(not isinstance(t(), numbers.Rational),
"{0} is instance of Rational".format(t.__name__))
assert_(not issubclass(t, numbers.Rational),
"{0} is subclass of Rational".format(t.__name__))
def test_complex(self):
for t in sctypes['complex']:
assert_(isinstance(t(), numbers.Complex),
"{0} is not instance of Complex".format(t.__name__))
assert_(issubclass(t, numbers.Complex),
"{0} is not subclass of Complex".format(t.__name__))
assert_(not isinstance(t(), numbers.Real),
"{0} is instance of Real".format(t.__name__))
assert_(not issubclass(t, numbers.Real),
"{0} is subclass of Real".format(t.__name__))
def test_int(self):
for t in sctypes['int']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
def test_uint(self):
for t in sctypes['uint']:
assert_(isinstance(t(), numbers.Integral),
"{0} is not instance of Integral".format(t.__name__))
assert_(issubclass(t, numbers.Integral),
"{0} is not subclass of Integral".format(t.__name__))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_newshape.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
import _plotly_utils.basevalidators
class NewshapeValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated validator for the `layout.newshape` compound property;
    # `data_docs` is user-facing help text rendered at runtime.
    def __init__(self, plotly_name="newshape", parent_name="layout", **kwargs):
        super(NewshapeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Newshape"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            drawdirection
                When `dragmode` is set to "drawrect",
                "drawline" or "drawcircle" this limits the drag
                to be horizontal, vertical or diagonal. Using
                "diagonal" there is no limit e.g. in drawing
                lines in any direction. "ortho" limits the draw
                to be either horizontal or vertical.
                "horizontal" allows horizontal extend.
                "vertical" allows vertical extend.
            fillcolor
                Sets the color filling new shapes' interior.
                Please note that if using a fillcolor with
                alpha greater than half, drag inside the active
                shape starts moving the shape underneath,
                otherwise a new shape could be started over.
            fillrule
                Determines the path's interior. For more info
                please visit https://developer.mozilla.org/en-
                US/docs/Web/SVG/Attribute/fill-rule
            layer
                Specifies whether new shapes are drawn below or
                above traces.
            line
                :class:`plotly.graph_objects.layout.newshape.Li
                ne` instance or dict with compatible properties
            opacity
                Sets the opacity of new shapes.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_issue14735.py | import pytest
import warnings
import numpy as np
class Wrapper:
    # A sequence-like proxy around an ndarray that warns whenever numpy
    # probes it for an ``__array_*`` protocol attribute — used to verify
    # that np.asarray triggers the array-protocol lookup (gh-14735).

    def __init__(self, array):
        self.array = array

    def __len__(self):
        return len(self.array)

    def __getitem__(self, item):
        # Slicing returns a wrapped view, keeping the proxy type.
        return type(self)(self.array[item])

    def __getattr__(self, name):
        # Warn on any __array_* protocol probe before delegating.
        if name.startswith("__array_"):
            warnings.warn("object got converted", UserWarning, stacklevel=1)
        return getattr(self.array, name)

    def __repr__(self):
        return "<Wrapper({self.array})>".format(self=self)
@pytest.mark.filterwarnings("error")
def test_getattr_warning():
    # With warnings promoted to errors, np.asarray's __array_* probe on
    # the proxy must surface as a raised UserWarning.
    array = Wrapper(np.arange(10))
    with pytest.raises(UserWarning, match="object got converted"):
        np.asarray(array)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/polar/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys

# On Python < 3.7 eagerly import the submodules; otherwise install a lazy
# loader via PEP 562 module __getattr__ to keep import time low.
if sys.version_info < (3, 7):
    from ._angularaxis import AngularAxis
    from ._domain import Domain
    from ._radialaxis import RadialAxis
    from . import angularaxis
    from . import radialaxis
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".angularaxis", ".radialaxis"],
        ["._angularaxis.AngularAxis", "._domain.Domain", "._radialaxis.RadialAxis"],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexes/conftest.py | <filename>env/lib/python3.8/site-packages/pandas/tests/indexes/conftest.py
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.indexes.api import Index, MultiIndex
# One representative Index (length ~100 where applicable) per flavour; the
# fixtures below parametrize over this mapping so index tests run against
# every kind of Index pandas supports.
indices_dict = {
    "unicode": tm.makeUnicodeIndex(100),
    "string": tm.makeStringIndex(100),
    "datetime": tm.makeDateIndex(100),
    "period": tm.makePeriodIndex(100),
    "timedelta": tm.makeTimedeltaIndex(100),
    "int": tm.makeIntIndex(100),
    "uint": tm.makeUIntIndex(100),
    "range": tm.makeRangeIndex(100),
    "float": tm.makeFloatIndex(100),
    "bool": Index([True, False]),
    "categorical": tm.makeCategoricalIndex(100),
    "interval": tm.makeIntervalIndex(100),
    "empty": Index([]),
    "tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
    "repeats": Index([0, 0, 1, 1, 2, 2]),
}
@pytest.fixture(params=indices_dict.keys())
def indices(request):
    """Parametrized fixture yielding one Index of each flavour in indices_dict."""
    # copy to avoid mutation, e.g. setting .name
    return indices_dict[request.param].copy()
@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
def one(request):
    """Parametrized fixture returning 1 as a Python int and as a zero-dim
    int64 ndarray."""
    # zero-dim integer array behaves like an integer
    return request.param
# Scalar and vector "zero" objects used to exercise division by (or of)
# zero against length-5 Indexes: boxed zero-vectors for each integer/float
# dtype, zero-dim ndarrays of the same dtypes, and plain Python scalars.
zeros = []
for box in [pd.Index, np.array]:
    for dtype in [np.int64, np.uint64, np.float64]:
        zeros.append(box([0] * 5, dtype=dtype))
for dtype in [np.int64, np.uint64, np.float64]:
    zeros.append(np.array(0, dtype=dtype))
zeros.extend([0, 0.0])
@pytest.fixture(params=zeros)
def zero(request):
    """Parametrized fixture over the scalar/vector zeros built above."""
    # For testing division by (or of) zero for Index with length 5, this
    # gives several scalar-zeros and length-5 vector-zeros
    return request.param
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/rangeselector/button/_step.py | import _plotly_utils.basevalidators
class StepValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.xaxis.rangeselector.button.step``."""

    def __init__(
        self,
        plotly_name="step",
        parent_name="layout.xaxis.rangeselector.button",
        **kwargs
    ):
        # Fill in the generated defaults only where the caller did not
        # supply an override, then forward everything to the base class.
        defaults = {
            "edit_type": "plot",
            "role": "info",
            "values": ["month", "year", "day", "hour", "minute", "second", "all"],
        }
        for option, value in defaults.items():
            kwargs.setdefault(option, value)
        super(StepValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/grid/__init__.py | import sys
# Plotly's generated validator packages use lazy relative imports on
# Python >= 3.7 (module-level __getattr__/__dir__, PEP 562); older
# interpreters fall back to eager imports of every validator class.
if sys.version_info < (3, 7):
    from ._yside import YsideValidator
    from ._ygap import YgapValidator
    from ._yaxes import YaxesValidator
    from ._xside import XsideValidator
    from ._xgap import XgapValidator
    from ._xaxes import XaxesValidator
    from ._subplots import SubplotsValidator
    from ._rows import RowsValidator
    from ._roworder import RoworderValidator
    from ._pattern import PatternValidator
    from ._domain import DomainValidator
    from ._columns import ColumnsValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yside.YsideValidator",
            "._ygap.YgapValidator",
            "._yaxes.YaxesValidator",
            "._xside.XsideValidator",
            "._xgap.XgapValidator",
            "._xaxes.XaxesValidator",
            "._subplots.SubplotsValidator",
            "._rows.RowsValidator",
            "._roworder.RoworderValidator",
            "._pattern.PatternValidator",
            "._domain.DomainValidator",
            "._columns.ColumnsValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/axis/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/axis/__init__.py
import sys

# Plotly's generated validator packages use lazy relative imports on
# Python >= 3.7 (module-level __getattr__/__dir__, PEP 562); older
# interpreters fall back to eager imports of every validator class.
if sys.version_info < (3, 7):
    from ._visible import VisibleValidator
    from ._tickwidth import TickwidthValidator
    from ._tickvalssrc import TickvalssrcValidator
    from ._tickvals import TickvalsValidator
    from ._ticktextsrc import TicktextsrcValidator
    from ._ticktext import TicktextValidator
    from ._ticksuffix import TicksuffixValidator
    from ._ticks import TicksValidator
    from ._tickprefix import TickprefixValidator
    from ._tickmode import TickmodeValidator
    from ._ticklen import TicklenValidator
    from ._tickformatstopdefaults import TickformatstopdefaultsValidator
    from ._tickformatstops import TickformatstopsValidator
    from ._tickformat import TickformatValidator
    from ._tickfont import TickfontValidator
    from ._tickcolor import TickcolorValidator
    from ._tickangle import TickangleValidator
    from ._tick0 import Tick0Validator
    from ._showticksuffix import ShowticksuffixValidator
    from ._showtickprefix import ShowtickprefixValidator
    from ._showticklabels import ShowticklabelsValidator
    from ._showexponent import ShowexponentValidator
    from ._separatethousands import SeparatethousandsValidator
    from ._range import RangeValidator
    from ._nticks import NticksValidator
    from ._exponentformat import ExponentformatValidator
    from ._dtick import DtickValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._tickwidth.TickwidthValidator",
            "._tickvalssrc.TickvalssrcValidator",
            "._tickvals.TickvalsValidator",
            "._ticktextsrc.TicktextsrcValidator",
            "._ticktext.TicktextValidator",
            "._ticksuffix.TicksuffixValidator",
            "._ticks.TicksValidator",
            "._tickprefix.TickprefixValidator",
            "._tickmode.TickmodeValidator",
            "._ticklen.TicklenValidator",
            "._tickformatstopdefaults.TickformatstopdefaultsValidator",
            "._tickformatstops.TickformatstopsValidator",
            "._tickformat.TickformatValidator",
            "._tickfont.TickfontValidator",
            "._tickcolor.TickcolorValidator",
            "._tickangle.TickangleValidator",
            "._tick0.Tick0Validator",
            "._showticksuffix.ShowticksuffixValidator",
            "._showtickprefix.ShowtickprefixValidator",
            "._showticklabels.ShowticklabelsValidator",
            "._showexponent.ShowexponentValidator",
            "._separatethousands.SeparatethousandsValidator",
            "._range.RangeValidator",
            "._nticks.NticksValidator",
            "._exponentformat.ExponentformatValidator",
            "._dtick.DtickValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/distutils/fcompiler/pg.py | <reponame>acrucetta/Chicago_COVI_WebApp
# http://www.pgroup.com
from __future__ import division, absolute_import, print_function
import sys
from numpy.distutils.fcompiler import FCompiler, dummy_fortran_file
from sys import platform
from os.path import join, dirname, normpath
# Compiler classes exported by this module to numpy.distutils.
compilers = ['PGroupFCompiler', 'PGroupFlangCompiler']


class PGroupFCompiler(FCompiler):
    """numpy.distutils driver for the classic Portland Group (PGI) Fortran
    compiler.

    The class attributes below are data consumed reflectively by the
    numpy.distutils FCompiler machinery; the platform checks run at class
    creation time, so the darwin/other split is fixed at import.
    """

    compiler_type = 'pg'
    description = 'Portland Group Fortran Compiler'
    # Matches version strings of pgf77/pgf90/pghpf/pgfortran.
    version_pattern = r'\s*pg(f77|f90|hpf|fortran) (?P<version>[\d.-]+).*'

    if platform == 'darwin':
        # On macOS the shared-object link goes through libtool and the
        # compilers build dynamiclibs; no explicit PIC flag is needed,
        # hence the empty string in pic_flags.
        executables = {
            'version_cmd': ["<F77>", "-V"],
            'compiler_f77': ["pgfortran", "-dynamiclib"],
            'compiler_fix': ["pgfortran", "-Mfixed", "-dynamiclib"],
            'compiler_f90': ["pgfortran", "-dynamiclib"],
            'linker_so': ["libtool"],
            'archiver': ["ar", "-cr"],
            'ranlib': ["ranlib"]
        }
        pic_flags = ['']
    else:
        executables = {
            'version_cmd': ["<F77>", "-V"],
            'compiler_f77': ["pgfortran"],
            'compiler_fix': ["pgfortran", "-Mfixed"],
            'compiler_f90': ["pgfortran"],
            'linker_so': ["pgfortran"],
            'archiver': ["ar", "-cr"],
            'ranlib': ["ranlib"]
        }
        pic_flags = ['-fpic']

    # Trailing space is significant: the module directory is appended
    # directly after this switch.
    module_dir_switch = '-module '
    module_include_switch = '-I'

    def get_flags(self):
        """Base compile flags: informational diagnostics, no second
        trailing underscore, plus position-independent-code flags."""
        opt = ['-Minform=inform', '-Mnosecond_underscore']
        return self.pic_flags + opt

    def get_flags_opt(self):
        """Optimized-build flags."""
        return ['-fast']

    def get_flags_debug(self):
        """Debug-build flags."""
        return ['-g']

    if platform == 'darwin':

        def get_flags_linker_so(self):
            """libtool-style dynamic-link flags (macOS)."""
            return ["-dynamic", '-undefined', 'dynamic_lookup']

    else:

        def get_flags_linker_so(self):
            """Shared-object link flags (non-macOS)."""
            return ["-shared", '-fpic']

    def runtime_library_dir_option(self, dir):
        """Return the rpath-style flag embedding *dir* as a runtime
        library search path."""
        return '-R%s' % dir
if sys.version_info >= (3, 5):
    import functools

    class PGroupFlangCompiler(FCompiler):
        """numpy.distutils driver for the LLVM-based PGI Flang compiler.

        Archiving goes through MSVC's lib.exe, so this configuration is
        Windows-oriented.
        """

        compiler_type = 'flang'
        description = 'Portland Group Fortran LLVM Compiler'
        version_pattern = r'\s*(flang|clang) version (?P<version>[\d.-]+).*'

        ar_exe = 'lib.exe'
        possible_executables = ['flang']

        executables = {
            'version_cmd': ["<F77>", "--version"],
            'compiler_f77': ["flang"],
            'compiler_fix': ["flang"],
            'compiler_f90': ["flang"],
            'linker_so': [None],
            'archiver': [ar_exe, "/verbose", "/OUT:"],
            'ranlib': None
        }

        library_switch = '/OUT:'  # No space after /OUT:!
        module_dir_switch = '-module '  # Don't remove ending space!

        def get_libraries(self):
            """Base libraries plus the Flang runtime libraries."""
            opt = FCompiler.get_libraries(self)
            opt.extend(['flang', 'flangrti', 'ompstub'])
            return opt

        # NOTE(review): lru_cache on an instance method keys on `self` and
        # keeps every compiler instance alive for the cache's lifetime;
        # harmless here (few instances) but a known smell.
        @functools.lru_cache(maxsize=128)
        def get_library_dirs(self):
            """List of compiler library directories."""
            opt = FCompiler.get_library_dirs(self)
            # ../lib relative to the flang executable's directory.
            flang_dir = dirname(self.executables['compiler_f77'][0])
            opt.append(normpath(join(flang_dir, '..', 'lib')))

            return opt

        def get_flags(self):
            return []

        def get_flags_free(self):
            return []

        def get_flags_debug(self):
            return ['-g']

        def get_flags_opt(self):
            return ['-O3']

        def get_flags_arch(self):
            return []

        def runtime_library_dir_option(self, dir):
            # Not supported for this toolchain.
            raise NotImplementedError

else:
    from numpy.distutils.fcompiler import CompilerNotFound

    # No point in supporting on older Pythons because not ABI compatible
    class PGroupFlangCompiler(FCompiler):
        """Stub that reports Flang as unavailable on Python < 3.5."""

        compiler_type = 'flang'
        description = 'Portland Group Fortran LLVM Compiler'

        def get_version(self):
            raise CompilerNotFound('Flang unsupported on Python < 3.5')
if __name__ == '__main__':
    # Manual smoke test: print the detected compiler version.  Pass
    # "flang" on the command line to probe the LLVM-based compiler
    # instead of the classic PGI one.
    from distutils import log
    log.set_verbosity(2)
    from numpy.distutils import customized_fcompiler
    if 'flang' in sys.argv:
        print(customized_fcompiler(compiler='flang').get_version())
    else:
        print(customized_fcompiler(compiler='pg').get_version())
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_utils/colors/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
"""
colors
=====
Functions that manipulate colors and arrays of colors.
-----
There are three basic types of color types: rgb, hex and tuple:
rgb - An rgb color is a string of the form 'rgb(a,b,c)' where a, b and c are
integers between 0 and 255 inclusive.
hex - A hex color is a string of the form '#xxxxxx' where each x is a
character that belongs to the set [0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f]. This is
just the set of characters used in the hexadecimal numeric system.
tuple - A tuple color is a 3-tuple of the form (a,b,c) where a, b and c are
floats between 0 and 1 inclusive.
-----
Colormaps and Colorscales:
A colormap or a colorscale is a correspondence between values - Pythonic
objects such as strings and floats - to colors.
There are typically two main types of colormaps that exist: numerical and
categorical colormaps.
Numerical:
----------
Numerical colormaps are used when the coloring column being used takes a
spectrum of values or numbers.
A classic example from the Plotly library:
```
rainbow_colorscale = [
[0, 'rgb(150,0,90)'], [0.125, 'rgb(0,0,200)'],
[0.25, 'rgb(0,25,255)'], [0.375, 'rgb(0,152,255)'],
[0.5, 'rgb(44,255,150)'], [0.625, 'rgb(151,255,0)'],
[0.75, 'rgb(255,234,0)'], [0.875, 'rgb(255,111,0)'],
[1, 'rgb(255,0,0)']
]
```
Notice that this colorscale is a list of lists with each inner list containing
a number and a color. These left hand numbers in the nested lists go from 0 to
1, and they are like pointers tell you when a number is mapped to a specific
color.
If you have a column of numbers `col_num` that you want to plot, and you know
```
min(col_num) = 0
max(col_num) = 100
```
then if you pull out the number `12.5` in the list and want to figure out what
color the corresponding chart element (bar, scatter plot, etc) is going to be,
you'll figure out that proportionally 12.5 to 100 is the same as 0.125 to 1.
So, the point will be mapped to 'rgb(0,0,200)'.
All other colors between the pinned values in a colorscale are linearly
interpolated.
Categorical:
------------
Alternatively, a categorical colormap is used to assign a specific value in a
color column to a specific color every time it appears in the dataset.
A column of strings in a pandas.DataFrame that is chosen to serve as the
color index would naturally use a categorical colormap. However, you can
choose to use a categorical colormap with a column of numbers.
Be careful! If you have a lot of unique numbers in your color column you will
end up with a colormap that is massive and may slow down graphing performance.
"""
from __future__ import absolute_import
import decimal
from numbers import Number
import six
from _plotly_utils import exceptions
# Built-in qualitative color sequences and sequential,
# diverging and cyclical color scales.
#
# Initially ported over from plotly_express
from . import ( # noqa: F401
qualitative,
sequential,
diverging,
cyclical,
cmocean,
colorbrewer,
carto,
plotlyjs,
)
# Default qualitative palette (the classic 10-color Plotly/D3 "category10"
# cycle) used when a caller supplies no colors.
DEFAULT_PLOTLY_COLORS = [
    "rgb(31, 119, 180)",
    "rgb(255, 127, 14)",
    "rgb(44, 160, 44)",
    "rgb(214, 39, 40)",
    "rgb(148, 103, 189)",
    "rgb(140, 86, 75)",
    "rgb(227, 119, 194)",
    "rgb(127, 127, 127)",
    "rgb(188, 189, 34)",
    "rgb(23, 190, 207)",
]
# Built-in named continuous colorscales.  Each value is a list of
# [position, color] pairs with positions strictly increasing from 0 to 1;
# colors between the pinned positions are linearly interpolated.
PLOTLY_SCALES = {
    "Greys": [[0, "rgb(0,0,0)"], [1, "rgb(255,255,255)"]],
    "YlGnBu": [
        [0, "rgb(8,29,88)"],
        [0.125, "rgb(37,52,148)"],
        [0.25, "rgb(34,94,168)"],
        [0.375, "rgb(29,145,192)"],
        [0.5, "rgb(65,182,196)"],
        [0.625, "rgb(127,205,187)"],
        [0.75, "rgb(199,233,180)"],
        [0.875, "rgb(237,248,217)"],
        [1, "rgb(255,255,217)"],
    ],
    "Greens": [
        [0, "rgb(0,68,27)"],
        [0.125, "rgb(0,109,44)"],
        [0.25, "rgb(35,139,69)"],
        [0.375, "rgb(65,171,93)"],
        [0.5, "rgb(116,196,118)"],
        [0.625, "rgb(161,217,155)"],
        [0.75, "rgb(199,233,192)"],
        [0.875, "rgb(229,245,224)"],
        [1, "rgb(247,252,245)"],
    ],
    "YlOrRd": [
        [0, "rgb(128,0,38)"],
        [0.125, "rgb(189,0,38)"],
        [0.25, "rgb(227,26,28)"],
        [0.375, "rgb(252,78,42)"],
        [0.5, "rgb(253,141,60)"],
        [0.625, "rgb(254,178,76)"],
        [0.75, "rgb(254,217,118)"],
        [0.875, "rgb(255,237,160)"],
        [1, "rgb(255,255,204)"],
    ],
    "Bluered": [[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
    # modified RdBu based on
    # www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
    "RdBu": [
        [0, "rgb(5,10,172)"],
        [0.35, "rgb(106,137,247)"],
        [0.5, "rgb(190,190,190)"],
        [0.6, "rgb(220,170,132)"],
        [0.7, "rgb(230,145,90)"],
        [1, "rgb(178,10,28)"],
    ],
    # Scale for non-negative numeric values
    "Reds": [
        [0, "rgb(220,220,220)"],
        [0.2, "rgb(245,195,157)"],
        [0.4, "rgb(245,160,105)"],
        [1, "rgb(178,10,28)"],
    ],
    # Scale for non-positive numeric values
    "Blues": [
        [0, "rgb(5,10,172)"],
        [0.35, "rgb(40,60,190)"],
        [0.5, "rgb(70,100,245)"],
        [0.6, "rgb(90,120,245)"],
        [0.7, "rgb(106,137,247)"],
        [1, "rgb(220,220,220)"],
    ],
    "Picnic": [
        [0, "rgb(0,0,255)"],
        [0.1, "rgb(51,153,255)"],
        [0.2, "rgb(102,204,255)"],
        [0.3, "rgb(153,204,255)"],
        [0.4, "rgb(204,204,255)"],
        [0.5, "rgb(255,255,255)"],
        [0.6, "rgb(255,204,255)"],
        [0.7, "rgb(255,153,255)"],
        [0.8, "rgb(255,102,204)"],
        [0.9, "rgb(255,102,102)"],
        [1, "rgb(255,0,0)"],
    ],
    "Rainbow": [
        [0, "rgb(150,0,90)"],
        [0.125, "rgb(0,0,200)"],
        [0.25, "rgb(0,25,255)"],
        [0.375, "rgb(0,152,255)"],
        [0.5, "rgb(44,255,150)"],
        [0.625, "rgb(151,255,0)"],
        [0.75, "rgb(255,234,0)"],
        [0.875, "rgb(255,111,0)"],
        [1, "rgb(255,0,0)"],
    ],
    "Portland": [
        [0, "rgb(12,51,131)"],
        [0.25, "rgb(10,136,186)"],
        [0.5, "rgb(242,211,56)"],
        [0.75, "rgb(242,143,56)"],
        [1, "rgb(217,30,30)"],
    ],
    "Jet": [
        [0, "rgb(0,0,131)"],
        [0.125, "rgb(0,60,170)"],
        [0.375, "rgb(5,255,255)"],
        [0.625, "rgb(255,255,0)"],
        [0.875, "rgb(250,0,0)"],
        [1, "rgb(128,0,0)"],
    ],
    "Hot": [
        [0, "rgb(0,0,0)"],
        [0.3, "rgb(230,0,0)"],
        [0.6, "rgb(255,210,0)"],
        [1, "rgb(255,255,255)"],
    ],
    "Blackbody": [
        [0, "rgb(0,0,0)"],
        [0.2, "rgb(230,0,0)"],
        [0.4, "rgb(230,210,0)"],
        [0.7, "rgb(255,255,255)"],
        [1, "rgb(160,200,255)"],
    ],
    "Earth": [
        [0, "rgb(0,0,130)"],
        [0.1, "rgb(0,180,180)"],
        [0.2, "rgb(40,210,40)"],
        [0.4, "rgb(230,230,50)"],
        [0.6, "rgb(120,70,20)"],
        [1, "rgb(255,255,255)"],
    ],
    "Electric": [
        [0, "rgb(0,0,0)"],
        [0.15, "rgb(30,0,100)"],
        [0.4, "rgb(120,0,100)"],
        [0.6, "rgb(160,90,0)"],
        [0.8, "rgb(230,200,0)"],
        [1, "rgb(255,250,220)"],
    ],
    "Viridis": [
        [0, "#440154"],
        [0.06274509803921569, "#48186a"],
        [0.12549019607843137, "#472d7b"],
        [0.18823529411764706, "#424086"],
        [0.25098039215686274, "#3b528b"],
        [0.3137254901960784, "#33638d"],
        [0.3764705882352941, "#2c728e"],
        [0.4392156862745098, "#26828e"],
        [0.5019607843137255, "#21918c"],
        [0.5647058823529412, "#1fa088"],
        [0.6274509803921569, "#28ae80"],
        [0.6901960784313725, "#3fbc73"],
        [0.7529411764705882, "#5ec962"],
        [0.8156862745098039, "#84d44b"],
        [0.8784313725490196, "#addc30"],
        [0.9411764705882353, "#d8e219"],
        [1, "#fde725"],
    ],
    "Cividis": [
        [0.000000, "rgb(0,32,76)"],
        [0.058824, "rgb(0,42,102)"],
        [0.117647, "rgb(0,52,110)"],
        [0.176471, "rgb(39,63,108)"],
        [0.235294, "rgb(60,74,107)"],
        [0.294118, "rgb(76,85,107)"],
        [0.352941, "rgb(91,95,109)"],
        [0.411765, "rgb(104,106,112)"],
        [0.470588, "rgb(117,117,117)"],
        [0.529412, "rgb(131,129,120)"],
        [0.588235, "rgb(146,140,120)"],
        [0.647059, "rgb(161,152,118)"],
        [0.705882, "rgb(176,165,114)"],
        [0.764706, "rgb(192,177,109)"],
        [0.823529, "rgb(209,191,102)"],
        [0.882353, "rgb(225,204,92)"],
        [0.941176, "rgb(243,219,79)"],
        [1.000000, "rgb(255,233,69)"],
    ],
}
def color_parser(colors, function):
    """Apply *function* to a single color or to each color in a collection.

    A single color is either a string (hex, or an 'rgb(a,b,c)' label) or a
    tuple of numbers; either is handed to ``function`` directly.  Any other
    iterable is treated as a collection of colors: a tuple input yields a
    tuple of results, every other iterable yields a list.  All items in a
    collection are expected to share one representation (rgb string, hex
    string or tuple).

    NOTE(review): a non-iterable, non-color argument falls through every
    branch and returns ``None`` implicitly — presumably never hit by
    callers, but worth confirming.
    """
    # Single color given as a string, e.g. "#ff0000" or "rgb(255, 0, 0)".
    if isinstance(colors, str):
        return function(colors)

    # Single color given as a numeric tuple, e.g. (1.0, 0.0, 0.0).
    if isinstance(colors, tuple) and isinstance(colors[0], Number):
        return function(colors)

    # A collection of colors: map over it, preserving tuple-ness.
    if hasattr(colors, "__iter__"):
        mapped = [function(single_color) for single_color in colors]
        if isinstance(colors, tuple):
            return tuple(mapped)
        return mapped
def validate_colors(colors, colortype="tuple"):
    """
    Validates color(s) and returns a list of color(s) of a specified type

    Accepts None (falls back to DEFAULT_PLOTLY_COLORS), a Plotly scale
    name, a single rgb/hex string, a numeric tuple, or a list of any of
    these; raises PlotlyError for out-of-range channel values.
    """
    from numbers import Number

    if colors is None:
        colors = DEFAULT_PLOTLY_COLORS

    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            colors_list = colorscale_to_colors(PLOTLY_SCALES[colors])
            # TODO: fix _gantt.py/_scatter.py so that they can accept the
            # actual colorscale and not just a list of the first and last
            # color in the plotly colorscale. In resolving this issue we
            # will be removing the immediate line below
            colors = [colors_list[0]] + [colors_list[-1]]
        elif "rgb" in colors or "#" in colors:
            colors = [colors]
        else:
            raise exceptions.PlotlyError(
                "If your colors variable is a string, it must be a "
                "Plotly scale, an rgb color or a hex color."
            )

    elif isinstance(colors, tuple):
        # A tuple of numbers is one color; a tuple of colors becomes a list.
        if isinstance(colors[0], Number):
            colors = [colors]
        else:
            colors = list(colors)

    # convert color elements in list to tuple color
    for j, each_color in enumerate(colors):
        if "rgb" in each_color:
            # rgb string -> numeric tuple, validating the 0-255 range first.
            each_color = color_parser(each_color, unlabel_rgb)
            for value in each_color:
                if value > 255.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your rgb colors "
                        "tuples cannot exceed 255.0."
                    )
            each_color = color_parser(each_color, unconvert_from_RGB_255)
            colors[j] = each_color

        if "#" in each_color:
            # hex string -> normalized 0-1 tuple.
            each_color = color_parser(each_color, hex_to_rgb)
            each_color = color_parser(each_color, unconvert_from_RGB_255)
            colors[j] = each_color

        if isinstance(each_color, tuple):
            # Already a tuple: only validate the 0-1 range.
            for value in each_color:
                if value > 1.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your colors tuples "
                        "cannot exceed 1.0."
                    )
            colors[j] = each_color

    if colortype == "rgb" and not isinstance(colors, six.string_types):
        # Re-label every tuple as an 'rgb(a, b, c)' string.
        for j, each_color in enumerate(colors):
            rgb_color = color_parser(each_color, convert_to_RGB_255)
            colors[j] = color_parser(rgb_color, label_rgb)

    return colors
def validate_colors_dict(colors, colortype="tuple"):
    """
    Validates a dictionary of color(s)

    Each value is converted in place to the requested ``colortype``
    ("tuple" or "rgb"); out-of-range channel values raise PlotlyError.
    """
    # validate each color element in the dictionary
    for key in colors:
        if "rgb" in colors[key]:
            # rgb string -> numeric tuple, validating the 0-255 range first.
            colors[key] = color_parser(colors[key], unlabel_rgb)
            for value in colors[key]:
                if value > 255.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your rgb colors "
                        "tuples cannot exceed 255.0."
                    )
            colors[key] = color_parser(colors[key], unconvert_from_RGB_255)

        if "#" in colors[key]:
            # hex string -> normalized 0-1 tuple.
            colors[key] = color_parser(colors[key], hex_to_rgb)
            colors[key] = color_parser(colors[key], unconvert_from_RGB_255)

        if isinstance(colors[key], tuple):
            for value in colors[key]:
                if value > 1.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your colors tuples "
                        "cannot exceed 1.0."
                    )

    if colortype == "rgb":
        # Re-label every tuple as an 'rgb(a, b, c)' string.
        for key in colors:
            colors[key] = color_parser(colors[key], convert_to_RGB_255)
            colors[key] = color_parser(colors[key], label_rgb)

    return colors
def convert_colors_to_same_type(
    colors,
    colortype="rgb",
    scale=None,
    return_default_colors=False,
    num_of_defualt_colors=2,
):
    """
    Converts color(s) to the specified color type

    Takes a single color or an iterable of colors, as well as a list of scale
    values, and outputs a 2-pair of the list of color(s) converted all to an
    rgb or tuple color type, as well as the scale as the second element. If
    colors is a Plotly Scale name, then 'scale' will be forced to the scale
    from the respective colorscale and the colors in that colorscale will also
    be converted to the selected colortype. If colors is None, then there is an
    option to return a portion of the DEFAULT_PLOTLY_COLORS

    :param (str|tuple|list) colors: either a plotly scale name, an rgb or hex
        color, a color tuple or a list/tuple of colors
    :param (list) scale: see docs for validate_scale_values()

    :rtype (tuple) (colors_list, scale) if scale is None in the function call,
        then scale will remain None in the returned tuple

    NOTE(review): the parameter name 'num_of_defualt_colors' is misspelled
    but is part of the public interface, so it is kept as-is.
    """
    colors_list = []

    if colors is None and return_default_colors is True:
        colors_list = DEFAULT_PLOTLY_COLORS[0:num_of_defualt_colors]

    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            # Named scale: take its colors, and its positions if no scale
            # was supplied by the caller.
            colors_list = colorscale_to_colors(PLOTLY_SCALES[colors])
            if scale is None:
                scale = colorscale_to_scale(PLOTLY_SCALES[colors])

        elif "rgb" in colors or "#" in colors:
            colors_list = [colors]

    elif isinstance(colors, tuple):
        # A tuple of numbers is one color; a tuple of colors becomes a list.
        if isinstance(colors[0], Number):
            colors_list = [colors]
        else:
            colors_list = list(colors)

    elif isinstance(colors, list):
        colors_list = colors

    # validate scale
    if scale is not None:
        validate_scale_values(scale)

        if len(colors_list) != len(scale):
            raise exceptions.PlotlyError(
                "Make sure that the length of your scale matches the length "
                "of your list of colors which is {}.".format(len(colors_list))
            )

    # convert all colors to rgb
    for j, each_color in enumerate(colors_list):
        if "#" in each_color:
            each_color = color_parser(each_color, hex_to_rgb)
            each_color = color_parser(each_color, label_rgb)
            colors_list[j] = each_color

        elif isinstance(each_color, tuple):
            each_color = color_parser(each_color, convert_to_RGB_255)
            each_color = color_parser(each_color, label_rgb)
            colors_list[j] = each_color

    if colortype == "rgb":
        return (colors_list, scale)
    elif colortype == "tuple":
        # Undo the labelling: back to normalized 0-1 tuples.
        for j, each_color in enumerate(colors_list):
            each_color = color_parser(each_color, unlabel_rgb)
            each_color = color_parser(each_color, unconvert_from_RGB_255)
            colors_list[j] = each_color
        return (colors_list, scale)
    else:
        raise exceptions.PlotlyError(
            "You must select either rgb or tuple " "for your colortype variable."
        )
def convert_dict_colors_to_same_type(colors_dict, colortype="rgb"):
    """
    Converts the colors in a dictionary of colors to the specified color type

    :param (dict) colors_dict: a dictionary whose values are single colors
    """
    # Normalize every value to an 'rgb(a, b, c)' string first.
    for key in colors_dict:
        if "#" in colors_dict[key]:
            colors_dict[key] = color_parser(colors_dict[key], hex_to_rgb)
            colors_dict[key] = color_parser(colors_dict[key], label_rgb)
        elif isinstance(colors_dict[key], tuple):
            colors_dict[key] = color_parser(colors_dict[key], convert_to_RGB_255)
            colors_dict[key] = color_parser(colors_dict[key], label_rgb)

    if colortype == "rgb":
        return colors_dict
    elif colortype == "tuple":
        # Undo the labelling: back to normalized 0-1 tuples.
        for key in colors_dict:
            colors_dict[key] = color_parser(colors_dict[key], unlabel_rgb)
            colors_dict[key] = color_parser(colors_dict[key], unconvert_from_RGB_255)
        return colors_dict
    else:
        raise exceptions.PlotlyError(
            "You must select either rgb or tuple " "for your colortype variable."
        )
def validate_scale_values(scale):
    """Validate the scale values extracted from a colorscale.

    ``scale`` must hold at least two numbers, start at 0, end at 1 and be
    strictly increasing; a PlotlyError is raised otherwise.  Scales come
    from the first element of each [value, color] pair in a colorscale.

    :param (list) scale: the candidate scale values, in order
    """
    if len(scale) < 2:
        raise exceptions.PlotlyError(
            "You must input a list of scale values that has at least two values."
        )

    if scale[0] != 0 or scale[-1] != 1:
        raise exceptions.PlotlyError(
            "The first and last number in your scale must be 0.0 and 1.0 "
            "respectively."
        )

    # Every consecutive pair must strictly increase.
    for smaller, larger in zip(scale, scale[1:]):
        if smaller >= larger:
            raise exceptions.PlotlyError(
                "'scale' must be a list that contains a strictly increasing "
                "sequence of numbers."
            )
def validate_colorscale(colorscale):
    """Validate the structure, scale values and colors of colorscale."""
    # Structural checks first: a colorscale is a list of [value, color] lists.
    if not isinstance(colorscale, list):
        # TODO Write tests for these exceptions
        raise exceptions.PlotlyError("A valid colorscale must be a list.")
    if not all(isinstance(entry, list) for entry in colorscale):
        raise exceptions.PlotlyError("A valid colorscale must be a list of lists.")

    # Then validate the scale positions and the colors independently.
    validate_scale_values(colorscale_to_scale(colorscale))
    validate_colors(colorscale_to_colors(colorscale))
def make_colorscale(colors, scale=None):
    """Build a colorscale ([[scale_value, color], ...]) from a color list.

    With ``scale=None`` the colors are spread evenly over [0, 1]; otherwise
    ``scale`` must be a valid scale (see validate_scale_values) with the
    same length as ``colors``.

    For documentation regarding to the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale

    :param (list) colors: a list of single colors
    """
    # validate minimum colors length of 2
    if len(colors) < 2:
        raise exceptions.PlotlyError(
            "You must input a list of colors that has at least two colors."
        )

    if scale is None:
        # Evenly-spaced positions from 0 to 1 inclusive.
        step = 1.0 / (len(colors) - 1)
        return [[position * step, color] for position, color in enumerate(colors)]

    if len(colors) != len(scale):
        raise exceptions.PlotlyError(
            "The length of colors and scale must be the same."
        )
    validate_scale_values(scale)
    return [list(pair) for pair in zip(scale, colors)]
def find_intermediate_color(lowcolor, highcolor, intermed, colortype="tuple"):
    """Linearly interpolate between two colors.

    Returns the color lying ``intermed`` (between 0 and 1) of the way from
    ``lowcolor`` to ``highcolor``.  With ``colortype="rgb"`` the inputs are
    'rgb(a, b, c)' strings and the result is returned in the same labelled
    form; otherwise numeric tuples are used throughout.
    """
    if colortype == "rgb":
        # convert to tuple color, eg. (1, 0.45, 0.7)
        lowcolor = unlabel_rgb(lowcolor)
        highcolor = unlabel_rgb(highcolor)

    # Blend each of the three channels independently.
    blended = []
    for channel in range(3):
        delta = float(highcolor[channel] - lowcolor[channel])
        blended.append(lowcolor[channel] + intermed * delta)
    inter_med_tuple = (blended[0], blended[1], blended[2])

    if colortype == "rgb":
        # back to an rgb string, e.g. rgb(30, 20, 10)
        return label_rgb(inter_med_tuple)

    return inter_med_tuple
def unconvert_from_RGB_255(colors):
    """Normalize an (R, G, B) tuple from the 0-255 range to 0-1.

    Each of the first three components is divided by 255.0; works for a
    single color tuple (map over a collection via color_parser).
    """
    red, green, blue = colors[0], colors[1], colors[2]
    return (red / 255.0, green / 255.0, blue / 255.0)
def convert_to_RGB_255(colors):
    """Scale a 0-1 color tuple up to integer 0-255 channels.

    Each channel is multiplied by 255 and rounded half-to-even (banker's
    rounding, via ``decimal``) to avoid systematic rounding bias: an exact
    x.5 rounds toward the even neighbour.

    :param (tuple) colors: a single color tuple with channels in [0, 1]
    """

    def _scale(channel):
        # Decimal(str(...)) uses the float's shortest repr, so e.g.
        # 0.5 * 255.0 quantizes from exactly "127.5".
        quantized = decimal.Decimal(str(channel * 255.0)).quantize(
            decimal.Decimal("1"), rounding=decimal.ROUND_HALF_EVEN
        )
        return int(quantized)

    scaled = [_scale(channel) for channel in colors]
    return (scaled[0], scaled[1], scaled[2])
def n_colors(lowcolor, highcolor, n_colors, colortype="tuple"):
    """Return ``n_colors`` colors linearly interpolated between two colors.

    Both endpoints are included.  With ``colortype="rgb"`` the inputs are
    'rgb(a, b, c)' strings and the output colors are labelled the same
    way; otherwise numeric tuples are used.
    """
    if colortype == "rgb":
        # convert to tuples first
        lowcolor = unlabel_rgb(lowcolor)
        highcolor = unlabel_rgb(highcolor)

    # Per-channel step between consecutive colors.
    steps = [
        float(highcolor[channel] - lowcolor[channel]) / (n_colors - 1)
        for channel in range(3)
    ]

    list_of_colors = [
        (
            lowcolor[0] + (index * steps[0]),
            lowcolor[1] + (index * steps[1]),
            lowcolor[2] + (index * steps[2]),
        )
        for index in range(n_colors)
    ]

    if colortype == "rgb":
        # back to rgb strings
        list_of_colors = color_parser(list_of_colors, label_rgb)

    return list_of_colors
def label_rgb(colors):
    """Format a numeric tuple (a, b, c) as the string 'rgb(a, b, c)'."""
    red, green, blue = colors[0], colors[1], colors[2]
    return "rgb(%s, %s, %s)" % (red, green, blue)
def unlabel_rgb(colors):
    """Parse an 'rgb(a, b, c)' string into the numeric tuple (a, b, c).

    Characters that are neither a digit, a comma nor a decimal point are
    discarded, so the 'rgb(' / ')' decoration and whitespace are ignored;
    '.' is kept so fractional components survive.  Only the first three
    parsed numbers are returned.
    """
    # First pass: keep digits, commas and decimal points only.
    filtered = ""
    for char in colors:
        try:
            float(char)
        except ValueError:
            if char == "," or char == ".":
                filtered = filtered + char
        else:
            filtered = filtered + char
    # A trailing comma flushes the final number in the split below.
    filtered = filtered + ","

    # Second pass: split on commas into floats.
    components = []
    current = ""
    for char in filtered:
        if char == ",":
            components.append(float(current))
            current = ""
        else:
            current = current + char

    return (components[0], components[1], components[2])
def hex_to_rgb(value):
    """Calculate rgb values from a hex color code.

    Works for both 6-digit ('#ff00cc') and 3-digit ('#f0c') notations:
    the digits after '#' are split into three equal sections and each
    section is parsed as a base-16 integer.

    :param (string) value: Hex color string
    :rtype (tuple) (r_value, g_value, b_value): tuple of rgb values
    """
    digits = value.lstrip("#")
    section = len(digits) // 3
    starts = range(0, len(digits), section)
    return tuple(int(digits[start : start + section], 16) for start in starts)
def colorscale_to_colors(colorscale):
    """Extract the color (second) entry of each colorscale pair as a list."""
    return [entry[1] for entry in colorscale]
def colorscale_to_scale(colorscale):
    """Extract the scale-value (first) entry of each colorscale pair as a list."""
    return [entry[0] for entry in colorscale]
def convert_colorscale_to_rgb(colorscale):
    """Convert the tuple colors of a colorscale into 'rgb(...)' strings.

    The colorscale is a list of [value, color] pairs whose colors are
    tuples with channels in [0, 1]; each color is scaled up to 0-255 and
    relabelled in place, and the (mutated) colorscale is returned.
    """
    # First pass: scale every tuple color up to integer 0-255 channels.
    for index in range(len(colorscale)):
        colorscale[index][1] = convert_to_RGB_255(colorscale[index][1])
    # Second pass: format the integer tuples as 'rgb(r, g, b)' strings.
    for index in range(len(colorscale)):
        colorscale[index][1] = label_rgb(colorscale[index][1])
    return colorscale
def named_colorscales():
    """
    Return the lowercased names of the built-in continuous colorscales.
    """
    from _plotly_utils.basevalidators import ColorscaleValidator

    return list(ColorscaleValidator("", "").named_colorscales)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/figure_factory/_scatterplot.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import absolute_import
import six
from plotly import exceptions, optional_imports
import plotly.colors as clrs
from plotly.figure_factory import utils
from plotly.graph_objs import graph_objs
from plotly.subplots import make_subplots
pd = optional_imports.get_module("pandas")
# Chart types allowed for the diagonal subplots of the scatterplot matrix.
DIAG_CHOICES = ["scatter", "histogram", "box"]
# Recognized colormap interpretations: categorical ("cat") or sequential ("seq").
VALID_COLORMAP_TYPES = ["cat", "seq"]
def endpts_to_intervals(endpts):
    """
    Return the list of intervals bounded by the given endpoints.

    Accepts a list or tuple of sequentially increasing numbers and returns
    a list representation of the mathematical intervals with these numbers
    as endpoints. For example, [1, 6] returns [[-inf, 1], [1, 6], [6, inf]]

    :raises: (PlotlyError) If input is not a list or tuple
    :raises: (PlotlyError) If the input contains a string
    :raises: (PlotlyError) If any number does not increase after the
        previous one in the sequence
    """
    error_message = (
        "The intervals_endpts argument must be a list or tuple "
        "of a sequence of increasing numbers."
    )
    # Only list/tuple sequences are accepted.
    if not isinstance(endpts, (list, tuple)):
        raise exceptions.PlotlyError(error_message)
    # Strings cannot act as interval boundaries.
    for entry in endpts:
        if isinstance(entry, str):
            raise exceptions.PlotlyError(error_message)
    # Boundaries must be strictly increasing.
    for left, right in zip(endpts[:-1], endpts[1:]):
        if left >= right:
            raise exceptions.PlotlyError(error_message)

    intervals = [[float("-inf"), endpts[0]]]
    intervals.extend([left, right] for left, right in zip(endpts[:-1], endpts[1:]))
    intervals.append([endpts[-1], float("inf")])
    return intervals
def hide_tick_labels_from_box_subplots(fig):
    """
    Turn off x-axis tick labels for every box-plot subplot in the figure.
    """
    for trace in fig["data"]:
        if trace["type"] != "box":
            continue
        # trace["xaxis"] looks like "x3"; the matching layout key is "xaxis3"
        # (plotly.py subplot axes are named xaxis1, xaxis2, ...).
        axis_key = "xaxis{}".format(trace["xaxis"][1:])
        fig["layout"][axis_key]["showticklabels"] = False
def validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs):
    """
    Validate the basic inputs of FigureFactory.create_scatterplotmatrix().

    :raises: (PlotlyError) If pandas is not imported
    :raises: (PlotlyError) If pandas dataframe is not inputted
    :raises: (PlotlyError) If pandas dataframe has <= 1 columns
    :raises: (PlotlyError) If diagonal plot choice (diag) is not one of
        the viable options
    :raises: (PlotlyError) If colormap_type is not a valid choice
    :raises: (PlotlyError) If kwargs contains 'size', 'color' or
        'colorscale'
    """
    # pandas is an optional import for this module; bail out if missing.
    if not pd:
        raise ImportError(
            "FigureFactory.scatterplotmatrix requires a pandas DataFrame."
        )

    # The input must be an actual pandas DataFrame ...
    if not isinstance(df, pd.core.frame.DataFrame):
        raise exceptions.PlotlyError(
            "Dataframe not inputed. Please use a pandas "
            "dataframe to produce a scatterplot matrix."
        )

    # ... with at least two columns to make a matrix of.
    if len(df.columns) <= 1:
        raise exceptions.PlotlyError(
            "Dataframe has only one column. To use the "
            "scatterplot matrix, use at least 2 columns."
        )

    if diag not in DIAG_CHOICES:
        raise exceptions.PlotlyError(
            "Make sure diag is set to one of {}".format(DIAG_CHOICES)
        )

    if colormap_type not in VALID_COLORMAP_TYPES:
        raise exceptions.PlotlyError(
            "Must choose a valid colormap type. Either 'cat' or 'seq' "
            "for a categorical and sequential colormap respectively."
        )

    # These marker keys are managed internally and may not be supplied.
    if "marker" in kwargs:
        forbidden = ["size", "color", "colorscale"]
        if any(key in kwargs["marker"] for key in forbidden):
            raise exceptions.PlotlyError(
                "Your kwargs dictionary cannot include the 'size', "
                "'color' or 'colorscale' key words inside the marker "
                "dict since 'size' is already an argument of the "
                "scatterplot matrix function and both 'color' and "
                "'colorscale are set internally."
            )
def scatterplot(dataframe, headers, diag, size, height, width, title, **kwargs):
    """
    Refer to FigureFactory.create_scatterplotmatrix() for docstring

    Returns fig for scatterplotmatrix without index.

    :param (list) dataframe: list of column-value lists, one per header
    :param (list) headers: column names, used to title the outer axes
    :param (str) diag: chart type for the diagonal cells ('scatter',
        'histogram' or 'box')
    :param (float) size: marker size in px for scatter cells
    :param (int|float) height: figure height
    :param (int|float) width: figure width
    :param (str) title: figure title
    """
    dim = len(dataframe)
    fig = make_subplots(rows=dim, cols=dim, print_grid=False)
    trace_list = []
    # Build one trace per (row, column) cell. Every branch must append
    # exactly once so that trace_list lines up one-to-one with the
    # dim * dim subplot grid walked below; the previous version appended
    # only in the scatter branches, leaving histogram/box traces out of
    # trace_list and desynchronizing the grid.
    for listy in dataframe:
        for listx in dataframe:
            if (listx == listy) and (diag == "histogram"):
                trace = graph_objs.Histogram(x=listx, showlegend=False)
            elif (listx == listy) and (diag == "box"):
                trace = graph_objs.Box(y=listx, name=None, showlegend=False)
            elif "marker" in kwargs:
                # Caller-supplied marker dict: inject the managed size key.
                kwargs["marker"]["size"] = size
                trace = graph_objs.Scatter(
                    x=listx, y=listy, mode="markers", showlegend=False, **kwargs
                )
            else:
                trace = graph_objs.Scatter(
                    x=listx,
                    y=listy,
                    mode="markers",
                    marker=dict(size=size),
                    showlegend=False,
                    **kwargs
                )
            trace_list.append(trace)

    # Place the traces row-major into the subplot grid.
    trace_index = 0
    indices = range(1, dim + 1)
    for y_index in indices:
        for x_index in indices:
            fig.append_trace(trace_list[trace_index], y_index, x_index)
            trace_index += 1

    # Title the bottom-row x axes and the first-column y axes with headers.
    for j in range(dim):
        xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
        fig["layout"][xaxis_key].update(title=headers[j])

    for j in range(dim):
        yaxis_key = "yaxis{}".format(1 + (dim * j))
        fig["layout"][yaxis_key].update(title=headers[j])

    fig["layout"].update(height=height, width=width, title=title, showlegend=True)

    hide_tick_labels_from_box_subplots(fig)
    return fig
def scatterplot_dict(
    dataframe,
    headers,
    diag,
    size,
    height,
    width,
    title,
    index,
    index_vals,
    endpts,
    colormap,
    colormap_type,
    **kwargs
):
    """
    Refer to FigureFactory.create_scatterplotmatrix() for docstring

    Returns fig for scatterplotmatrix with both index and colormap picked.
    Used if colormap is a dictionary with index values as keys pointing to
    colors. Forces colormap_type to behave categorically because it would
    not make sense colors are assigned to each index value and thus
    implies that a categorical approach should be taken
    """
    theme = colormap
    dim = len(dataframe)
    fig = make_subplots(rows=dim, cols=dim, print_grid=False)
    trace_list = []
    # legend_param counts subplot cells as they are built; only the cell
    # where it equals 1 (the second cell) emits legend-visible traces, so
    # each index name appears exactly once in the legend.
    legend_param = 0
    # Work over all permutations of list pairs
    for listy in dataframe:
        for listx in dataframe:
            # create a dictionary for index_vals
            unique_index_vals = {}
            for name in index_vals:
                if name not in unique_index_vals:
                    unique_index_vals[name] = []

            # Fill all the rest of the names into the dictionary
            # (sorted so trace insertion order matches the grid-filling
            # loop at the bottom of this function)
            for name in sorted(unique_index_vals.keys()):
                # Select only the rows whose index value matches `name`.
                new_listx = []
                new_listy = []
                for j in range(len(index_vals)):
                    if index_vals[j] == name:
                        new_listx.append(listx[j])
                        new_listy.append(listy[j])
                # Generate trace with VISIBLE icon
                if legend_param == 1:
                    if (listx == listy) and (diag == "histogram"):
                        trace = graph_objs.Histogram(
                            x=new_listx, marker=dict(color=theme[name]), showlegend=True
                        )
                    elif (listx == listy) and (diag == "box"):
                        trace = graph_objs.Box(
                            y=new_listx,
                            name=None,
                            marker=dict(color=theme[name]),
                            showlegend=True,
                        )
                    else:
                        if "marker" in kwargs:
                            # managed keys injected into caller's marker dict
                            kwargs["marker"]["size"] = size
                            kwargs["marker"]["color"] = theme[name]
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode="markers",
                                name=name,
                                showlegend=True,
                                **kwargs
                            )
                        else:
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode="markers",
                                name=name,
                                marker=dict(size=size, color=theme[name]),
                                showlegend=True,
                                **kwargs
                            )
                # Generate trace with INVISIBLE icon
                else:
                    if (listx == listy) and (diag == "histogram"):
                        trace = graph_objs.Histogram(
                            x=new_listx,
                            marker=dict(color=theme[name]),
                            showlegend=False,
                        )
                    elif (listx == listy) and (diag == "box"):
                        trace = graph_objs.Box(
                            y=new_listx,
                            name=None,
                            marker=dict(color=theme[name]),
                            showlegend=False,
                        )
                    else:
                        if "marker" in kwargs:
                            kwargs["marker"]["size"] = size
                            kwargs["marker"]["color"] = theme[name]
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode="markers",
                                name=name,
                                showlegend=False,
                                **kwargs
                            )
                        else:
                            trace = graph_objs.Scatter(
                                x=new_listx,
                                y=new_listy,
                                mode="markers",
                                name=name,
                                marker=dict(size=size, color=theme[name]),
                                showlegend=False,
                                **kwargs
                            )
                # Push the trace into dictionary
                unique_index_vals[name] = trace
            trace_list.append(unique_index_vals)
            legend_param += 1

    # Place each cell's per-name traces row-major into the subplot grid.
    trace_index = 0
    indices = range(1, dim + 1)
    for y_index in indices:
        for x_index in indices:
            for name in sorted(trace_list[trace_index].keys()):
                fig.append_trace(trace_list[trace_index][name], y_index, x_index)
            trace_index += 1

    # Insert headers into the figure
    for j in range(dim):
        xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
        fig["layout"][xaxis_key].update(title=headers[j])

    for j in range(dim):
        yaxis_key = "yaxis{}".format(1 + (dim * j))
        fig["layout"][yaxis_key].update(title=headers[j])

    hide_tick_labels_from_box_subplots(fig)

    if diag == "histogram":
        # Stacked bars so the per-name diagonal histograms compose.
        fig["layout"].update(
            height=height, width=width, title=title, showlegend=True, barmode="stack"
        )
        return fig

    else:
        fig["layout"].update(height=height, width=width, title=title, showlegend=True)
        return fig
def scatterplot_theme(
    dataframe,
    headers,
    diag,
    size,
    height,
    width,
    title,
    index,
    index_vals,
    endpts,
    colormap,
    colormap_type,
    **kwargs
):
    """
    Refer to FigureFactory.create_scatterplotmatrix() for docstring

    Returns fig for scatterplotmatrix with both index and colormap picked

    Three regimes are handled in turn:
    1. string index values  -> one colored trace per unique name;
    2. numeric index values with `endpts` -> one colored trace per interval;
    3. numeric index values without `endpts` -> a single trace per cell
       colored continuously through a colorscale built from `colormap`.
    """
    # Check if index is made of string values
    if isinstance(index_vals[0], str):
        unique_index_vals = []
        for name in index_vals:
            if name not in unique_index_vals:
                unique_index_vals.append(name)
        n_colors_len = len(unique_index_vals)

        # Convert colormap to list of n RGB tuples
        if colormap_type == "seq":
            # interpolate n colors between the first two colormap entries
            foo = clrs.color_parser(colormap, clrs.unlabel_rgb)
            foo = clrs.n_colors(foo[0], foo[1], n_colors_len)
            theme = clrs.color_parser(foo, clrs.label_rgb)

        if colormap_type == "cat":
            # leave list of colors the same way
            theme = colormap

        dim = len(dataframe)
        fig = make_subplots(rows=dim, cols=dim, print_grid=False)
        trace_list = []
        # Only the cell where legend_param == 1 emits legend-visible traces.
        legend_param = 0
        # Work over all permutations of list pairs
        for listy in dataframe:
            for listx in dataframe:
                # create a dictionary for index_vals
                unique_index_vals = {}
                for name in index_vals:
                    if name not in unique_index_vals:
                        unique_index_vals[name] = []

                c_indx = 0  # color index
                # Fill all the rest of the names into the dictionary
                for name in sorted(unique_index_vals.keys()):
                    # Rows whose index value matches `name`.
                    new_listx = []
                    new_listy = []
                    for j in range(len(index_vals)):
                        if index_vals[j] == name:
                            new_listx.append(listx[j])
                            new_listy.append(listy[j])
                    # Generate trace with VISIBLE icon
                    if legend_param == 1:
                        if (listx == listy) and (diag == "histogram"):
                            trace = graph_objs.Histogram(
                                x=new_listx,
                                marker=dict(color=theme[c_indx]),
                                showlegend=True,
                            )
                        elif (listx == listy) and (diag == "box"):
                            trace = graph_objs.Box(
                                y=new_listx,
                                name=None,
                                marker=dict(color=theme[c_indx]),
                                showlegend=True,
                            )
                        else:
                            if "marker" in kwargs:
                                # managed keys injected into caller's marker
                                kwargs["marker"]["size"] = size
                                kwargs["marker"]["color"] = theme[c_indx]
                                trace = graph_objs.Scatter(
                                    x=new_listx,
                                    y=new_listy,
                                    mode="markers",
                                    name=name,
                                    showlegend=True,
                                    **kwargs
                                )
                            else:
                                trace = graph_objs.Scatter(
                                    x=new_listx,
                                    y=new_listy,
                                    mode="markers",
                                    name=name,
                                    marker=dict(size=size, color=theme[c_indx]),
                                    showlegend=True,
                                    **kwargs
                                )
                    # Generate trace with INVISIBLE icon
                    else:
                        if (listx == listy) and (diag == "histogram"):
                            trace = graph_objs.Histogram(
                                x=new_listx,
                                marker=dict(color=theme[c_indx]),
                                showlegend=False,
                            )
                        elif (listx == listy) and (diag == "box"):
                            trace = graph_objs.Box(
                                y=new_listx,
                                name=None,
                                marker=dict(color=theme[c_indx]),
                                showlegend=False,
                            )
                        else:
                            if "marker" in kwargs:
                                kwargs["marker"]["size"] = size
                                kwargs["marker"]["color"] = theme[c_indx]
                                trace = graph_objs.Scatter(
                                    x=new_listx,
                                    y=new_listy,
                                    mode="markers",
                                    name=name,
                                    showlegend=False,
                                    **kwargs
                                )
                            else:
                                trace = graph_objs.Scatter(
                                    x=new_listx,
                                    y=new_listy,
                                    mode="markers",
                                    name=name,
                                    marker=dict(size=size, color=theme[c_indx]),
                                    showlegend=False,
                                    **kwargs
                                )
                    # Push the trace into dictionary
                    unique_index_vals[name] = trace

                    # cycle through the theme colors, wrapping around
                    if c_indx >= (len(theme) - 1):
                        c_indx = -1
                    c_indx += 1
                trace_list.append(unique_index_vals)
                legend_param += 1

        trace_index = 0
        indices = range(1, dim + 1)
        for y_index in indices:
            for x_index in indices:
                for name in sorted(trace_list[trace_index].keys()):
                    fig.append_trace(trace_list[trace_index][name], y_index, x_index)
                trace_index += 1

        # Insert headers into the figure
        for j in range(dim):
            xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
            fig["layout"][xaxis_key].update(title=headers[j])

        for j in range(dim):
            yaxis_key = "yaxis{}".format(1 + (dim * j))
            fig["layout"][yaxis_key].update(title=headers[j])

        hide_tick_labels_from_box_subplots(fig)

        if diag == "histogram":
            fig["layout"].update(
                height=height,
                width=width,
                title=title,
                showlegend=True,
                barmode="stack",
            )
            return fig

        elif diag == "box":
            fig["layout"].update(
                height=height, width=width, title=title, showlegend=True
            )
            return fig

        else:
            fig["layout"].update(
                height=height, width=width, title=title, showlegend=True
            )
            return fig

    else:
        if endpts:
            # Numeric index grouped into categorical intervals.
            intervals = utils.endpts_to_intervals(endpts)

            # Convert colormap to list of n RGB tuples
            if colormap_type == "seq":
                foo = clrs.color_parser(colormap, clrs.unlabel_rgb)
                foo = clrs.n_colors(foo[0], foo[1], len(intervals))
                theme = clrs.color_parser(foo, clrs.label_rgb)

            if colormap_type == "cat":
                # leave list of colors the same way
                theme = colormap

            dim = len(dataframe)
            fig = make_subplots(rows=dim, cols=dim, print_grid=False)
            trace_list = []
            legend_param = 0
            # Work over all permutations of list pairs
            for listy in dataframe:
                for listx in dataframe:
                    interval_labels = {}
                    for interval in intervals:
                        interval_labels[str(interval)] = []

                    c_indx = 0  # color index
                    # Fill all the rest of the names into the dictionary
                    for interval in intervals:
                        # Rows whose index value falls in (lo, hi].
                        new_listx = []
                        new_listy = []
                        for j in range(len(index_vals)):
                            if interval[0] < index_vals[j] <= interval[1]:
                                new_listx.append(listx[j])
                                new_listy.append(listy[j])
                        # Generate trace with VISIBLE icon
                        if legend_param == 1:
                            if (listx == listy) and (diag == "histogram"):
                                trace = graph_objs.Histogram(
                                    x=new_listx,
                                    marker=dict(color=theme[c_indx]),
                                    showlegend=True,
                                )
                            elif (listx == listy) and (diag == "box"):
                                trace = graph_objs.Box(
                                    y=new_listx,
                                    name=None,
                                    marker=dict(color=theme[c_indx]),
                                    showlegend=True,
                                )
                            else:
                                if "marker" in kwargs:
                                    kwargs["marker"]["size"] = size
                                    (kwargs["marker"]["color"]) = theme[c_indx]
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode="markers",
                                        name=str(interval),
                                        showlegend=True,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode="markers",
                                        name=str(interval),
                                        marker=dict(size=size, color=theme[c_indx]),
                                        showlegend=True,
                                        **kwargs
                                    )
                        # Generate trace with INVISIBLE icon
                        else:
                            if (listx == listy) and (diag == "histogram"):
                                trace = graph_objs.Histogram(
                                    x=new_listx,
                                    marker=dict(color=theme[c_indx]),
                                    showlegend=False,
                                )
                            elif (listx == listy) and (diag == "box"):
                                trace = graph_objs.Box(
                                    y=new_listx,
                                    name=None,
                                    marker=dict(color=theme[c_indx]),
                                    showlegend=False,
                                )
                            else:
                                if "marker" in kwargs:
                                    kwargs["marker"]["size"] = size
                                    (kwargs["marker"]["color"]) = theme[c_indx]
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode="markers",
                                        name=str(interval),
                                        showlegend=False,
                                        **kwargs
                                    )
                                else:
                                    trace = graph_objs.Scatter(
                                        x=new_listx,
                                        y=new_listy,
                                        mode="markers",
                                        name=str(interval),
                                        marker=dict(size=size, color=theme[c_indx]),
                                        showlegend=False,
                                        **kwargs
                                    )
                        # Push the trace into dictionary
                        interval_labels[str(interval)] = trace

                        # cycle through the theme colors, wrapping around
                        if c_indx >= (len(theme) - 1):
                            c_indx = -1
                        c_indx += 1
                    trace_list.append(interval_labels)
                    legend_param += 1

            trace_index = 0
            indices = range(1, dim + 1)
            for y_index in indices:
                for x_index in indices:
                    for interval in intervals:
                        fig.append_trace(
                            trace_list[trace_index][str(interval)], y_index, x_index
                        )
                    trace_index += 1

            # Insert headers into the figure
            for j in range(dim):
                xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
                fig["layout"][xaxis_key].update(title=headers[j])
            for j in range(dim):
                yaxis_key = "yaxis{}".format(1 + (dim * j))
                fig["layout"][yaxis_key].update(title=headers[j])

            hide_tick_labels_from_box_subplots(fig)

            if diag == "histogram":
                fig["layout"].update(
                    height=height,
                    width=width,
                    title=title,
                    showlegend=True,
                    barmode="stack",
                )
                return fig

            elif diag == "box":
                fig["layout"].update(
                    height=height, width=width, title=title, showlegend=True
                )
                return fig

            else:
                fig["layout"].update(
                    height=height, width=width, title=title, showlegend=True
                )
                return fig

        else:
            # Continuous coloring: index values feed a colorscale directly.
            theme = colormap

            # add a copy of rgb color to theme if it contains one color
            if len(theme) <= 1:
                theme.append(theme[0])

            # Build a plotly colorscale evenly spaced over the theme colors.
            color = []
            for incr in range(len(theme)):
                color.append([1.0 / (len(theme) - 1) * incr, theme[incr]])

            dim = len(dataframe)
            fig = make_subplots(rows=dim, cols=dim, print_grid=False)
            trace_list = []
            legend_param = 0
            # Run through all permutations of list pairs
            for listy in dataframe:
                for listx in dataframe:
                    # Generate trace with VISIBLE icon
                    # (here "visible" means the colorbar is shown via
                    # showscale=True; the legend itself stays hidden)
                    if legend_param == 1:
                        if (listx == listy) and (diag == "histogram"):
                            trace = graph_objs.Histogram(
                                x=listx, marker=dict(color=theme[0]), showlegend=False
                            )
                        elif (listx == listy) and (diag == "box"):
                            trace = graph_objs.Box(
                                y=listx, marker=dict(color=theme[0]), showlegend=False
                            )
                        else:
                            if "marker" in kwargs:
                                kwargs["marker"]["size"] = size
                                kwargs["marker"]["color"] = index_vals
                                kwargs["marker"]["colorscale"] = color
                                kwargs["marker"]["showscale"] = True
                                trace = graph_objs.Scatter(
                                    x=listx,
                                    y=listy,
                                    mode="markers",
                                    showlegend=False,
                                    **kwargs
                                )
                            else:
                                trace = graph_objs.Scatter(
                                    x=listx,
                                    y=listy,
                                    mode="markers",
                                    marker=dict(
                                        size=size,
                                        color=index_vals,
                                        colorscale=color,
                                        showscale=True,
                                    ),
                                    showlegend=False,
                                    **kwargs
                                )
                    # Generate trace with INVISIBLE icon
                    else:
                        if (listx == listy) and (diag == "histogram"):
                            trace = graph_objs.Histogram(
                                x=listx, marker=dict(color=theme[0]), showlegend=False
                            )
                        elif (listx == listy) and (diag == "box"):
                            trace = graph_objs.Box(
                                y=listx, marker=dict(color=theme[0]), showlegend=False
                            )
                        else:
                            if "marker" in kwargs:
                                kwargs["marker"]["size"] = size
                                kwargs["marker"]["color"] = index_vals
                                kwargs["marker"]["colorscale"] = color
                                kwargs["marker"]["showscale"] = False
                                trace = graph_objs.Scatter(
                                    x=listx,
                                    y=listy,
                                    mode="markers",
                                    showlegend=False,
                                    **kwargs
                                )
                            else:
                                trace = graph_objs.Scatter(
                                    x=listx,
                                    y=listy,
                                    mode="markers",
                                    marker=dict(
                                        size=size,
                                        color=index_vals,
                                        colorscale=color,
                                        showscale=False,
                                    ),
                                    showlegend=False,
                                    **kwargs
                                )
                    # Push the trace into list
                    trace_list.append(trace)
                    legend_param += 1

            trace_index = 0
            indices = range(1, dim + 1)
            for y_index in indices:
                for x_index in indices:
                    fig.append_trace(trace_list[trace_index], y_index, x_index)
                    trace_index += 1

            # Insert headers into the figure
            for j in range(dim):
                xaxis_key = "xaxis{}".format((dim * dim) - dim + 1 + j)
                fig["layout"][xaxis_key].update(title=headers[j])
            for j in range(dim):
                yaxis_key = "yaxis{}".format(1 + (dim * j))
                fig["layout"][yaxis_key].update(title=headers[j])

            hide_tick_labels_from_box_subplots(fig)

            if diag == "histogram":
                fig["layout"].update(
                    height=height,
                    width=width,
                    title=title,
                    showlegend=True,
                    barmode="stack",
                )
                return fig

            elif diag == "box":
                fig["layout"].update(
                    height=height, width=width, title=title, showlegend=True
                )
                return fig

            else:
                fig["layout"].update(
                    height=height, width=width, title=title, showlegend=True
                )
                return fig
def create_scatterplotmatrix(
    df,
    index=None,
    endpts=None,
    diag="scatter",
    height=500,
    width=500,
    size=6,
    title="Scatterplot Matrix",
    colormap=None,
    colormap_type="cat",
    dataframe=None,
    headers=None,
    index_vals=None,
    **kwargs
):
    """
    Returns data for a scatterplot matrix;
    **deprecated**,
    use instead the plotly.graph_objects trace
    :class:`plotly.graph_objects.Splom`.

    :param (array) df: array of the data with column headers
    :param (str) index: name of the index column in data array
    :param (list|tuple) endpts: takes an increasing sequece of numbers
        that defines intervals on the real line. They are used to group
        the entries in an index of numbers into their corresponding
        interval and therefore can be treated as categorical data
    :param (str) diag: sets the chart type for the main diagonal plots.
        The options are 'scatter', 'histogram' and 'box'.
    :param (int|float) height: sets the height of the chart
    :param (int|float) width: sets the width of the chart
    :param (float) size: sets the marker size (in px)
    :param (str) title: the title label of the scatterplot matrix
    :param (str|tuple|list|dict) colormap: either a plotly scale name,
        an rgb or hex color, a color tuple, a list of colors or a
        dictionary. An rgb color is of the form 'rgb(x, y, z)' where
        x, y and z belong to the interval [0, 255] and a color tuple is a
        tuple of the form (a, b, c) where a, b and c belong to [0, 1].
        If colormap is a list, it must contain valid color types as its
        members.
        If colormap is a dictionary, all the string entries in
        the index column must be a key in colormap. In this case, the
        colormap_type is forced to 'cat' or categorical
    :param (str) colormap_type: determines how colormap is interpreted.
        Valid choices are 'seq' (sequential) and 'cat' (categorical). If
        'seq' is selected, only the first two colors in colormap will be
        considered (when colormap is a list) and the index values will be
        linearly interpolated between those two colors. This option is
        forced if all index values are numeric.
        If 'cat' is selected, a color from colormap will be assigned to
        each category from index, including the intervals if endpts is
        being used
    :param (dict) **kwargs: a dictionary of scatterplot arguments
        The only forbidden parameters are 'size', 'color' and
        'colorscale' in 'marker'

    Example 1: Vanilla Scatterplot Matrix

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd

    >>> # Create dataframe
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                 columns=['Column 1', 'Column 2'])

    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df)
    >>> fig.show()

    Example 2: Indexing a Column

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd

    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 2),
    ...                    columns=['A', 'B'])

    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])

    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, index='Fruit', size=10)
    >>> fig.show()

    Example 3: Styling the Diagonal Subplots

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd

    >>> # Create dataframe with index
    >>> df = pd.DataFrame(np.random.randn(10, 4),
    ...                    columns=['A', 'B', 'C', 'D'])

    >>> # Add another column of strings to the dataframe
    >>> df['Fruit'] = pd.Series(['apple', 'apple', 'grape', 'apple', 'apple',
    ...                          'grape', 'pear', 'pear', 'apple', 'pear'])

    >>> # Create scatterplot matrix
    >>> fig = create_scatterplotmatrix(df, diag='box', index='Fruit', height=1000,
    ...                                width=1000)
    >>> fig.show()

    Example 4: Use a Theme to Style the Subplots

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd

    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])

    >>> # Create scatterplot matrix using a built-in
    >>> # Plotly palette scale and indexing column 'A'
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap='Blues', height=800, width=800)
    >>> fig.show()

    Example 5: Example 4 with Interval Factoring

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd

    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['A', 'B', 'C'])

    >>> # Create scatterplot matrix using a list of 2 rgb tuples
    >>> # and endpoints at -1, 0 and 1
    >>> fig = create_scatterplotmatrix(df, diag='histogram', index='A',
    ...                                colormap=['rgb(140, 255, 50)',
    ...                                          'rgb(170, 60, 115)', '#6c4774',
    ...                                          (0.5, 0.1, 0.8)],
    ...                                endpts=[-1, 0, 1], height=800, width=800)
    >>> fig.show()

    Example 6: Using the colormap as a Dictionary

    >>> from plotly.graph_objs import graph_objs
    >>> from plotly.figure_factory import create_scatterplotmatrix

    >>> import numpy as np
    >>> import pandas as pd
    >>> import random

    >>> # Create dataframe with random data
    >>> df = pd.DataFrame(np.random.randn(100, 3),
    ...                    columns=['Column A',
    ...                             'Column B',
    ...                             'Column C'])

    >>> # Add new color column to dataframe
    >>> new_column = []
    >>> strange_colors = ['turquoise', 'limegreen', 'goldenrod']

    >>> for j in range(100):
    ...     new_column.append(random.choice(strange_colors))
    >>> df['Colors'] = pd.Series(new_column, index=df.index)

    >>> # Create scatterplot matrix using a dictionary of hex color values
    >>> # which correspond to actual color names in 'Colors' column
    >>> fig = create_scatterplotmatrix(
    ...     df, diag='box', index='Colors',
    ...     colormap= dict(
    ...         turquoise = '#00F5FF',
    ...         limegreen = '#32CD32',
    ...         goldenrod = '#DAA520'
    ...     ),
    ...     colormap_type='cat',
    ...     height=800, width=800
    ... )
    >>> fig.show()
    """
    # TODO: protected until #282
    if dataframe is None:
        dataframe = []
    if headers is None:
        headers = []
    if index_vals is None:
        index_vals = []

    validate_scatterplotmatrix(df, index, diag, colormap_type, **kwargs)

    # Validate colormap
    if isinstance(colormap, dict):
        colormap = clrs.validate_colors_dict(colormap, "rgb")
    elif (
        isinstance(colormap, six.string_types)
        and "rgb" not in colormap
        and "#" not in colormap
    ):
        # A bare string that is not an rgb/hex color must be a named scale.
        if colormap not in clrs.PLOTLY_SCALES.keys():
            raise exceptions.PlotlyError(
                "If 'colormap' is a string, it must be the name "
                "of a Plotly Colorscale. The available colorscale "
                "names are {}".format(clrs.PLOTLY_SCALES.keys())
            )
        else:
            # TODO change below to allow the correct Plotly colorscale
            colormap = clrs.colorscale_to_colors(clrs.PLOTLY_SCALES[colormap])
            # keep only first and last item - fix later
            colormap = [colormap[0]] + [colormap[-1]]
        colormap = clrs.validate_colors(colormap, "rgb")
    else:
        colormap = clrs.validate_colors(colormap, "rgb")

    if not index:
        # No index column: every column becomes an axis of the matrix.
        for name in df:
            headers.append(name)
        for name in headers:
            dataframe.append(df[name].values.tolist())
        # Check for same data-type in df columns
        utils.validate_dataframe(dataframe)
        figure = scatterplot(
            dataframe, headers, diag, size, height, width, title, **kwargs
        )
        return figure
    else:
        # Validate index selection
        if index not in df:
            raise exceptions.PlotlyError(
                "Make sure you set the index "
                "input variable to one of the "
                "column names of your "
                "dataframe."
            )
        index_vals = df[index].values.tolist()
        for name in df:
            if name != index:
                headers.append(name)
        for name in headers:
            dataframe.append(df[name].values.tolist())

        # check for same data-type in each df column
        utils.validate_dataframe(dataframe)
        utils.validate_index(index_vals)

        # check if all colormap keys are in the index
        # if colormap is a dictionary
        if isinstance(colormap, dict):
            # Bug fix: this check previously ran inside a redundant
            # ``for key in colormap`` loop, so an EMPTY colormap dict
            # skipped validation entirely (failing later with a raw
            # KeyError) and a populated one repeated the identical
            # check once per key.
            if not all(index in colormap for index in index_vals):
                raise exceptions.PlotlyError(
                    "If colormap is a "
                    "dictionary, all the "
                    "names in the index "
                    "must be keys."
                )
            figure = scatterplot_dict(
                dataframe,
                headers,
                diag,
                size,
                height,
                width,
                title,
                index,
                index_vals,
                endpts,
                colormap,
                colormap_type,
                **kwargs
            )
            return figure

        else:
            figure = scatterplot_theme(
                dataframe,
                headers,
                diag,
                size,
                height,
                width,
                title,
                index,
                index_vals,
                endpts,
                colormap,
                colormap_type,
                **kwargs
            )
            return figure
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/scatter3d/__init__.py | import sys
# Python < 3.7 has no module-level __getattr__ (PEP 562), so every
# submodule and class must be imported eagerly; on 3.7+ the names are
# resolved lazily on first access via relative_import.
if sys.version_info < (3, 7):
    from ._error_x import ErrorX
    from ._error_y import ErrorY
    from ._error_z import ErrorZ
    from ._hoverlabel import Hoverlabel
    from ._line import Line
    from ._marker import Marker
    from ._projection import Projection
    from ._stream import Stream
    from ._textfont import Textfont
    from . import hoverlabel
    from . import line
    from . import marker
    from . import projection
else:
    from _plotly_utils.importers import relative_import

    # Build lazy __getattr__/__dir__ hooks: submodules first, then the
    # dotted paths of the classes exposed at this package level.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".hoverlabel", ".line", ".marker", ".projection"],
        [
            "._error_x.ErrorX",
            "._error_y.ErrorY",
            "._error_z.ErrorZ",
            "._hoverlabel.Hoverlabel",
            "._line.Line",
            "._marker.Marker",
            "._projection.Projection",
            "._stream.Stream",
            "._textfont.Textfont",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/_annotation.py | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Annotation(_BaseLayoutHierarchyType):
# class properties
# --------------------
_parent_path_str = "layout"
_path_str = "layout.annotation"
_valid_props = {
"align",
"arrowcolor",
"arrowhead",
"arrowside",
"arrowsize",
"arrowwidth",
"ax",
"axref",
"ay",
"ayref",
"bgcolor",
"bordercolor",
"borderpad",
"borderwidth",
"captureevents",
"clicktoshow",
"font",
"height",
"hoverlabel",
"hovertext",
"name",
"opacity",
"showarrow",
"standoff",
"startarrowhead",
"startarrowsize",
"startstandoff",
"templateitemname",
"text",
"textangle",
"valign",
"visible",
"width",
"x",
"xanchor",
"xclick",
"xref",
"xshift",
"y",
"yanchor",
"yclick",
"yref",
"yshift",
}
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the `text` within the box. Has
an effect only if `text` spans two or more lines (i.e. `text`
contains one or more <br> HTML tags) or if an explicit width is
set to override the text width.
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# arrowcolor
# ----------
@property
def arrowcolor(self):
"""
Sets the color of the annotation arrow.
The 'arrowcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["arrowcolor"]
@arrowcolor.setter
def arrowcolor(self, val):
self["arrowcolor"] = val
# arrowhead
# ---------
@property
def arrowhead(self):
"""
Sets the end annotation arrow head style.
The 'arrowhead' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["arrowhead"]
@arrowhead.setter
def arrowhead(self, val):
self["arrowhead"] = val
# arrowside
# ---------
@property
def arrowside(self):
"""
Sets the annotation arrow head position.
The 'arrowside' property is a flaglist and may be specified
as a string containing:
- Any combination of ['end', 'start'] joined with '+' characters
(e.g. 'end+start')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["arrowside"]
@arrowside.setter
def arrowside(self, val):
self["arrowside"] = val
# arrowsize
# ---------
@property
def arrowsize(self):
"""
Sets the size of the end annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'arrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["arrowsize"]
@arrowsize.setter
def arrowsize(self, val):
self["arrowsize"] = val
# arrowwidth
# ----------
@property
def arrowwidth(self):
"""
Sets the width (in px) of annotation arrow line.
The 'arrowwidth' property is a number and may be specified as:
- An int or float in the interval [0.1, inf]
Returns
-------
int|float
"""
return self["arrowwidth"]
@arrowwidth.setter
def arrowwidth(self, val):
self["arrowwidth"] = val
# ax
# --
@property
def ax(self):
"""
Sets the x component of the arrow tail about the arrow head. If
`axref` is `pixel`, a positive (negative) component
corresponds to an arrow pointing from right to left (left to
right). If `axref` is an axis, this is an absolute value on
that axis, like `x`, NOT a relative value.
The 'ax' property accepts values of any type
Returns
-------
Any
"""
return self["ax"]
@ax.setter
def ax(self, val):
self["ax"] = val
# axref
# -----
@property
def axref(self):
"""
Indicates in what terms the tail of the annotation (ax,ay) is
specified. If `pixel`, `ax` is a relative offset in pixels
from `x`. If set to an x axis id (e.g. "x" or "x2"), `ax` is
specified in the same terms as that axis. This is useful for
trendline annotations which should continue to indicate the
correct trend when zoomed.
The 'axref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['pixel']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["axref"]
@axref.setter
def axref(self, val):
self["axref"] = val
# ay
# --
@property
def ay(self):
"""
Sets the y component of the arrow tail about the arrow head. If
`ayref` is `pixel`, a positive (negative) component
corresponds to an arrow pointing from bottom to top (top to
bottom). If `ayref` is an axis, this is an absolute value on
that axis, like `y`, NOT a relative value.
The 'ay' property accepts values of any type
Returns
-------
Any
"""
return self["ay"]
@ay.setter
def ay(self, val):
self["ay"] = val
# ayref
# -----
@property
def ayref(self):
"""
Indicates in what terms the tail of the annotation (ax,ay) is
specified. If `pixel`, `ay` is a relative offset in pixels
from `y`. If set to a y axis id (e.g. "y" or "y2"), `ay` is
specified in the same terms as that axis. This is useful for
trendline annotations which should continue to indicate the
correct trend when zoomed.
The 'ayref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['pixel']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["ayref"]
@ayref.setter
def ayref(self, val):
self["ayref"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the annotation.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the color of the border enclosing the annotation `text`.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderpad
# ---------
@property
def borderpad(self):
"""
Sets the padding (in px) between the `text` and the enclosing
border.
The 'borderpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderpad"]
@borderpad.setter
def borderpad(self, val):
self["borderpad"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) of the border enclosing the annotation
`text`.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# captureevents
# -------------
@property
def captureevents(self):
"""
Determines whether the annotation text box captures mouse move
and click events, or allows those events to pass through to
data points in the plot that may be behind the annotation. By
default `captureevents` is False unless `hovertext` is
provided. If you use the event `plotly_clickannotation` without
`hovertext` you must explicitly enable `captureevents`.
The 'captureevents' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["captureevents"]
@captureevents.setter
def captureevents(self, val):
self["captureevents"] = val
# clicktoshow
# -----------
@property
def clicktoshow(self):
"""
Makes this annotation respond to clicks on the plot. If you
click a data point that exactly matches the `x` and `y` values
of this annotation, and it is hidden (visible: false), it will
appear. In "onoff" mode, you must click the same point again to
make it disappear, so if you click multiple points, you can
show multiple annotations. In "onout" mode, a click anywhere
else in the plot (on another data point or not) will hide this
annotation. If you need to show/hide this annotation in
response to different `x` or `y` values, you can set `xclick`
and/or `yclick`. This is useful for example to label the side
of a bar. To label markers though, `standoff` is preferred over
`xclick` and `yclick`.
The 'clicktoshow' property is an enumeration that may be specified as:
- One of the following enumeration values:
[False, 'onoff', 'onout']
Returns
-------
Any
"""
return self["clicktoshow"]
@clicktoshow.setter
def clicktoshow(self, val):
self["clicktoshow"] = val
# font
# ----
@property
def font(self):
"""
Sets the annotation text font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.annotation.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.layout.annotation.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# height
# ------
@property
def height(self):
"""
Sets an explicit height for the text box. null (default) lets
the text set the box height. Taller text will be clipped.
The 'height' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["height"]
@height.setter
def height(self, val):
self["height"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.annotation.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
bgcolor
Sets the background color of the hover label.
By default uses the annotation's `bgcolor` made
opaque, or white if it was transparent.
bordercolor
Sets the border color of the hover label. By
default uses either dark grey or white, for
maximum contrast with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses
the global hover font and size, with color from
`hoverlabel.bordercolor`.
Returns
-------
plotly.graph_objs.layout.annotation.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets text to appear when hovering over this annotation. If
omitted or blank, no hover label will appear.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the annotation (text + arrow).
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# showarrow
# ---------
@property
def showarrow(self):
"""
Determines whether or not the annotation is drawn with an
arrow. If True, `text` is placed near the arrow's tail. If
False, `text` lines up with the `x` and `y` provided.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
# standoff
# --------
@property
def standoff(self):
"""
Sets a distance, in pixels, to move the end arrowhead away from
the position it is pointing at, for example to point at the
edge of a marker independent of zoom. Note that this shortens
the arrow from the `ax` / `ay` vector, in contrast to `xshift`
/ `yshift` which moves everything by this amount.
The 'standoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["standoff"]
@standoff.setter
def standoff(self, val):
self["standoff"] = val
# startarrowhead
# --------------
@property
def startarrowhead(self):
"""
Sets the start annotation arrow head style.
The 'startarrowhead' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 8]
Returns
-------
int
"""
return self["startarrowhead"]
@startarrowhead.setter
def startarrowhead(self, val):
self["startarrowhead"] = val
# startarrowsize
# --------------
@property
def startarrowsize(self):
"""
Sets the size of the start annotation arrow head, relative to
`arrowwidth`. A value of 1 (default) gives a head about 3x as
wide as the line.
The 'startarrowsize' property is a number and may be specified as:
- An int or float in the interval [0.3, inf]
Returns
-------
int|float
"""
return self["startarrowsize"]
@startarrowsize.setter
def startarrowsize(self, val):
self["startarrowsize"] = val
# startstandoff
# -------------
@property
def startstandoff(self):
"""
Sets a distance, in pixels, to move the start arrowhead away
from the position it is pointing at, for example to point at
the edge of a marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector, in contrast to
`xshift` / `yshift` which moves everything by this amount.
The 'startstandoff' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["startstandoff"]
@startstandoff.setter
def startstandoff(self, val):
self["startstandoff"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# text
# ----
@property
def text(self):
"""
Sets the text associated with this annotation. Plotly uses a
subset of HTML tags to do things like newline (<br>), bold
(<b></b>), italics (<i></i>), hyperlinks (<a href='...'></a>).
Tags <em>, <sup>, <sub> <span> are also supported.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textangle
# ---------
@property
def textangle(self):
"""
Sets the angle at which the `text` is drawn with respect to the
horizontal.
The 'textangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["textangle"]
@textangle.setter
def textangle(self, val):
self["textangle"] = val
# valign
# ------
@property
def valign(self):
"""
Sets the vertical alignment of the `text` within the box. Has
an effect only if an explicit height is set to override the
text height.
The 'valign' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["valign"]
@valign.setter
def valign(self, val):
self["valign"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this annotation is visible.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets an explicit width for the text box. null (default) lets
the text set the box width. Wider text will be clipped. There
is no automatic wrapping; use <br> to start a new line.
The 'width' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# x
# -
@property
def x(self):
"""
Sets the annotation's x position. If the axis `type` is "log",
then you must take the log of your desired range. If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'x' property accepts values of any type
Returns
-------
Any
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets the text box's horizontal position anchor This anchor
binds the `x` position to the "left", "center" or "right" of
the annotation. For example, if `x` is set to 1, `xref` to
"paper" and `xanchor` to "right" then the right-most portion of
the annotation lines up with the right-most edge of the
plotting area. If "auto", the anchor is equivalent to "center"
for data-referenced annotations or if there is an arrow,
whereas for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xclick
# ------
@property
def xclick(self):
"""
Toggle this annotation when clicking a data point whose `x`
value is `xclick` rather than the annotation's `x` value.
The 'xclick' property accepts values of any type
Returns
-------
Any
"""
return self["xclick"]
@xclick.setter
def xclick(self, val):
self["xclick"] = val
# xref
# ----
@property
def xref(self):
"""
Sets the annotation's x coordinate axis. If set to an x axis id
(e.g. "x" or "x2"), the `x` position refers to an x coordinate
If set to "paper", the `x` position refers to the distance from
the left side of the plotting area in normalized coordinates
where 0 (1) corresponds to the left (right) side.
The 'xref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^x([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["xref"]
@xref.setter
def xref(self, val):
self["xref"] = val
# xshift
# ------
@property
def xshift(self):
"""
Shifts the position of the whole annotation and arrow to the
right (positive) or left (negative) by this many pixels.
The 'xshift' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["xshift"]
@xshift.setter
def xshift(self, val):
self["xshift"] = val
# y
# -
@property
def y(self):
"""
Sets the annotation's y position. If the axis `type` is "log",
then you must take the log of your desired range. If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
The 'y' property accepts values of any type
Returns
-------
Any
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets the text box's vertical position anchor This anchor binds
the `y` position to the "top", "middle" or "bottom" of the
annotation. For example, if `y` is set to 1, `yref` to "paper"
and `yanchor` to "top" then the top-most portion of the
annotation lines up with the top-most edge of the plotting
area. If "auto", the anchor is equivalent to "middle" for data-
referenced annotations or if there is an arrow, whereas for
paper-referenced with no arrow, the anchor picked corresponds
to the closest side.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# yclick
# ------
@property
def yclick(self):
"""
Toggle this annotation when clicking a data point whose `y`
value is `yclick` rather than the annotation's `y` value.
The 'yclick' property accepts values of any type
Returns
-------
Any
"""
return self["yclick"]
@yclick.setter
def yclick(self, val):
self["yclick"] = val
# yref
# ----
@property
def yref(self):
"""
Sets the annotation's y coordinate axis. If set to an y axis id
(e.g. "y" or "y2"), the `y` position refers to an y coordinate
If set to "paper", the `y` position refers to the distance from
the bottom of the plotting area in normalized coordinates where
0 (1) corresponds to the bottom (top).
The 'yref' property is an enumeration that may be specified as:
- One of the following enumeration values:
['paper']
- A string that matches one of the following regular expressions:
['^y([2-9]|[1-9][0-9]+)?$']
Returns
-------
Any
"""
return self["yref"]
@yref.setter
def yref(self, val):
self["yref"] = val
# yshift
# ------
@property
def yshift(self):
"""
Shifts the position of the whole annotation and arrow up
(positive) or down (negative) by this many pixels.
The 'yshift' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["yshift"]
@yshift.setter
def yshift(self, val):
self["yshift"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the `text` within the
box. Has an effect only if `text` spans two or more
lines (i.e. `text` contains one or more <br> HTML tags)
or if an explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
arrowwidth
Sets the width (in px) of annotation arrow line.
ax
Sets the x component of the arrow tail about the arrow
head. If `axref` is `pixel`, a positive (negative)
component corresponds to an arrow pointing from right
to left (left to right). If `axref` is an axis, this is
an absolute value on that axis, like `x`, NOT a
relative value.
axref
Indicates in what terms the tail of the annotation
(ax,ay) is specified. If `pixel`, `ax` is a relative
offset in pixels from `x`. If set to an x axis id
(e.g. "x" or "x2"), `ax` is specified in the same
terms as that axis. This is useful for trendline
annotations which should continue to indicate the
correct trend when zoomed.
ay
Sets the y component of the arrow tail about the arrow
head. If `ayref` is `pixel`, a positive (negative)
component corresponds to an arrow pointing from bottom
to top (top to bottom). If `ayref` is an axis, this is
an absolute value on that axis, like `y`, NOT a
relative value.
ayref
Indicates in what terms the tail of the annotation
(ax,ay) is specified. If `pixel`, `ay` is a relative
offset in pixels from `y`. If set to a y axis id (e.g.
"y" or "y2"), `ay` is specified in the same terms as
that axis. This is useful for trendline annotations
which should continue to indicate the correct trend
when zoomed.
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the annotation
`text`.
borderpad
Sets the padding (in px) between the `text` and the
enclosing border.
borderwidth
Sets the width (in px) of the border enclosing the
annotation `text`.
captureevents
Determines whether the annotation text box captures
mouse move and click events, or allows those events to
pass through to data points in the plot that may be
behind the annotation. By default `captureevents` is
False unless `hovertext` is provided. If you use the
event `plotly_clickannotation` without `hovertext` you
must explicitly enable `captureevents`.
clicktoshow
Makes this annotation respond to clicks on the plot. If
you click a data point that exactly matches the `x` and
`y` values of this annotation, and it is hidden
(visible: false), it will appear. In "onoff" mode, you
must click the same point again to make it disappear,
so if you click multiple points, you can show multiple
annotations. In "onout" mode, a click anywhere else in
the plot (on another data point or not) will hide this
annotation. If you need to show/hide this annotation in
response to different `x` or `y` values, you can set
`xclick` and/or `yclick`. This is useful for example to
label the side of a bar. To label markers though,
`standoff` is preferred over `xclick` and `yclick`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height. Taller text
will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.annotation.Hoverlab
el` instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this annotation.
If omitted or blank, no hover label will appear.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the annotation (text + arrow).
showarrow
Determines whether or not the annotation is drawn with
an arrow. If True, `text` is placed near the arrow's
tail. If False, `text` lines up with the `x` and `y`
provided.
standoff
Sets a distance, in pixels, to move the end arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub> <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text height.
visible
Determines whether or not this annotation is visible.
width
Sets an explicit width for the text box. null (default)
lets the text set the box width. Wider text will be
clipped. There is no automatic wrapping; use <br> to
start a new line.
x
Sets the annotation's x position. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
xanchor
Sets the text box's horizontal position anchor This
anchor binds the `x` position to the "left", "center"
or "right" of the annotation. For example, if `x` is
set to 1, `xref` to "paper" and `xanchor` to "right"
then the right-most portion of the annotation lines up
with the right-most edge of the plotting area. If
"auto", the anchor is equivalent to "center" for data-
referenced annotations or if there is an arrow, whereas
for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
xclick
Toggle this annotation when clicking a data point whose
`x` value is `xclick` rather than the annotation's `x`
value.
xref
Sets the annotation's x coordinate axis. If set to an x
axis id (e.g. "x" or "x2"), the `x` position refers to
an x coordinate If set to "paper", the `x` position
refers to the distance from the left side of the
plotting area in normalized coordinates where 0 (1)
corresponds to the left (right) side.
xshift
Shifts the position of the whole annotation and arrow
to the right (positive) or left (negative) by this many
pixels.
y
Sets the annotation's y position. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
yanchor
Sets the text box's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the annotation. For example, if `y` is set
to 1, `yref` to "paper" and `yanchor` to "top" then the
top-most portion of the annotation lines up with the
top-most edge of the plotting area. If "auto", the
anchor is equivalent to "middle" for data-referenced
annotations or if there is an arrow, whereas for paper-
referenced with no arrow, the anchor picked corresponds
to the closest side.
yclick
Toggle this annotation when clicking a data point whose
`y` value is `yclick` rather than the annotation's `y`
value.
yref
Sets the annotation's y coordinate axis. If set to an y
axis id (e.g. "y" or "y2"), the `y` position refers to
an y coordinate If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top).
yshift
Shifts the position of the whole annotation and arrow
up (positive) or down (negative) by this many pixels.
"""
def __init__(
self,
arg=None,
align=None,
arrowcolor=None,
arrowhead=None,
arrowside=None,
arrowsize=None,
arrowwidth=None,
ax=None,
axref=None,
ay=None,
ayref=None,
bgcolor=None,
bordercolor=None,
borderpad=None,
borderwidth=None,
captureevents=None,
clicktoshow=None,
font=None,
height=None,
hoverlabel=None,
hovertext=None,
name=None,
opacity=None,
showarrow=None,
standoff=None,
startarrowhead=None,
startarrowsize=None,
startstandoff=None,
templateitemname=None,
text=None,
textangle=None,
valign=None,
visible=None,
width=None,
x=None,
xanchor=None,
xclick=None,
xref=None,
xshift=None,
y=None,
yanchor=None,
yclick=None,
yref=None,
yshift=None,
**kwargs
):
"""
Construct a new Annotation object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.Annotation`
align
Sets the horizontal alignment of the `text` within the
box. Has an effect only if `text` spans two or more
lines (i.e. `text` contains one or more <br> HTML tags)
or if an explicit width is set to override the text
width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
arrowwidth
Sets the width (in px) of annotation arrow line.
ax
Sets the x component of the arrow tail about the arrow
head. If `axref` is `pixel`, a positive (negative)
component corresponds to an arrow pointing from right
to left (left to right). If `axref` is an axis, this is
an absolute value on that axis, like `x`, NOT a
relative value.
axref
Indicates in what terms the tail of the annotation
(ax,ay) is specified. If `pixel`, `ax` is a relative
offset in pixels from `x`. If set to an x axis id
(e.g. "x" or "x2"), `ax` is specified in the same
terms as that axis. This is useful for trendline
annotations which should continue to indicate the
correct trend when zoomed.
ay
Sets the y component of the arrow tail about the arrow
head. If `ayref` is `pixel`, a positive (negative)
component corresponds to an arrow pointing from bottom
to top (top to bottom). If `ayref` is an axis, this is
an absolute value on that axis, like `y`, NOT a
relative value.
ayref
Indicates in what terms the tail of the annotation
(ax,ay) is specified. If `pixel`, `ay` is a relative
offset in pixels from `y`. If set to a y axis id (e.g.
"y" or "y2"), `ay` is specified in the same terms as
that axis. This is useful for trendline annotations
which should continue to indicate the correct trend
when zoomed.
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the annotation
`text`.
borderpad
Sets the padding (in px) between the `text` and the
enclosing border.
borderwidth
Sets the width (in px) of the border enclosing the
annotation `text`.
captureevents
Determines whether the annotation text box captures
mouse move and click events, or allows those events to
pass through to data points in the plot that may be
behind the annotation. By default `captureevents` is
False unless `hovertext` is provided. If you use the
event `plotly_clickannotation` without `hovertext` you
must explicitly enable `captureevents`.
clicktoshow
Makes this annotation respond to clicks on the plot. If
you click a data point that exactly matches the `x` and
`y` values of this annotation, and it is hidden
(visible: false), it will appear. In "onoff" mode, you
must click the same point again to make it disappear,
so if you click multiple points, you can show multiple
annotations. In "onout" mode, a click anywhere else in
the plot (on another data point or not) will hide this
annotation. If you need to show/hide this annotation in
response to different `x` or `y` values, you can set
`xclick` and/or `yclick`. This is useful for example to
label the side of a bar. To label markers though,
`standoff` is preferred over `xclick` and `yclick`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height. Taller text
will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.annotation.Hoverlab
el` instance or dict with compatible properties
hovertext
Sets text to appear when hovering over this annotation.
If omitted or blank, no hover label will appear.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
opacity
Sets the opacity of the annotation (text + arrow).
showarrow
Determines whether or not the annotation is drawn with
an arrow. If True, `text` is placed near the arrow's
tail. If False, `text` lines up with the `x` and `y`
provided.
standoff
Sets a distance, in pixels, to move the end arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow head,
relative to `arrowwidth`. A value of 1 (default) gives
a head about 3x as wide as the line.
startstandoff
Sets a distance, in pixels, to move the start arrowhead
away from the position it is pointing at, for example
to point at the edge of a marker independent of zoom.
Note that this shortens the arrow from the `ax` / `ay`
vector, in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
text
Sets the text associated with this annotation. Plotly
uses a subset of HTML tags to do things like newline
(<br>), bold (<b></b>), italics (<i></i>), hyperlinks
(<a href='...'></a>). Tags <em>, <sup>, <sub> <span>
are also supported.
textangle
Sets the angle at which the `text` is drawn with
respect to the horizontal.
valign
Sets the vertical alignment of the `text` within the
box. Has an effect only if an explicit height is set to
override the text height.
visible
Determines whether or not this annotation is visible.
width
Sets an explicit width for the text box. null (default)
lets the text set the box width. Wider text will be
clipped. There is no automatic wrapping; use <br> to
start a new line.
x
Sets the annotation's x position. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
xanchor
Sets the text box's horizontal position anchor This
anchor binds the `x` position to the "left", "center"
or "right" of the annotation. For example, if `x` is
set to 1, `xref` to "paper" and `xanchor` to "right"
then the right-most portion of the annotation lines up
with the right-most edge of the plotting area. If
"auto", the anchor is equivalent to "center" for data-
referenced annotations or if there is an arrow, whereas
for paper-referenced with no arrow, the anchor picked
corresponds to the closest side.
xclick
Toggle this annotation when clicking a data point whose
`x` value is `xclick` rather than the annotation's `x`
value.
xref
Sets the annotation's x coordinate axis. If set to an x
axis id (e.g. "x" or "x2"), the `x` position refers to
an x coordinate If set to "paper", the `x` position
refers to the distance from the left side of the
plotting area in normalized coordinates where 0 (1)
corresponds to the left (right) side.
xshift
Shifts the position of the whole annotation and arrow
to the right (positive) or left (negative) by this many
pixels.
y
Sets the annotation's y position. If the axis `type` is
"log", then you must take the log of your desired
range. If the axis `type` is "date", it should be date
strings, like date data, though Date objects and unix
milliseconds will be accepted and converted to strings.
If the axis `type` is "category", it should be numbers,
using the scale where each category is assigned a
serial number from zero in the order it appears.
yanchor
Sets the text box's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the annotation. For example, if `y` is set
to 1, `yref` to "paper" and `yanchor` to "top" then the
top-most portion of the annotation lines up with the
top-most edge of the plotting area. If "auto", the
anchor is equivalent to "middle" for data-referenced
annotations or if there is an arrow, whereas for paper-
referenced with no arrow, the anchor picked corresponds
to the closest side.
yclick
Toggle this annotation when clicking a data point whose
`y` value is `yclick` rather than the annotation's `y`
value.
yref
Sets the annotation's y coordinate axis. If set to an y
axis id (e.g. "y" or "y2"), the `y` position refers to
an y coordinate If set to "paper", the `y` position
refers to the distance from the bottom of the plotting
area in normalized coordinates where 0 (1) corresponds
to the bottom (top).
yshift
Shifts the position of the whole annotation and arrow
up (positive) or down (negative) by this many pixels.
Returns
-------
Annotation
"""
super(Annotation, self).__init__("annotations")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.Annotation
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.Annotation`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("arrowcolor", None)
_v = arrowcolor if arrowcolor is not None else _v
if _v is not None:
self["arrowcolor"] = _v
_v = arg.pop("arrowhead", None)
_v = arrowhead if arrowhead is not None else _v
if _v is not None:
self["arrowhead"] = _v
_v = arg.pop("arrowside", None)
_v = arrowside if arrowside is not None else _v
if _v is not None:
self["arrowside"] = _v
_v = arg.pop("arrowsize", None)
_v = arrowsize if arrowsize is not None else _v
if _v is not None:
self["arrowsize"] = _v
_v = arg.pop("arrowwidth", None)
_v = arrowwidth if arrowwidth is not None else _v
if _v is not None:
self["arrowwidth"] = _v
_v = arg.pop("ax", None)
_v = ax if ax is not None else _v
if _v is not None:
self["ax"] = _v
_v = arg.pop("axref", None)
_v = axref if axref is not None else _v
if _v is not None:
self["axref"] = _v
_v = arg.pop("ay", None)
_v = ay if ay is not None else _v
if _v is not None:
self["ay"] = _v
_v = arg.pop("ayref", None)
_v = ayref if ayref is not None else _v
if _v is not None:
self["ayref"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderpad", None)
_v = borderpad if borderpad is not None else _v
if _v is not None:
self["borderpad"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("captureevents", None)
_v = captureevents if captureevents is not None else _v
if _v is not None:
self["captureevents"] = _v
_v = arg.pop("clicktoshow", None)
_v = clicktoshow if clicktoshow is not None else _v
if _v is not None:
self["clicktoshow"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("height", None)
_v = height if height is not None else _v
if _v is not None:
self["height"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("showarrow", None)
_v = showarrow if showarrow is not None else _v
if _v is not None:
self["showarrow"] = _v
_v = arg.pop("standoff", None)
_v = standoff if standoff is not None else _v
if _v is not None:
self["standoff"] = _v
_v = arg.pop("startarrowhead", None)
_v = startarrowhead if startarrowhead is not None else _v
if _v is not None:
self["startarrowhead"] = _v
_v = arg.pop("startarrowsize", None)
_v = startarrowsize if startarrowsize is not None else _v
if _v is not None:
self["startarrowsize"] = _v
_v = arg.pop("startstandoff", None)
_v = startstandoff if startstandoff is not None else _v
if _v is not None:
self["startstandoff"] = _v
_v = arg.pop("templateitemname", None)
_v = templateitemname if templateitemname is not None else _v
if _v is not None:
self["templateitemname"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textangle", None)
_v = textangle if textangle is not None else _v
if _v is not None:
self["textangle"] = _v
_v = arg.pop("valign", None)
_v = valign if valign is not None else _v
if _v is not None:
self["valign"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xclick", None)
_v = xclick if xclick is not None else _v
if _v is not None:
self["xclick"] = _v
_v = arg.pop("xref", None)
_v = xref if xref is not None else _v
if _v is not None:
self["xref"] = _v
_v = arg.pop("xshift", None)
_v = xshift if xshift is not None else _v
if _v is not None:
self["xshift"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("yclick", None)
_v = yclick if yclick is not None else _v
if _v is not None:
self["yclick"] = _v
_v = arg.pop("yref", None)
_v = yref if yref is not None else _v
if _v is not None:
self["yref"] = _v
_v = arg.pop("yshift", None)
_v = yshift if yshift is not None else _v
if _v is not None:
self["yshift"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_count.py | <reponame>acrucetta/Chicago_COVI_WebApp
from pandas import DataFrame, Series
import pandas._testing as tm
class TestDataFrameCount:
def test_count(self):
# corner case
frame = DataFrame()
ct1 = frame.count(1)
assert isinstance(ct1, Series)
ct2 = frame.count(0)
assert isinstance(ct2, Series)
# GH#423
df = DataFrame(index=range(10))
result = df.count(1)
expected = Series(0, index=df.index)
tm.assert_series_equal(result, expected)
df = DataFrame(columns=range(10))
result = df.count(0)
expected = Series(0, index=df.columns)
tm.assert_series_equal(result, expected)
df = DataFrame()
result = df.count()
expected = Series(0, index=[])
tm.assert_series_equal(result, expected)
def test_count_objects(self, float_string_frame):
dm = DataFrame(float_string_frame._series)
df = DataFrame(float_string_frame._series)
tm.assert_series_equal(dm.count(), df.count())
tm.assert_series_equal(dm.count(1), df.count(1))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scattergeo.py | from plotly.graph_objs import Scattergeo
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/_choropleth.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1-10
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Choropleth(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "choropleth"
_valid_props = {
"autocolorscale",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"featureidkey",
"geo",
"geojson",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"legendgroup",
"locationmode",
"locations",
"locationssrc",
"marker",
"meta",
"metasrc",
"name",
"reversescale",
"selected",
"selectedpoints",
"showlegend",
"showscale",
"stream",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"unselected",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
or in *pixels. Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.choropl
eth.colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.choropleth.colorbar.tickformatstopdefaults),
sets the default property values to use for
elements of choropleth.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.choropleth.colorba
r.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
choropleth.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
choropleth.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.choropleth.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# featureidkey
# ------------
@property
def featureidkey(self):
"""
Sets the key in GeoJSON features which is used as id to match
the items included in the `locations` array. Only has an effect
when `geojson` is set. Support nested property, for example
"properties.name".
The 'featureidkey' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["featureidkey"]
@featureidkey.setter
def featureidkey(self, val):
self["featureidkey"] = val
# geo
# ---
@property
def geo(self):
"""
Sets a reference between this trace's geospatial coordinates
and a geographic map. If "geo" (the default value), the
geospatial coordinates refer to `layout.geo`. If "geo2", the
geospatial coordinates refer to `layout.geo2`, and so on.
The 'geo' property is an identifier of a particular
subplot, of type 'geo', that may be specified as the string 'geo'
optionally followed by an integer >= 1
(e.g. 'geo', 'geo1', 'geo2', 'geo3', etc.)
Returns
-------
str
"""
return self["geo"]
@geo.setter
def geo(self, val):
self["geo"] = val
# geojson
# -------
@property
def geojson(self):
"""
Sets optional GeoJSON data associated with this trace. If not
given, the features on the base map are used. It can be set as
a valid GeoJSON object or as a URL string. Note that we only
accept GeoJSONs of type "FeatureCollection" or "Feature" with
geometries of type "Polygon" or "MultiPolygon".
The 'geojson' property accepts values of any type
Returns
-------
Any
"""
return self["geojson"]
@geojson.setter
def geojson(self, val):
self["geojson"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['location', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'location+z')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.choropleth.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>". To
hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Same as `text`.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# locationmode
# ------------
@property
def locationmode(self):
"""
Determines the set of locations used to match entries in
`locations` to regions on the map. Values "ISO-3", "USA-
states", *country names* correspond to features on the base map
and value "geojson-id" corresponds to features from a custom
GeoJSON linked to the `geojson` attribute.
The 'locationmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['ISO-3', 'USA-states', 'country names', 'geojson-id']
Returns
-------
Any
"""
return self["locationmode"]
@locationmode.setter
def locationmode(self, val):
self["locationmode"] = val
# locations
# ---------
@property
def locations(self):
"""
Sets the coordinates via location IDs or names. See
`locationmode` for more info.
The 'locations' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["locations"]
@locations.setter
def locations(self, val):
self["locations"] = val
# locationssrc
# ------------
@property
def locationssrc(self):
"""
Sets the source reference on Chart Studio Cloud for locations
.
The 'locationssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["locationssrc"]
@locationssrc.setter
def locationssrc(self, val):
self["locationssrc"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
line
:class:`plotly.graph_objects.choropleth.marker.
Line` instance or dict with compatible
properties
opacity
Sets the opacity of the locations.
opacitysrc
Sets the source reference on Chart Studio Cloud
for opacity .
Returns
-------
plotly.graph_objs.choropleth.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appear as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# selected
# --------
@property
def selected(self):
"""
The 'selected' property is an instance of Selected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Selected`
- A dict of string/value properties that will be passed
to the Selected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.choropleth.selecte
d.Marker` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.choropleth.Selected
"""
return self["selected"]
@selected.setter
def selected(self, val):
self["selected"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas, any other non-array values
means no selection all where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.choropleth.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets the text elements associated with each location.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace, Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# unselected
# ----------
@property
def unselected(self):
"""
The 'unselected' property is an instance of Unselected
that may be specified as:
- An instance of :class:`plotly.graph_objs.choropleth.Unselected`
- A dict of string/value properties that will be passed
to the Unselected constructor
Supported dict properties:
marker
:class:`plotly.graph_objects.choropleth.unselec
ted.Marker` instance or dict with compatible
properties
Returns
-------
plotly.graph_objs.choropleth.Unselected
"""
return self["unselected"]
@unselected.setter
def unselected(self, val):
self["unselected"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# z
# -
@property
def z(self):
"""
Sets the color values.
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zauto
# -----
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
# zmax
# ----
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmid
# ----
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
# zmin
# ----
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
    # type
    # ----
    @property
    def type(self):
        # Trace-type identifier ("Choropleth" per this class's
        # constructor docstring). Read directly from the underlying
        # props dict; no setter is defined in this section.
        return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text name/description list for every property of this
        # trace type (presumably spliced into generated docstrings such
        # as __init__'s -- defined by the surrounding tooling, verify
        # against the base class). The literal must not be edited by
        # hand: it is runtime data.
        return """\
        autocolorscale
            Determines whether the colorscale is a default palette
            (`autocolorscale: true`) or the palette determined by
            `colorscale`. In case `colorscale` is unspecified or
            `autocolorscale` is true, the default palette will be
            chosen according to whether numbers in the `color`
            array are all positive, all negative or mixed.
        coloraxis
            Sets a reference to a shared color axis. References to
            these shared color axes are "coloraxis", "coloraxis2",
            "coloraxis3", etc. Settings for these shared color axes
            are set in the layout, under `layout.coloraxis`,
            `layout.coloraxis2`, etc. Note that multiple color
            scales can be linked to the same color axis.
        colorbar
            :class:`plotly.graph_objects.choropleth.ColorBar`
            instance or dict with compatible properties
        colorscale
            Sets the colorscale. The colorscale must be an array
            containing arrays mapping a normalized value to an rgb,
            rgba, hex, hsl, hsv, or named color string. At minimum,
            a mapping for the lowest (0) and highest (1) values are
            required. For example, `[[0, 'rgb(0,0,255)'], [1,
            'rgb(255,0,0)']]`. To control the bounds of the
            colorscale in color space, use`zmin` and `zmax`.
            Alternatively, `colorscale` may be a palette name
            string of the following list: Greys,YlGnBu,Greens,YlOrR
            d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
            ot,Blackbody,Earth,Electric,Viridis,Cividis.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            customdata .
        featureidkey
            Sets the key in GeoJSON features which is used as id to
            match the items included in the `locations` array. Only
            has an effect when `geojson` is set. Support nested
            property, for example "properties.name".
        geo
            Sets a reference between this trace's geospatial
            coordinates and a geographic map. If "geo" (the default
            value), the geospatial coordinates refer to
            `layout.geo`. If "geo2", the geospatial coordinates
            refer to `layout.geo2`, and so on.
        geojson
            Sets optional GeoJSON data associated with this trace.
            If not given, the features on the base map are used. It
            can be set as a valid GeoJSON object or as a URL
            string. Note that we only accept GeoJSONs of type
            "FeatureCollection" or "Feature" with geometries of
            type "Polygon" or "MultiPolygon".
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            hoverinfo .
        hoverlabel
            :class:`plotly.graph_objects.choropleth.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-3.x-api-
            reference/blob/master/Time-Formatting.md#format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. Anything contained in tag `<extra>` is
            displayed in the secondary box, for example
            "<extra>{fullData.name}</extra>". To hide the secondary
            box completely, use an empty tag `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            hovertemplate .
        hovertext
            Same as `text`.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            hovertext .
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            ids .
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        locationmode
            Determines the set of locations used to match entries
            in `locations` to regions on the map. Values "ISO-3",
            "USA-states", *country names* correspond to features on
            the base map and value "geojson-id" corresponds to
            features from a custom GeoJSON linked to the `geojson`
            attribute.
        locations
            Sets the coordinates via location IDs or names. See
            `locationmode` for more info.
        locationssrc
            Sets the source reference on Chart Studio Cloud for
            locations .
        marker
            :class:`plotly.graph_objects.choropleth.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        reversescale
            Reverses the color mapping if true. If true, `zmin`
            will correspond to the last color in the array and
            `zmax` will correspond to the first color.
        selected
            :class:`plotly.graph_objects.choropleth.Selected`
            instance or dict with compatible properties
        selectedpoints
            Array containing integer indices of selected points.
            Has an effect only for traces that support selections.
            Note that an empty array means an empty selection where
            the `unselected` are turned on for all points, whereas,
            any other non-array values means no selection all where
            the `selected` and `unselected` styles have no effect.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        showscale
            Determines whether or not a colorbar is displayed for
            this trace.
        stream
            :class:`plotly.graph_objects.choropleth.Stream`
            instance or dict with compatible properties
        text
            Sets the text elements associated with each location.
        textsrc
            Sets the source reference on Chart Studio Cloud for
            text .
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        unselected
            :class:`plotly.graph_objects.choropleth.Unselected`
            instance or dict with compatible properties
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        z
            Sets the color values.
        zauto
            Determines whether or not the color domain is computed
            with respect to the input data (here in `z`) or the
            bounds set in `zmin` and `zmax`  Defaults to `false`
            when `zmin` and `zmax` are set by the user.
        zmax
            Sets the upper bound of the color domain. Value should
            have the same units as in `z` and if set, `zmin` must
            be set as well.
        zmid
            Sets the mid-point of the color domain by scaling
            `zmin` and/or `zmax` to be equidistant to this point.
            Value should have the same units as in `z`. Has no
            effect when `zauto` is `false`.
        zmin
            Sets the lower bound of the color domain. Value should
            have the same units as in `z` and if set, `zmax` must
            be set as well.
        zsrc
            Sets the source reference on Chart Studio Cloud for z
            .
        """
def __init__(
self,
arg=None,
autocolorscale=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
featureidkey=None,
geo=None,
geojson=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
legendgroup=None,
locationmode=None,
locations=None,
locationssrc=None,
marker=None,
meta=None,
metasrc=None,
name=None,
reversescale=None,
selected=None,
selectedpoints=None,
showlegend=None,
showscale=None,
stream=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
unselected=None,
visible=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs
):
"""
Construct a new Choropleth object
The data that describes the choropleth value-to-color mapping
is set in `z`. The geographic locations corresponding to each
value in `z` are set in `locations`.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Choropleth`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.choropleth.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
featureidkey
Sets the key in GeoJSON features which is used as id to
match the items included in the `locations` array. Only
has an effect when `geojson` is set. Support nested
property, for example "properties.name".
geo
Sets a reference between this trace's geospatial
coordinates and a geographic map. If "geo" (the default
value), the geospatial coordinates refer to
`layout.geo`. If "geo2", the geospatial coordinates
refer to `layout.geo2`, and so on.
geojson
Sets optional GeoJSON data associated with this trace.
If not given, the features on the base map are used. It
can be set as a valid GeoJSON object or as a URL
string. Note that we only accept GeoJSONs of type
"FeatureCollection" or "Feature" with geometries of
type "Polygon" or "MultiPolygon".
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.choropleth.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Same as `text`.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
locationmode
Determines the set of locations used to match entries
in `locations` to regions on the map. Values "ISO-3",
"USA-states", *country names* correspond to features on
the base map and value "geojson-id" corresponds to
features from a custom GeoJSON linked to the `geojson`
attribute.
locations
Sets the coordinates via location IDs or names. See
`locationmode` for more info.
locationssrc
Sets the source reference on Chart Studio Cloud for
locations .
marker
:class:`plotly.graph_objects.choropleth.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
selected
:class:`plotly.graph_objects.choropleth.Selected`
instance or dict with compatible properties
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas,
any other non-array values means no selection all where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.choropleth.Stream`
instance or dict with compatible properties
text
Sets the text elements associated with each location.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
unselected
:class:`plotly.graph_objects.choropleth.Unselected`
instance or dict with compatible properties
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the color values.
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Choropleth
"""
super(Choropleth, self).__init__("choropleth")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Choropleth
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Choropleth`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("featureidkey", None)
_v = featureidkey if featureidkey is not None else _v
if _v is not None:
self["featureidkey"] = _v
_v = arg.pop("geo", None)
_v = geo if geo is not None else _v
if _v is not None:
self["geo"] = _v
_v = arg.pop("geojson", None)
_v = geojson if geojson is not None else _v
if _v is not None:
self["geojson"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("locationmode", None)
_v = locationmode if locationmode is not None else _v
if _v is not None:
self["locationmode"] = _v
_v = arg.pop("locations", None)
_v = locations if locations is not None else _v
if _v is not None:
self["locations"] = _v
_v = arg.pop("locationssrc", None)
_v = locationssrc if locationssrc is not None else _v
if _v is not None:
self["locationssrc"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("selected", None)
_v = selected if selected is not None else _v
if _v is not None:
self["selected"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("unselected", None)
_v = unselected if unselected is not None else _v
if _v is not None:
self["unselected"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zauto", None)
_v = zauto if zauto is not None else _v
if _v is not None:
self["zauto"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmid", None)
_v = zmid if zmid is not None else _v
if _v is not None:
self["zmid"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "choropleth"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/geo/lonaxis/__init__.py | <gh_stars>1000+
# Validator re-exports for the ``layout.geo.lonaxis`` namespace.
#
# On Python >= 3.7 the validators are exposed lazily through a module-level
# ``__getattr__`` (PEP 562); older interpreters fall back to eager imports so
# the same public names are available either way.
import sys

if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._tick0.Tick0Validator",
            "._showgrid.ShowgridValidator",
            "._range.RangeValidator",
            "._gridwidth.GridwidthValidator",
            "._gridcolor.GridcolorValidator",
            "._dtick.DtickValidator",
        ],
    )
else:
    # Legacy interpreters: import everything up front.
    from ._tick0 import Tick0Validator
    from ._showgrid import ShowgridValidator
    from ._range import RangeValidator
    from ._gridwidth import GridwidthValidator
    from ._gridcolor import GridcolorValidator
    from ._dtick import DtickValidator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/_spikemode.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>10-100
import _plotly_utils.basevalidators
class SpikemodeValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for the ``layout.xaxis.spikemode`` attribute."""

    def __init__(self, plotly_name="spikemode", parent_name="layout.xaxis", **kwargs):
        # Schema defaults; an explicit value already present in ``kwargs``
        # takes precedence over each default below.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("flags", ["toaxis", "across", "marker"])
        kwargs.setdefault("role", "style")
        super(SpikemodeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_first_and_last.py | """
Note: includes tests for `last`
"""
import numpy as np
import pytest
from pandas import Series, date_range
import pandas._testing as tm
class TestFirst:
    """Tests for ``Series.first`` and ``Series.last`` offset-based slicing."""

    def test_first_subset(self):
        # With a 12-hourly index, "10d" covers twenty observations.
        index = date_range("1/1/2000", "1/1/2010", freq="12h")
        series = Series(np.random.randn(len(index)), index=index)
        assert len(series.first("10d")) == 20

        # With a daily index, "10d" covers exactly ten observations.
        index = date_range("1/1/2000", "1/1/2010", freq="D")
        series = Series(np.random.randn(len(index)), index=index)
        assert len(series.first("10d")) == 10

        tm.assert_series_equal(series.first("3M"), series[:"3/31/2000"])
        tm.assert_series_equal(series.first("21D"), series[:21])

        # An empty series comes back empty.
        tm.assert_series_equal(series[:0].first("3M"), series[:0])

    def test_first_raises(self):
        # GH#20725: first() is only defined for a DatetimeIndex.
        ser = Series(list("abc"))
        msg = "'first' only supports a DatetimeIndex index"
        with pytest.raises(TypeError, match=msg):
            ser.first("1D")

    def test_last_subset(self):
        # With a 12-hourly index, "10d" covers twenty observations.
        index = date_range("1/1/2000", "1/1/2010", freq="12h")
        series = Series(np.random.randn(len(index)), index=index)
        assert len(series.last("10d")) == 20

        # With a daily index, "10d" covers exactly ten observations.
        index = date_range("1/1/2000", "1/1/2010", freq="D")
        series = Series(np.random.randn(len(index)), index=index)
        assert len(series.last("10d")) == 10

        # Offset-based and positional tails agree for "21D" on a daily index.
        tm.assert_series_equal(series.last("21D"), series["12/12/2009":])
        tm.assert_series_equal(series.last("21D"), series[-21:])

        # An empty series comes back empty.
        tm.assert_series_equal(series[:0].last("3M"), series[:0])

    def test_last_raises(self):
        # GH#20725: last() is only defined for a DatetimeIndex.
        ser = Series(list("abc"))
        msg = "'last' only supports a DatetimeIndex index"
        with pytest.raises(TypeError, match=msg):
            ser.last("1D")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_pointcloud.py | from plotly.graph_objs import Pointcloud
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_setops.py | <filename>env/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_setops.py
import numpy as np
import pytest
import pandas as pd
from pandas import Int64Index, TimedeltaIndex, timedelta_range
import pandas._testing as tm
from pandas.tseries.offsets import Hour
class TestTimedeltaIndex:
    """Set-operation (union/intersection) behavior of TimedeltaIndex."""

    def test_union(self):
        # Overlapping ranges union into one contiguous range.
        i1 = timedelta_range("1day", periods=5)
        i2 = timedelta_range("3day", periods=5)
        result = i1.union(i2)
        expected = timedelta_range("1day", periods=7)
        tm.assert_index_equal(result, expected)

        # Mixed-type union: should not raise in either direction.
        i1 = Int64Index(np.arange(0, 20, 2))
        i2 = timedelta_range(start="1 day", periods=10, freq="D")
        i1.union(i2)  # Works
        i2.union(i1)  # Fails with "AttributeError: can't set attribute"

    def test_union_sort_false(self):
        tdi = timedelta_range("1day", periods=5)

        left = tdi[3:]
        right = tdi[:3]

        # Check that we are testing the desired code path
        assert left._can_fast_union(right)

        result = left.union(right)
        tm.assert_index_equal(result, tdi)

        # sort=False keeps the unsorted concatenation order.
        result = left.union(right, sort=False)
        expected = pd.TimedeltaIndex(["4 Days", "5 Days", "1 Days", "2 Day", "3 Days"])
        tm.assert_index_equal(result, expected)

    def test_union_coverage(self):
        idx = TimedeltaIndex(["3d", "1d", "2d"])
        ordered = TimedeltaIndex(idx.sort_values(), freq="infer")
        result = ordered.union(idx)
        tm.assert_index_equal(result, ordered)

        # Union with an empty slice of itself is the identity.
        result = ordered[:0].union(ordered)
        tm.assert_index_equal(result, ordered)
        assert result.freq == ordered.freq

    def test_union_bug_1730(self):
        # Union of ranges with incompatible freqs (3H vs 4H).
        rng_a = timedelta_range("1 day", periods=4, freq="3H")
        rng_b = timedelta_range("1 day", periods=4, freq="4H")

        result = rng_a.union(rng_b)
        exp = TimedeltaIndex(sorted(set(rng_a) | set(rng_b)))
        tm.assert_index_equal(result, exp)

    def test_union_bug_1745(self):
        left = TimedeltaIndex(["1 day 15:19:49.695000"])
        right = TimedeltaIndex(
            ["2 day 13:04:21.322000", "1 day 15:27:24.873000", "1 day 15:31:05.350000"]
        )

        result = left.union(right)
        exp = TimedeltaIndex(sorted(set(left) | set(right)))
        tm.assert_index_equal(result, exp)

    def test_union_bug_4564(self):
        left = timedelta_range("1 day", "30d")
        right = left + pd.offsets.Minute(15)

        result = left.union(right)
        exp = TimedeltaIndex(sorted(set(left) | set(right)))
        tm.assert_index_equal(result, exp)

    def test_union_freq_infer(self):
        # When taking the union of two TimedeltaIndexes, we infer
        # a freq even if the arguments don't have freq. This matches
        # DatetimeIndex behavior.
        tdi = pd.timedelta_range("1 Day", periods=5)
        left = tdi[[0, 1, 3, 4]]
        right = tdi[[2, 3, 1]]

        assert left.freq is None
        assert right.freq is None

        result = left.union(right)
        tm.assert_index_equal(result, tdi)
        assert result.freq == "D"

    def test_intersection_bug_1708(self):
        # Disjoint indexes (shifted by 5 hours) -> empty intersection.
        # Note: "&" is the set-intersection operator for Index objects.
        index_1 = timedelta_range("1 day", periods=4, freq="h")
        index_2 = index_1 + pd.offsets.Hour(5)

        result = index_1 & index_2
        assert len(result) == 0

        index_1 = timedelta_range("1 day", periods=4, freq="h")
        index_2 = index_1 + pd.offsets.Hour(1)

        result = index_1 & index_2
        expected = timedelta_range("1 day 01:00:00", periods=3, freq="h")
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("sort", [None, False])
    def test_intersection_equal(self, sort):
        # GH 24471 Test intersection outcome given the sort keyword
        # for equal indices intersection should return the original index
        first = timedelta_range("1 day", periods=4, freq="h")
        second = timedelta_range("1 day", periods=4, freq="h")
        intersect = first.intersection(second, sort=sort)
        if sort is None:
            tm.assert_index_equal(intersect, second.sort_values())
        assert tm.equalContents(intersect, second)

        # Corner cases
        inter = first.intersection(first, sort=sort)
        assert inter is first

    @pytest.mark.parametrize("period_1, period_2", [(0, 4), (4, 0)])
    @pytest.mark.parametrize("sort", [None, False])
    def test_intersection_zero_length(self, period_1, period_2, sort):
        # GH 24471 test for non overlap the intersection should be zero length
        index_1 = timedelta_range("1 day", periods=period_1, freq="h")
        index_2 = timedelta_range("1 day", periods=period_2, freq="h")
        expected = timedelta_range("1 day", periods=0, freq="h")
        result = index_1.intersection(index_2, sort=sort)
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("sort", [None, False])
    def test_zero_length_input_index(self, sort):
        # GH 24966 test for 0-len intersections are copied
        index_1 = timedelta_range("1 day", periods=0, freq="h")
        index_2 = timedelta_range("1 day", periods=3, freq="h")
        result = index_1.intersection(index_2, sort=sort)
        # Result must be a new object, not either input.
        assert index_1 is not result
        assert index_2 is not result
        tm.assert_copy(result, index_1)

    @pytest.mark.parametrize(
        "rng, expected",
        # if target has the same name, it is preserved
        [
            (
                timedelta_range("1 day", periods=5, freq="h", name="idx"),
                timedelta_range("1 day", periods=4, freq="h", name="idx"),
            ),
            # if target name is different, it will be reset
            (
                timedelta_range("1 day", periods=5, freq="h", name="other"),
                timedelta_range("1 day", periods=4, freq="h", name=None),
            ),
            # if no overlap exists return empty index
            (
                timedelta_range("1 day", periods=10, freq="h", name="idx")[5:],
                TimedeltaIndex([], name="idx"),
            ),
        ],
    )
    @pytest.mark.parametrize("sort", [None, False])
    def test_intersection(self, rng, expected, sort):
        # GH 4690 (with tz)
        base = timedelta_range("1 day", periods=4, freq="h", name="idx")
        result = base.intersection(rng, sort=sort)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freq == expected.freq

    @pytest.mark.parametrize(
        "rng, expected",
        # part intersection works
        [
            (
                TimedeltaIndex(["5 hour", "2 hour", "4 hour", "9 hour"], name="idx"),
                TimedeltaIndex(["2 hour", "4 hour"], name="idx"),
            ),
            # reordered part intersection
            (
                TimedeltaIndex(["2 hour", "5 hour", "5 hour", "1 hour"], name="other"),
                TimedeltaIndex(["1 hour", "2 hour"], name=None),
            ),
            # reversed index
            (
                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")[
                    ::-1
                ],
                TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx"),
            ),
        ],
    )
    @pytest.mark.parametrize("sort", [None, False])
    def test_intersection_non_monotonic(self, rng, expected, sort):
        # 24471 non-monotonic
        base = TimedeltaIndex(["1 hour", "2 hour", "4 hour", "3 hour"], name="idx")
        result = base.intersection(rng, sort=sort)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name

        # if reversed order, frequency is still the same
        if all(base == rng[::-1]) and sort is None:
            assert isinstance(result.freq, Hour)
        else:
            assert result.freq is None
class TestTimedeltaIndexDifference:
    """Tests for ``TimedeltaIndex.difference``."""

    @pytest.mark.parametrize("sort", [None, False])
    def test_difference_freq(self, sort):
        # GH14323: Difference of TimedeltaIndex should not preserve frequency
        full = timedelta_range("0 days", "5 days", freq="D")

        # Removing the middle leaves the two endpoints, with freq dropped.
        middle = timedelta_range("1 days", "4 days", freq="D")
        diff = full.difference(middle, sort)
        expected = TimedeltaIndex(["0 days", "5 days"], freq=None)
        tm.assert_index_equal(diff, expected)
        tm.assert_attr_equal("freq", diff, expected)

        # Removing the tail leaves the head, again with freq dropped.
        tail = timedelta_range("2 days", "5 days", freq="D")
        diff = full.difference(tail, sort)
        expected = TimedeltaIndex(["0 days", "1 days"], freq=None)
        tm.assert_index_equal(diff, expected)
        tm.assert_attr_equal("freq", diff, expected)

    @pytest.mark.parametrize("sort", [None, False])
    def test_difference_sort(self, sort):
        # Difference of an unsorted index is sorted only when sort is None.
        unordered = pd.TimedeltaIndex(
            ["5 days", "3 days", "2 days", "4 days", "1 days", "0 days"]
        )

        other = timedelta_range("1 days", "4 days", freq="D")
        diff = unordered.difference(other, sort)
        expected = TimedeltaIndex(["5 days", "0 days"], freq=None)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(diff, expected)
        tm.assert_attr_equal("freq", diff, expected)

        other = timedelta_range("2 days", "5 days", freq="D")
        diff = unordered.difference(other, sort)
        expected = TimedeltaIndex(["1 days", "0 days"], freq=None)
        if sort is None:
            expected = expected.sort_values()
        tm.assert_index_equal(diff, expected)
        tm.assert_attr_equal("freq", diff, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/__init__.py | <gh_stars>1-10
# Validator re-exports for the plotly ``layout`` namespace (generated code).
#
# On Python >= 3.7 the validators are exposed lazily through a module-level
# ``__getattr__`` (PEP 562); older interpreters fall back to eager imports so
# the same public names are importable either way.
import sys

if sys.version_info < (3, 7):
    # Legacy interpreters: import every validator up front.
    from ._yaxis import YaxisValidator
    from ._xaxis import XaxisValidator
    from ._width import WidthValidator
    from ._waterfallmode import WaterfallmodeValidator
    from ._waterfallgroupgap import WaterfallgroupgapValidator
    from ._waterfallgap import WaterfallgapValidator
    from ._violinmode import ViolinmodeValidator
    from ._violingroupgap import ViolingroupgapValidator
    from ._violingap import ViolingapValidator
    from ._updatemenudefaults import UpdatemenudefaultsValidator
    from ._updatemenus import UpdatemenusValidator
    from ._uniformtext import UniformtextValidator
    from ._uirevision import UirevisionValidator
    from ._treemapcolorway import TreemapcolorwayValidator
    from ._transition import TransitionValidator
    from ._title import TitleValidator
    from ._ternary import TernaryValidator
    from ._template import TemplateValidator
    from ._sunburstcolorway import SunburstcolorwayValidator
    from ._spikedistance import SpikedistanceValidator
    from ._sliderdefaults import SliderdefaultsValidator
    from ._sliders import SlidersValidator
    from ._showlegend import ShowlegendValidator
    from ._shapedefaults import ShapedefaultsValidator
    from ._shapes import ShapesValidator
    from ._separators import SeparatorsValidator
    from ._selectionrevision import SelectionrevisionValidator
    from ._selectdirection import SelectdirectionValidator
    from ._scene import SceneValidator
    from ._radialaxis import RadialaxisValidator
    from ._polar import PolarValidator
    from ._plot_bgcolor import Plot_BgcolorValidator
    from ._piecolorway import PiecolorwayValidator
    from ._paper_bgcolor import Paper_BgcolorValidator
    from ._orientation import OrientationValidator
    from ._newshape import NewshapeValidator
    from ._modebar import ModebarValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._margin import MarginValidator
    from ._mapbox import MapboxValidator
    from ._legend import LegendValidator
    from ._imagedefaults import ImagedefaultsValidator
    from ._images import ImagesValidator
    from ._hovermode import HovermodeValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverdistance import HoverdistanceValidator
    from ._hidesources import HidesourcesValidator
    from ._hiddenlabelssrc import HiddenlabelssrcValidator
    from ._hiddenlabels import HiddenlabelsValidator
    from ._height import HeightValidator
    from ._grid import GridValidator
    from ._geo import GeoValidator
    from ._funnelmode import FunnelmodeValidator
    from ._funnelgroupgap import FunnelgroupgapValidator
    from ._funnelgap import FunnelgapValidator
    from ._funnelareacolorway import FunnelareacolorwayValidator
    from ._font import FontValidator
    from ._extendtreemapcolors import ExtendtreemapcolorsValidator
    from ._extendsunburstcolors import ExtendsunburstcolorsValidator
    from ._extendpiecolors import ExtendpiecolorsValidator
    from ._extendfunnelareacolors import ExtendfunnelareacolorsValidator
    from ._editrevision import EditrevisionValidator
    from ._dragmode import DragmodeValidator
    from ._direction import DirectionValidator
    from ._datarevision import DatarevisionValidator
    from ._colorway import ColorwayValidator
    from ._colorscale import ColorscaleValidator
    from ._coloraxis import ColoraxisValidator
    from ._clickmode import ClickmodeValidator
    from ._calendar import CalendarValidator
    from ._boxmode import BoxmodeValidator
    from ._boxgroupgap import BoxgroupgapValidator
    from ._boxgap import BoxgapValidator
    from ._barnorm import BarnormValidator
    from ._barmode import BarmodeValidator
    from ._bargroupgap import BargroupgapValidator
    from ._bargap import BargapValidator
    from ._autosize import AutosizeValidator
    from ._annotationdefaults import AnnotationdefaultsValidator
    from ._annotations import AnnotationsValidator
    from ._angularaxis import AngularaxisValidator
    from ._activeshape import ActiveshapeValidator
else:
    # Lazy loading: ``relative_import`` synthesizes __all__, __getattr__ and
    # __dir__ from the dotted paths below, deferring each submodule import
    # until first attribute access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yaxis.YaxisValidator",
            "._xaxis.XaxisValidator",
            "._width.WidthValidator",
            "._waterfallmode.WaterfallmodeValidator",
            "._waterfallgroupgap.WaterfallgroupgapValidator",
            "._waterfallgap.WaterfallgapValidator",
            "._violinmode.ViolinmodeValidator",
            "._violingroupgap.ViolingroupgapValidator",
            "._violingap.ViolingapValidator",
            "._updatemenudefaults.UpdatemenudefaultsValidator",
            "._updatemenus.UpdatemenusValidator",
            "._uniformtext.UniformtextValidator",
            "._uirevision.UirevisionValidator",
            "._treemapcolorway.TreemapcolorwayValidator",
            "._transition.TransitionValidator",
            "._title.TitleValidator",
            "._ternary.TernaryValidator",
            "._template.TemplateValidator",
            "._sunburstcolorway.SunburstcolorwayValidator",
            "._spikedistance.SpikedistanceValidator",
            "._sliderdefaults.SliderdefaultsValidator",
            "._sliders.SlidersValidator",
            "._showlegend.ShowlegendValidator",
            "._shapedefaults.ShapedefaultsValidator",
            "._shapes.ShapesValidator",
            "._separators.SeparatorsValidator",
            "._selectionrevision.SelectionrevisionValidator",
            "._selectdirection.SelectdirectionValidator",
            "._scene.SceneValidator",
            "._radialaxis.RadialaxisValidator",
            "._polar.PolarValidator",
            "._plot_bgcolor.Plot_BgcolorValidator",
            "._piecolorway.PiecolorwayValidator",
            "._paper_bgcolor.Paper_BgcolorValidator",
            "._orientation.OrientationValidator",
            "._newshape.NewshapeValidator",
            "._modebar.ModebarValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._margin.MarginValidator",
            "._mapbox.MapboxValidator",
            "._legend.LegendValidator",
            "._imagedefaults.ImagedefaultsValidator",
            "._images.ImagesValidator",
            "._hovermode.HovermodeValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverdistance.HoverdistanceValidator",
            "._hidesources.HidesourcesValidator",
            "._hiddenlabelssrc.HiddenlabelssrcValidator",
            "._hiddenlabels.HiddenlabelsValidator",
            "._height.HeightValidator",
            "._grid.GridValidator",
            "._geo.GeoValidator",
            "._funnelmode.FunnelmodeValidator",
            "._funnelgroupgap.FunnelgroupgapValidator",
            "._funnelgap.FunnelgapValidator",
            "._funnelareacolorway.FunnelareacolorwayValidator",
            "._font.FontValidator",
            "._extendtreemapcolors.ExtendtreemapcolorsValidator",
            "._extendsunburstcolors.ExtendsunburstcolorsValidator",
            "._extendpiecolors.ExtendpiecolorsValidator",
            "._extendfunnelareacolors.ExtendfunnelareacolorsValidator",
            "._editrevision.EditrevisionValidator",
            "._dragmode.DragmodeValidator",
            "._direction.DirectionValidator",
            "._datarevision.DatarevisionValidator",
            "._colorway.ColorwayValidator",
            "._colorscale.ColorscaleValidator",
            "._coloraxis.ColoraxisValidator",
            "._clickmode.ClickmodeValidator",
            "._calendar.CalendarValidator",
            "._boxmode.BoxmodeValidator",
            "._boxgroupgap.BoxgroupgapValidator",
            "._boxgap.BoxgapValidator",
            "._barnorm.BarnormValidator",
            "._barmode.BarmodeValidator",
            "._bargroupgap.BargroupgapValidator",
            "._bargap.BargapValidator",
            "._autosize.AutosizeValidator",
            "._annotationdefaults.AnnotationdefaultsValidator",
            "._annotations.AnnotationsValidator",
            "._angularaxis.AngularaxisValidator",
            "._activeshape.ActiveshapeValidator",
        ],
    )
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.