repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/io/orca.py | from ._orca import (
ensure_server,
shutdown_server,
validate_executable,
reset_status,
config,
status,
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_activeshape.py | import _plotly_utils.basevalidators
class ActiveshapeValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``layout.activeshape`` property.

    Delegates validation to ``CompoundValidator``; the generated
    ``Activeshape`` data class carries the concrete property schema.
    """

    def __init__(self, plotly_name="activeshape", parent_name="layout", **kwargs):
        super(ActiveshapeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator instantiates.
            data_class_str=kwargs.pop("data_class_str", "Activeshape"),
            # Human-readable docs injected into the generated class docstring.
            data_docs=kwargs.pop(
                "data_docs",
                """
fillcolor
Sets the color filling the active shape'
interior.
opacity
Sets the opacity of the active shape.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/plotly/chunked_requests.py | from __future__ import absolute_import
# Legacy module stub: chart-studio functionality moved out of `plotly` into
# the separate `chart-studio` package, so importing this module immediately
# raises with migration guidance.
from _plotly_future_ import _chart_studio_error

_chart_studio_error("plotly.chunked_requests")
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/doc/glossary.py | """
========
Glossary
========
.. glossary::
along an axis
Axes are defined for arrays with more than one dimension. A
2-dimensional array has two corresponding axes: the first running
vertically downwards across rows (axis 0), and the second running
horizontally across columns (axis 1).
Many operations can take place along one of these axes. For example,
we can sum each row of an array, in which case we operate along
columns, or axis 1::
>>> x = np.arange(12).reshape((3,4))
>>> x
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.sum(axis=1)
array([ 6, 22, 38])
array
A homogeneous container of numerical elements. Each element in the
array occupies a fixed amount of memory (hence homogeneous), and
can be a numerical element of a single type (such as float, int
or complex) or a combination (such as ``(float, int, float)``). Each
array has an associated data-type (or ``dtype``), which describes
the numerical type of its elements::
>>> x = np.array([1, 2, 3], float)
>>> x
array([ 1., 2., 3.])
>>> x.dtype # floating point number, 64 bits of memory per element
dtype('float64')
# More complicated data type: each array element is a combination of
# an integer and a floating point number
>>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)])
array([(1, 2.0), (3, 4.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Fast element-wise operations, called a :term:`ufunc`, operate on arrays.
array_like
Any sequence that can be interpreted as an ndarray. This includes
nested lists, tuples, scalars and existing arrays.
attribute
A property of an object that can be accessed using ``obj.attribute``,
e.g., ``shape`` is an attribute of an array::
>>> x = np.array([1, 2, 3])
>>> x.shape
(3,)
big-endian
When storing a multi-byte value in memory as a sequence of bytes, the
sequence addresses/sends/stores the most significant byte first (lowest
address) and the least significant byte last (highest address). Common in
micro-processors and used for transmission of data over network protocols.
BLAS
`Basic Linear Algebra Subprograms <https://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms>`_
broadcast
NumPy can do operations on arrays whose shapes are mismatched::
>>> x = np.array([1, 2])
>>> y = np.array([[3], [4]])
>>> x
array([1, 2])
>>> y
array([[3],
[4]])
>>> x + y
array([[4, 5],
[5, 6]])
See `numpy.doc.broadcasting` for more information.
C order
See `row-major`
column-major
A way to represent items in a N-dimensional array in the 1-dimensional
computer memory. In column-major order, the leftmost index "varies the
fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the column-major order as::
[1, 4, 2, 5, 3, 6]
Column-major order is also known as the Fortran order, as the Fortran
programming language uses it.
decorator
An operator that transforms a function. For example, a ``log``
decorator may be defined to print debugging information upon
function execution::
>>> def log(f):
... def new_logging_func(*args, **kwargs):
... print("Logging call with parameters:", args, kwargs)
... return f(*args, **kwargs)
...
... return new_logging_func
Now, when we define a function, we can "decorate" it using ``log``::
>>> @log
... def add(a, b):
... return a + b
Calling ``add`` then yields:
>>> add(1, 2)
Logging call with parameters: (1, 2) {}
3
dictionary
Resembling a language dictionary, which provides a mapping between
words and descriptions thereof, a Python dictionary is a mapping
between two objects::
>>> x = {1: 'one', 'two': [1, 2]}
Here, `x` is a dictionary mapping keys to values, in this case
the integer 1 to the string "one", and the string "two" to
the list ``[1, 2]``. The values may be accessed using their
corresponding keys::
>>> x[1]
'one'
>>> x['two']
[1, 2]
Note that dictionaries are not stored in any specific order. Also,
most mutable (see *immutable* below) objects, such as lists, may not
be used as keys.
For more information on dictionaries, read the
`Python tutorial <https://docs.python.org/tutorial/>`_.
field
In a :term:`structured data type`, each sub-type is called a `field`.
The `field` has a name (a string), a type (any valid dtype), and
an optional `title`. See :ref:`arrays.dtypes`
Fortran order
See `column-major`
flattened
Collapsed to a one-dimensional array. See `numpy.ndarray.flatten`
for details.
homogeneous
Describes a block of memory comprised of blocks, each block comprised of
items and of the same size, and blocks are interpreted in exactly the
same way. In the simplest case each block contains a single item, for
instance int32 or float64.
immutable
An object that cannot be modified after execution is called
immutable. Two common examples are strings and tuples.
instance
A class definition gives the blueprint for constructing an object::
>>> class House:
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
>>> h = House() # build a house
Now, ``h`` is called a ``House`` instance. An instance is therefore
a specific realisation of a class.
iterable
A sequence that allows "walking" (iterating) over items, typically
using a loop such as::
>>> x = [1, 2, 3]
>>> [item**2 for item in x]
[1, 4, 9]
It is often used in combination with ``enumerate``::
>>> keys = ['a','b','c']
>>> for n, k in enumerate(keys):
... print("Key %d: %s" % (n, k))
...
Key 0: a
Key 1: b
Key 2: c
itemsize
The size of the dtype element in bytes.
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
lists themselves::
>>> x = [2, 2.0, "two", [2, 2.0]]
The list `x` contains 4 items, each which can be accessed individually::
>>> x[2] # the string 'two'
'two'
>>> x[3] # a list, containing an integer 2 and a float 2.0
[2, 2.0]
It is also possible to select more than one item at a time,
using *slicing*::
>>> x[0:2] # or, equivalently, x[:2]
[2, 2.0]
In code, arrays are often conveniently expressed as nested lists::
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
For more information, read the section on lists in the `Python
tutorial <https://docs.python.org/tutorial/>`_. For a mapping
type (key-value), see *dictionary*.
little-endian
When storing a multi-byte value in memory as a sequence of bytes, the
sequence addresses/sends/stores the least significant byte first (lowest
address) and the most significant byte last (highest address). Common in
x86 processors.
mask
A boolean array, used to select only certain elements for an operation::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> mask = (x > 2)
>>> mask
array([False, False, False, True, True])
>>> x[mask] = -1
>>> x
array([ 0, 1, 2, -1, -1])
masked array
Array that suppresses values indicated by a mask::
>>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True])
>>> x
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
Masked arrays are often used when operating on arrays containing
missing or invalid entries.
matrix
A 2-dimensional ndarray that preserves its two-dimensional nature
throughout operations. It has certain special operations, such as ``*``
(matrix multiplication) and ``**`` (matrix power), defined::
>>> x = np.mat([[1, 2], [3, 4]])
>>> x
matrix([[1, 2],
[3, 4]])
>>> x**2
matrix([[ 7, 10],
[15, 22]])
method
A function associated with an object. For example, each ndarray has a
method called ``repeat``::
>>> x = np.array([1, 2, 3])
>>> x.repeat(2)
array([1, 1, 2, 2, 3, 3])
ndarray
See *array*.
record array
An :term:`ndarray` with :term:`structured data type` which has been
subclassed as ``np.recarray`` and whose dtype is of type ``np.record``,
making the fields of its data type to be accessible by attribute.
reference
If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore,
``a`` and ``b`` are different names for the same Python object.
row-major
A way to represent items in a N-dimensional array in the 1-dimensional
computer memory. In row-major order, the rightmost index "varies
the fastest": for example the array::
[[1, 2, 3],
[4, 5, 6]]
is represented in the row-major order as::
[1, 2, 3, 4, 5, 6]
Row-major order is also known as the C order, as the C programming
language uses it. New NumPy arrays are by default in row-major order.
self
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
>>> class Paintbrush:
... color = 'blue'
...
... def paint(self):
... print("Painting the city %s!" % self.color)
...
>>> p = Paintbrush()
>>> p.color = 'red'
>>> p.paint() # self refers to 'p'
Painting the city red!
slice
Used to select only certain elements from a sequence:
>>> x = range(5)
>>> x
[0, 1, 2, 3, 4]
>>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
[1, 2]
>>> x[1:5:2] # slice from 1 to 5, but skipping every second element
[1, 3]
>>> x[::-1] # slice a sequence in reverse
[4, 3, 2, 1, 0]
Arrays may have more than one dimension, each which can be sliced
individually:
>>> x = np.array([[1, 2], [3, 4]])
>>> x
array([[1, 2],
[3, 4]])
>>> x[:, 1]
array([2, 4])
structure
See :term:`structured data type`
structured data type
A data type composed of other datatypes
subarray data type
A :term:`structured data type` may contain a :term:`ndarray` with its
own dtype and shape:
>>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))])
>>> np.zeros(3, dtype=dt)
array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])],
dtype=[('a', '<i4'), ('b', '<f4', (3,))])
title
In addition to field names, structured array fields may have an
associated :ref:`title <titles>` which is an alias to the name and is
commonly used for plotting.
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
changed. Similar to a list, it can be indexed and sliced::
>>> x = (1, 'one', [1, 2])
>>> x
(1, 'one', [1, 2])
>>> x[0]
1
>>> x[:2]
(1, 'one')
A useful concept is "tuple unpacking", which allows variables to
be assigned to the contents of a tuple::
>>> x, y = (1, 2)
>>> x, y = 1, 2
This is often used when a function returns multiple values:
>>> def return_many():
... return 1, 'alpha', None
>>> a, b, c = return_many()
>>> a, b, c
(1, 'alpha', None)
>>> a
1
>>> b
'alpha'
ufunc
Universal function. A fast element-wise, :term:`vectorized
<vectorization>` array operation. Examples include ``add``, ``sin`` and
``logical_or``.
vectorization
Optimizing a looping block by specialized code. In a traditional sense,
vectorization performs the same operation on multiple elements with
fixed strides between them via specialized hardware. Compilers know how
to take advantage of well-constructed loops to implement such
optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>`
to mean any optimization via specialized code performing the same
operations on multiple elements, typically achieving speedups by
avoiding some of the overhead in looking up and converting the elements.
view
An array that does not own its data, but refers to another array's
data instead. For example, we may create a view that only shows
every second element of another array::
>>> x = np.arange(5)
>>> x
array([0, 1, 2, 3, 4])
>>> y = x[::2]
>>> y
array([0, 2, 4])
>>> x[0] = 3 # changing x changes y as well, since y is a view on x
>>> y
array([3, 2, 4])
wrapper
Python is a high-level (highly abstracted, or English-like) language.
This abstraction comes at a price in execution speed, and sometimes
it becomes necessary to use lower level languages to do fast
computations. A wrapper is code that provides a bridge between
high and the low level languages, allowing, e.g., Python to execute
code written in C or Fortran.
Examples include ctypes, SWIG and Cython (which wraps C and C++)
and f2py (which wraps Fortran).
"""
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/f2py/tests/test_crackfortran.py | import numpy as np
from numpy.testing import assert_array_equal
from . import util
from numpy.f2py import crackfortran
import tempfile
import textwrap
class TestNoSpace(util.F2PyTest):
    # issue gh-15035: add handling for endsubroutine, endfunction with no space
    # between "end" and the block name
    code = """
subroutine subb(k)
real(8), intent(inout) :: k(:)
k=k+1
endsubroutine
subroutine subc(w,k)
real(8), intent(in) :: w(:)
real(8), intent(out) :: k(size(w))
k=w+1
endsubroutine
function t0(value)
character value
character t0
t0 = value
endfunction
"""

    def test_module(self):
        """Compile ``code`` via f2py and exercise each no-space "end" routine."""
        k = np.array([1, 2, 3], dtype=np.float64)
        w = np.array([1, 2, 3], dtype=np.float64)
        # subb increments its argument in place (intent(inout)).
        self.module.subb(k)
        assert_array_equal(k, w + 1)
        # subc writes w+1 into its intent(out) argument.
        self.module.subc([w, k])
        assert_array_equal(k, w + 1)
        # t0 echoes a single character back.
        assert self.module.t0(23) == b'2'
class TestPublicPrivate:
    """Tests for crackfortran's handling of Fortran public/private attributes.

    ``crackfortran.crackfortran`` returns a list of module-info dicts whose
    ``'vars'`` entries carry an ``'attrspec'`` list; these tests check that
    module-level default visibility statements propagate correctly.
    """

    def test_defaultPrivate(self, tmp_path):
        """A bare ``private`` statement makes undeclared names private."""
        f_path = tmp_path / "mod.f90"
        with f_path.open('w') as ff:
            ff.write(textwrap.dedent("""\
module foo
private
integer :: a
public :: setA
integer :: b
contains
subroutine setA(v)
integer, intent(in) :: v
a = v
end subroutine setA
end module foo
"""))
        mod = crackfortran.crackfortran([str(f_path)])
        # Exactly one module should have been parsed.
        assert len(mod) == 1
        mod = mod[0]
        # `a` and `b` inherit the module default (private)...
        assert 'private' in mod['vars']['a']['attrspec']
        assert 'public' not in mod['vars']['a']['attrspec']
        assert 'private' in mod['vars']['b']['attrspec']
        assert 'public' not in mod['vars']['b']['attrspec']
        # ...while `setA` was explicitly exported.
        assert 'private' not in mod['vars']['seta']['attrspec']
        assert 'public' in mod['vars']['seta']['attrspec']

    def test_defaultPublic(self, tmp_path):
        """A bare ``public`` statement leaves explicitly-private names private."""
        f_path = tmp_path / "mod.f90"
        with f_path.open('w') as ff:
            ff.write(textwrap.dedent("""\
module foo
public
integer, private :: a
public :: setA
contains
subroutine setA(v)
integer, intent(in) :: v
a = v
end subroutine setA
end module foo
"""))
        mod = crackfortran.crackfortran([str(f_path)])
        assert len(mod) == 1
        mod = mod[0]
        # `a` was declared private explicitly, overriding the public default.
        assert 'private' in mod['vars']['a']['attrspec']
        assert 'public' not in mod['vars']['a']['attrspec']
        # `setA` keeps the public default.
        assert 'private' not in mod['vars']['seta']['attrspec']
        assert 'public' in mod['vars']['seta']['attrspec']
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_splom.py | from plotly.graph_objs import Splom
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/window/test_dtypes.py | from itertools import product
import numpy as np
import pytest
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.base import DataError
# gh-12373 : rolling functions error on float32 data
# make sure rolling functions works for different dtypes
#
# NOTE that these are yielded tests and so _create_data
# is explicitly called.
#
# further note that we are only checking rolling for fully dtype
# compliance (though both expanding and ewm inherit)
class Dtype:
    """Base class checking rolling-window functions across input dtypes.

    Subclasses set ``dtype``; ``test_dtypes`` applies every rolling function
    in ``funcs`` to every fixture built by ``_create_dtype_data`` and compares
    the result with the float64 expectations from ``get_expects``.
    """

    # Rolling window length used by every check.
    window = 2

    # Function name -> callable applied to a Rolling object.
    funcs = {
        "count": lambda v: v.count(),
        "max": lambda v: v.max(),
        "min": lambda v: v.min(),
        "sum": lambda v: v.sum(),
        "mean": lambda v: v.mean(),
        "std": lambda v: v.std(),
        "var": lambda v: v.var(),
        "median": lambda v: v.median(),
    }

    def get_expects(self):
        """Return expected results keyed by fixture name, then function name.

        All expectations are float64 regardless of the input dtype, because
        rolling aggregations upcast to float.
        """
        expects = {
            "sr1": {
                "count": Series([1, 2, 2, 2, 2], dtype="float64"),
                "max": Series([np.nan, 1, 2, 3, 4], dtype="float64"),
                "min": Series([np.nan, 0, 1, 2, 3], dtype="float64"),
                "sum": Series([np.nan, 1, 3, 5, 7], dtype="float64"),
                "mean": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
                "std": Series([np.nan] + [np.sqrt(0.5)] * 4, dtype="float64"),
                "var": Series([np.nan, 0.5, 0.5, 0.5, 0.5], dtype="float64"),
                "median": Series([np.nan, 0.5, 1.5, 2.5, 3.5], dtype="float64"),
            },
            "sr2": {
                "count": Series([1, 2, 2, 2, 2], dtype="float64"),
                "max": Series([np.nan, 10, 8, 6, 4], dtype="float64"),
                "min": Series([np.nan, 8, 6, 4, 2], dtype="float64"),
                "sum": Series([np.nan, 18, 14, 10, 6], dtype="float64"),
                "mean": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
                "std": Series([np.nan] + [np.sqrt(2)] * 4, dtype="float64"),
                "var": Series([np.nan, 2, 2, 2, 2], dtype="float64"),
                "median": Series([np.nan, 9, 7, 5, 3], dtype="float64"),
            },
            "sr3": {
                "count": Series([1, 2, 2, 1, 1], dtype="float64"),
                "max": Series([np.nan, 1, 2, np.nan, np.nan], dtype="float64"),
                "min": Series([np.nan, 0, 1, np.nan, np.nan], dtype="float64"),
                "sum": Series([np.nan, 1, 3, np.nan, np.nan], dtype="float64"),
                "mean": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
                "std": Series(
                    [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2, dtype="float64"
                ),
                "var": Series([np.nan, 0.5, 0.5, np.nan, np.nan], dtype="float64"),
                "median": Series([np.nan, 0.5, 1.5, np.nan, np.nan], dtype="float64"),
            },
            "df": {
                "count": DataFrame(
                    {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])},
                    dtype="float64",
                ),
                "max": DataFrame(
                    {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},
                    dtype="float64",
                ),
                "min": DataFrame(
                    {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},
                    dtype="float64",
                ),
                "sum": DataFrame(
                    {
                        0: Series([np.nan, 2, 6, 10, 14]),
                        1: Series([np.nan, 4, 8, 12, 16]),
                    },
                    dtype="float64",
                ),
                "mean": DataFrame(
                    {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
                    dtype="float64",
                ),
                "std": DataFrame(
                    {
                        0: Series([np.nan] + [np.sqrt(2)] * 4),
                        1: Series([np.nan] + [np.sqrt(2)] * 4),
                    },
                    dtype="float64",
                ),
                "var": DataFrame(
                    {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},
                    dtype="float64",
                ),
                "median": DataFrame(
                    {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},
                    dtype="float64",
                ),
            },
        }
        return expects

    def _create_dtype_data(self, dtype):
        """Build the Series/DataFrame fixtures for ``dtype``."""
        sr1 = Series(np.arange(5), dtype=dtype)
        sr2 = Series(np.arange(10, 0, -2), dtype=dtype)
        sr3 = sr1.copy()
        # Use np.nan: the np.NaN alias was removed in NumPy 2.0 and the two
        # were always the same float('nan') object.
        sr3[3] = np.nan
        df = DataFrame(np.arange(10).reshape((5, 2)), dtype=dtype)
        data = {"sr1": sr1, "sr2": sr2, "sr3": sr3, "df": df}
        return data

    def _create_data(self):
        """Materialize fixtures and expectations on the instance.

        Relies on ``self.dtype`` being provided by the subclass.
        """
        self.data = self._create_dtype_data(self.dtype)
        self.expects = self.get_expects()

    def test_dtypes(self):
        """Run every (function, fixture) combination through check_dtypes."""
        self._create_data()
        for f_name, d_name in product(self.funcs.keys(), self.data.keys()):
            f = self.funcs[f_name]
            d = self.data[d_name]
            exp = self.expects[d_name][f_name]
            self.check_dtypes(f, f_name, d, d_name, exp)

    def check_dtypes(self, f, f_name, d, d_name, exp):
        """Apply ``f`` to a rolling view of ``d`` and compare with ``exp``."""
        roll = d.rolling(window=self.window)
        result = f(roll)
        tm.assert_almost_equal(result, exp)
class TestDtype_object(Dtype):
    # Object dtype: values are boxed Python ints.
    dtype = object
class Dtype_integer(Dtype):
    # Marker base grouping the signed-integer dtype tests.
    pass
class TestDtype_int8(Dtype_integer):
    dtype = np.int8
class TestDtype_int16(Dtype_integer):
    dtype = np.int16
class TestDtype_int32(Dtype_integer):
    dtype = np.int32
class TestDtype_int64(Dtype_integer):
    dtype = np.int64
class Dtype_uinteger(Dtype):
    # Marker base grouping the unsigned-integer dtype tests.
    pass
class TestDtype_uint8(Dtype_uinteger):
    dtype = np.uint8
class TestDtype_uint16(Dtype_uinteger):
    dtype = np.uint16
class TestDtype_uint32(Dtype_uinteger):
    dtype = np.uint32
class TestDtype_uint64(Dtype_uinteger):
    dtype = np.uint64
class Dtype_float(Dtype):
    # Marker base grouping the floating-point dtype tests.
    pass
class TestDtype_float16(Dtype_float):
    dtype = np.float16
class TestDtype_float32(Dtype_float):
    dtype = np.float32
class TestDtype_float64(Dtype_float):
    dtype = np.float64
class TestDtype_category(Dtype):
    dtype = "category"
    # DataFrame fixture is not applicable to categorical data.
    include_df = False

    def _create_dtype_data(self, dtype):
        """Categorical fixtures: only the two Series (no sr3/df)."""
        sr1 = Series(range(5), dtype=dtype)
        sr2 = Series(range(10, 0, -2), dtype=dtype)
        data = {"sr1": sr1, "sr2": sr2}
        return data
class DatetimeLike(Dtype):
    # Datetime-like dtypes only support rolling ``count``; every other
    # aggregation should raise DataError ("No numeric types to aggregate").
    def check_dtypes(self, f, f_name, d, d_name, exp):
        """Check count succeeds and all other rolling functions raise."""
        roll = d.rolling(window=self.window)
        if f_name == "count":
            result = f(roll)
            tm.assert_almost_equal(result, exp)
        else:
            msg = "No numeric types to aggregate"
            with pytest.raises(DataError, match=msg):
                f(roll)
class TestDtype_timedelta(DatetimeLike):
    dtype = np.dtype("m8[ns]")
class TestDtype_datetime(DatetimeLike):
    dtype = np.dtype("M8[ns]")
class TestDtype_datetime64UTC(DatetimeLike):
    dtype = "datetime64[ns, UTC]"

    def _create_data(self):
        # Fixture construction cannot build this tz-aware extension dtype
        # directly, so the whole class is skipped.
        pytest.skip(
            "direct creation of extension dtype "
            "datetime64[ns, UTC] is not supported ATM"
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/volume/_slices.py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Slices(_BaseTraceHierarchyType):
    """Container for the x/y/z slice-plane settings of a volume trace."""

    # Location of this node in the figure hierarchy.
    _parent_path_str = "volume"
    _path_str = "volume.slices"
    _valid_props = {"x", "y", "z"}

    @property
    def x(self):
        """
        Slice planes normal to the x axis.

        May be given as an instance of
        :class:`plotly.graph_objs.volume.slices.X` or as a dict of
        compatible properties (``fill``, ``locations``,
        ``locationssrc``, ``show``).

        Returns
        -------
        plotly.graph_objs.volume.slices.X
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    @property
    def y(self):
        """
        Slice planes normal to the y axis.

        May be given as an instance of
        :class:`plotly.graph_objs.volume.slices.Y` or as a dict of
        compatible properties (``fill``, ``locations``,
        ``locationssrc``, ``show``).

        Returns
        -------
        plotly.graph_objs.volume.slices.Y
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    @property
    def z(self):
        """
        Slice planes normal to the z axis.

        May be given as an instance of
        :class:`plotly.graph_objs.volume.slices.Z` or as a dict of
        compatible properties (``fill``, ``locations``,
        ``locationssrc``, ``show``).

        Returns
        -------
        plotly.graph_objs.volume.slices.Z
        """
        return self["z"]

    @z.setter
    def z(self, val):
        self["z"] = val

    @property
    def _prop_descriptions(self):
        return """\
x
:class:`plotly.graph_objects.volume.slices.X` instance
or dict with compatible properties
y
:class:`plotly.graph_objects.volume.slices.Y` instance
or dict with compatible properties
z
:class:`plotly.graph_objects.volume.slices.Z` instance
or dict with compatible properties
"""

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Slices object.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.volume.Slices`
        x, y, z
            Per-axis slice settings; instances of the corresponding
            ``plotly.graph_objects.volume.slices`` class or dicts with
            compatible properties.

        Returns
        -------
        Slices
        """
        super(Slices, self).__init__("slices")

        # Fast path used when building templates: adopt the parent, done.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict we can pop entries from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.volume.Slices
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Slices`"""
            )

        # Behavior flags consumed before property assignment.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate x, y, z — explicit keyword arguments win over entries
        # carried in `arg` (which are popped either way).
        for prop_name, explicit in (("x", x), ("y", y), ("z", z)):
            from_arg = arg.pop(prop_name, None)
            chosen = explicit if explicit is not None else from_arg
            if chosen is not None:
                self[prop_name] = chosen

        # Anything left over is handled by the base class.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset the skip flag so later assignments validate normally.
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/_annotations.py | <gh_stars>1000+
import _plotly_utils.basevalidators
class AnnotationsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``layout.scene.annotations`` compound-array property.

    Each element of the array is validated against the generated
    ``Annotation`` data class.
    """

    def __init__(self, plotly_name="annotations", parent_name="layout.scene", **kwargs):
        super(AnnotationsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the data class each array element is built from.
            data_class_str=kwargs.pop("data_class_str", "Annotation"),
            # Human-readable docs injected into the generated class docstring.
            data_docs=kwargs.pop(
                "data_docs",
                """
align
Sets the horizontal alignment of the `text`
within the box. Has an effect only if `text`
spans two or more lines (i.e. `text` contains
one or more <br> HTML tags) or if an explicit
width is set to override the text width.
arrowcolor
Sets the color of the annotation arrow.
arrowhead
Sets the end annotation arrow head style.
arrowside
Sets the annotation arrow head position.
arrowsize
Sets the size of the end annotation arrow head,
relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
arrowwidth
Sets the width (in px) of annotation arrow
line.
ax
Sets the x component of the arrow tail about
the arrow head (in pixels).
ay
Sets the y component of the arrow tail about
the arrow head (in pixels).
bgcolor
Sets the background color of the annotation.
bordercolor
Sets the color of the border enclosing the
annotation `text`.
borderpad
Sets the padding (in px) between the `text` and
the enclosing border.
borderwidth
Sets the width (in px) of the border enclosing
the annotation `text`.
captureevents
Determines whether the annotation text box
captures mouse move and click events, or allows
those events to pass through to data points in
the plot that may be behind the annotation. By
default `captureevents` is False unless
`hovertext` is provided. If you use the event
`plotly_clickannotation` without `hovertext`
you must explicitly enable `captureevents`.
font
Sets the annotation text font.
height
Sets an explicit height for the text box. null
(default) lets the text set the box height.
Taller text will be clipped.
hoverlabel
:class:`plotly.graph_objects.layout.scene.annot
ation.Hoverlabel` instance or dict with
compatible properties
hovertext
Sets text to appear when hovering over this
annotation. If omitted or blank, no hover label
will appear.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
opacity
Sets the opacity of the annotation (text +
arrow).
showarrow
Determines whether or not the annotation is
drawn with an arrow. If True, `text` is placed
near the arrow's tail. If False, `text` lines
up with the `x` and `y` provided.
standoff
Sets a distance, in pixels, to move the end
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
startarrowhead
Sets the start annotation arrow head style.
startarrowsize
Sets the size of the start annotation arrow
head, relative to `arrowwidth`. A value of 1
(default) gives a head about 3x as wide as the
line.
startstandoff
Sets a distance, in pixels, to move the start
arrowhead away from the position it is pointing
at, for example to point at the edge of a
marker independent of zoom. Note that this
shortens the arrow from the `ax` / `ay` vector,
in contrast to `xshift` / `yshift` which moves
everything by this amount.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
text
Sets the text associated with this annotation.
Plotly uses a subset of HTML tags to do things
like newline (<br>), bold (<b></b>), italics
(<i></i>), hyperlinks (<a href='...'></a>).
Tags <em>, <sup>, <sub> <span> are also
supported.
textangle
Sets the angle at which the `text` is drawn
with respect to the horizontal.
valign
Sets the vertical alignment of the `text`
within the box. Has an effect only if an
explicit height is set to override the text
height.
visible
Determines whether or not this annotation is
visible.
width
Sets an explicit width for the text box. null
(default) lets the text set the box width.
Wider text will be clipped. There is no
automatic wrapping; use <br> to start a new
line.
x
Sets the annotation's x position.
xanchor
Sets the text box's horizontal position anchor
This anchor binds the `x` position to the
"left", "center" or "right" of the annotation.
For example, if `x` is set to 1, `xref` to
"paper" and `xanchor` to "right" then the
right-most portion of the annotation lines up
with the right-most edge of the plotting area.
If "auto", the anchor is equivalent to "center"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
xshift
Shifts the position of the whole annotation and
arrow to the right (positive) or left
(negative) by this many pixels.
y
Sets the annotation's y position.
yanchor
Sets the text box's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the annotation.
For example, if `y` is set to 1, `yref` to
"paper" and `yanchor` to "top" then the top-
most portion of the annotation lines up with
the top-most edge of the plotting area. If
"auto", the anchor is equivalent to "middle"
for data-referenced annotations or if there is
an arrow, whereas for paper-referenced with no
arrow, the anchor picked corresponds to the
closest side.
yshift
Shifts the position of the whole annotation and
arrow up (positive) or down (negative) by this
many pixels.
z
Sets the annotation's z position.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scatterpolargl.py | from plotly.graph_objs import Scatterpolargl
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/treemap/pathbar/__init__.py | <gh_stars>1000+
import sys
# On Python < 3.7 eagerly import every validator; on 3.7+ use PEP 562
# module-level __getattr__ (via relative_import) so submodules load lazily.
if sys.version_info < (3, 7):
    from ._visible import VisibleValidator
    from ._thickness import ThicknessValidator
    from ._textfont import TextfontValidator
    from ._side import SideValidator
    from ._edgeshape import EdgeshapeValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__): the first
    # list is eagerly-imported submodules (none here), the second is the
    # lazily-resolved "module.ClassName" validator paths.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._visible.VisibleValidator",
            "._thickness.ThicknessValidator",
            "._textfont.TextfontValidator",
            "._side.SideValidator",
            "._edgeshape.EdgeshapeValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/arrays/categorical/test_replace.py | <gh_stars>100-1000
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.mark.parametrize(
    "to_replace,value,expected,flip_categories",
    [
        # one-to-one
        (1, 2, [2, 2, 3], False),
        (1, 4, [4, 2, 3], False),
        (4, 1, [1, 2, 3], False),
        (5, 6, [1, 2, 3], False),
        # many-to-one
        ([1], 2, [2, 2, 3], False),
        ([1, 2], 3, [3, 3, 3], False),
        ([1, 2], 4, [4, 4, 3], False),
        ((1, 2, 4), 5, [5, 5, 3], False),
        ((5, 6), 2, [1, 2, 3], False),
        # many-to-many, handled outside of Categorical and results in separate dtype
        ([1], [2], [2, 2, 3], True),
        ([1, 4], [5, 2], [5, 2, 3], True),
        # check_categorical sorts categories, which crashes on mixed dtypes
        (3, "4", [1, 2, "4"], False),
        ([1, 2, "3"], "5", ["5", "5", 3], True),
    ],
)
def test_replace(to_replace, value, expected, flip_categories):
    """Check Series.replace on categorical data, returned and in-place.

    ``flip_categories`` marks cases where the expected result carries a
    reversed categories order; when ``value`` is a list the replace call
    drops the categorical dtype entirely (see the np.asarray fallback).
    """
    # GH 31720
    stays_categorical = not isinstance(value, list)
    s = pd.Series([1, 2, 3], dtype="category")
    # Exercise both code paths: the returning form first, then in-place.
    result = s.replace(to_replace, value)
    expected = pd.Series(expected, dtype="category")
    s.replace(to_replace, value, inplace=True)
    if flip_categories:
        expected = expected.cat.set_categories(expected.cat.categories[::-1])
    if not stays_categorical:
        # the replace call loses categorical dtype
        expected = pd.Series(np.asarray(expected))
    tm.assert_series_equal(
        expected, result, check_category_order=False,
    )
    tm.assert_series_equal(
        expected, s, check_category_order=False,
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/indicator/gauge/__init__.py | import sys
# On Python < 3.7 import the gauge graph objects and their subpackages
# eagerly; on 3.7+ defer loading via PEP 562 module __getattr__.
if sys.version_info < (3, 7):
    from ._axis import Axis
    from ._bar import Bar
    from ._step import Step
    from ._threshold import Threshold
    from . import axis
    from . import bar
    from . import step
    from . import threshold
else:
    from _plotly_utils.importers import relative_import

    # First list: submodules exported eagerly; second: lazily-resolved
    # "module.ClassName" entries for the compound graph-object classes.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".axis", ".bar", ".step", ".threshold"],
        ["._axis.Axis", "._bar.Bar", "._step.Step", "._threshold.Threshold"],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_funnelarea.py | from plotly.graph_objs import Funnelarea
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/plotly/figure_factory/__init__.py | from __future__ import absolute_import
from plotly import optional_imports
# Require that numpy exists for figure_factory
np = optional_imports.get_module("numpy")
if np is None:
    raise ImportError(
        """\
The figure factory module requires the numpy package"""
    )
# Factories with no optional dependencies beyond numpy.
from plotly.figure_factory._2d_density import create_2d_density
from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap
from plotly.figure_factory._bullet import create_bullet
from plotly.figure_factory._candlestick import create_candlestick
from plotly.figure_factory._dendrogram import create_dendrogram
from plotly.figure_factory._distplot import create_distplot
from plotly.figure_factory._facet_grid import create_facet_grid
from plotly.figure_factory._gantt import create_gantt
from plotly.figure_factory._ohlc import create_ohlc
from plotly.figure_factory._quiver import create_quiver
from plotly.figure_factory._scatterplot import create_scatterplotmatrix
from plotly.figure_factory._streamline import create_streamline
from plotly.figure_factory._table import create_table
from plotly.figure_factory._trisurf import create_trisurf
from plotly.figure_factory._violin import create_violin
# Factories needing pandas: fall back to stubs that raise a helpful
# ImportError at call time rather than at import time.
if optional_imports.get_module("pandas") is not None:
    from plotly.figure_factory._county_choropleth import create_choropleth
    from plotly.figure_factory._hexbin_mapbox import create_hexbin_mapbox
else:

    def create_choropleth(*args, **kwargs):
        raise ImportError("Please install pandas to use `create_choropleth`")

    def create_hexbin_mapbox(*args, **kwargs):
        raise ImportError("Please install pandas to use `create_hexbin_mapbox`")
# Same pattern for the scikit-image-backed factory.
if optional_imports.get_module("skimage") is not None:
    from plotly.figure_factory._ternary_contour import create_ternary_contour
else:

    def create_ternary_contour(*args, **kwargs):
        raise ImportError("Please install scikit-image to use `create_ternary_contour`")
# Public API of plotly.figure_factory (stubs included so names always exist).
__all__ = [
    "create_2d_density",
    "create_annotated_heatmap",
    "create_bullet",
    "create_candlestick",
    "create_choropleth",
    "create_dendrogram",
    "create_distplot",
    "create_facet_grid",
    "create_gantt",
    "create_hexbin_mapbox",
    "create_ohlc",
    "create_quiver",
    "create_scatterplotmatrix",
    "create_streamline",
    "create_table",
    "create_ternary_contour",
    "create_trisurf",
    "create_violin",
]
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/xaxis/_rangebreak.py | from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Rangebreak(_BaseLayoutHierarchyType):
    """
    One entry of ``layout.xaxis.rangebreaks``: a region of axis
    coordinates to omit from a "date" axis, given either as explicit
    ``bounds`` (optionally interpreted via ``pattern``) or as discrete
    ``values`` each spanning ``dvalue`` milliseconds. Auto-generated
    plotly graph-object; property access is routed through
    ``_BaseLayoutHierarchyType.__getitem__``/``__setitem__``.
    """

    # class properties
    # --------------------
    _parent_path_str = "layout.xaxis"
    _path_str = "layout.xaxis.rangebreak"
    _valid_props = {
        "bounds",
        "dvalue",
        "enabled",
        "name",
        "pattern",
        "templateitemname",
        "values",
    }
    # bounds
    # ------
    @property
    def bounds(self):
        """
        Sets the lower and upper bounds of this axis rangebreak. Can be
        used with `pattern`.
        The 'bounds' property is an info array that may be specified as:
        * a list or tuple of 2 elements where:
    (0) The 'bounds[0]' property accepts values of any type
    (1) The 'bounds[1]' property accepts values of any type
        Returns
        -------
        list
        """
        return self["bounds"]
    @bounds.setter
    def bounds(self, val):
        self["bounds"] = val
    # dvalue
    # ------
    @property
    def dvalue(self):
        """
        Sets the size of each `values` item. The default is one day in
        milliseconds.
        The 'dvalue' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
        Returns
        -------
        int|float
        """
        return self["dvalue"]
    @dvalue.setter
    def dvalue(self, val):
        self["dvalue"] = val
    # enabled
    # -------
    @property
    def enabled(self):
        """
        Determines whether this axis rangebreak is enabled or disabled.
        Please note that `rangebreaks` only work for "date" axis type.
        The 'enabled' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["enabled"]
    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val
    # name
    # ----
    @property
    def name(self):
        """
        When used in a template, named items are created in the output
        figure in addition to any items the figure already has in this
        array. You can modify these items in the output figure by
        making your own item with `templateitemname` matching this
        `name` alongside your modifications (including `visible: false`
        or `enabled: false` to hide it). Has no effect outside of a
        template.
        The 'name' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["name"]
    @name.setter
    def name(self, val):
        self["name"] = val
    # pattern
    # -------
    @property
    def pattern(self):
        """
        Determines a pattern on the time line that generates breaks. If
        *day of week* - days of the week in English e.g. 'Sunday' or
        `sun` (matching is case-insensitive and considers only the
        first three characters), as well as Sunday-based integers
        between 0 and 6. If "hour" - hour (24-hour clock) as decimal
        numbers between 0 and 24. for more info. Examples: - { pattern:
        'day of week', bounds: [6, 1] }  or simply { bounds: ['sat',
        'mon'] }   breaks from Saturday to Monday (i.e. skips the
        weekends). - { pattern: 'hour', bounds: [17, 8] }   breaks from
        5pm to 8am (i.e. skips non-work hours).
        The 'pattern' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['day of week', 'hour', '']
        Returns
        -------
        Any
        """
        return self["pattern"]
    @pattern.setter
    def pattern(self, val):
        self["pattern"] = val
    # templateitemname
    # ----------------
    @property
    def templateitemname(self):
        """
        Used to refer to a named item in this array in the template.
        Named items from the template will be created even without a
        matching item in the input figure, but you can modify one by
        making an item with `templateitemname` matching its `name`,
        alongside your modifications (including `visible: false` or
        `enabled: false` to hide it). If there is no template or no
        matching item, this item will be hidden unless you explicitly
        show it with `visible: true`.
        The 'templateitemname' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["templateitemname"]
    @templateitemname.setter
    def templateitemname(self, val):
        self["templateitemname"] = val
    # values
    # ------
    @property
    def values(self):
        """
        Sets the coordinate values corresponding to the rangebreaks. An
        alternative to `bounds`. Use `dvalue` to set the size of the
        values along the axis.
        The 'values' property is an info array that may be specified as:
        * a list of elements where:
          The 'values[i]' property accepts values of any type
        Returns
        -------
        list
        """
        return self["values"]
    @values.setter
    def values(self, val):
        self["values"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        bounds
            Sets the lower and upper bounds of this axis
            rangebreak. Can be used with `pattern`.
        dvalue
            Sets the size of each `values` item. The default is one
            day in milliseconds.
        enabled
            Determines whether this axis rangebreak is enabled or
            disabled. Please note that `rangebreaks` only work for
            "date" axis type.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pattern
            Determines a pattern on the time line that generates
            breaks. If *day of week* - days of the week in English
            e.g. 'Sunday' or `sun` (matching is case-insensitive
            and considers only the first three characters), as well
            as Sunday-based integers between 0 and 6. If "hour" -
            hour (24-hour clock) as decimal numbers between 0 and
            24. for more info. Examples: - { pattern: 'day of
            week', bounds: [6, 1] }  or simply { bounds: ['sat',
            'mon'] }   breaks from Saturday to Monday (i.e. skips
            the weekends). - { pattern: 'hour', bounds: [17, 8] }
            breaks from 5pm to 8am (i.e. skips non-work hours).
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        values
            Sets the coordinate values corresponding to the
            rangebreaks. An alternative to `bounds`. Use `dvalue`
            to set the size of the values along the axis.
        """
    def __init__(
        self,
        arg=None,
        bounds=None,
        dvalue=None,
        enabled=None,
        name=None,
        pattern=None,
        templateitemname=None,
        values=None,
        **kwargs
    ):
        """
        Construct a new Rangebreak object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.xaxis.Rangebreak`
        bounds
            Sets the lower and upper bounds of this axis
            rangebreak. Can be used with `pattern`.
        dvalue
            Sets the size of each `values` item. The default is one
            day in milliseconds.
        enabled
            Determines whether this axis rangebreak is enabled or
            disabled. Please note that `rangebreaks` only work for
            "date" axis type.
        name
            When used in a template, named items are created in the
            output figure in addition to any items the figure
            already has in this array. You can modify these items
            in the output figure by making your own item with
            `templateitemname` matching this `name` alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). Has no effect outside of a
            template.
        pattern
            Determines a pattern on the time line that generates
            breaks. If *day of week* - days of the week in English
            e.g. 'Sunday' or `sun` (matching is case-insensitive
            and considers only the first three characters), as well
            as Sunday-based integers between 0 and 6. If "hour" -
            hour (24-hour clock) as decimal numbers between 0 and
            24. for more info. Examples: - { pattern: 'day of
            week', bounds: [6, 1] }  or simply { bounds: ['sat',
            'mon'] }   breaks from Saturday to Monday (i.e. skips
            the weekends). - { pattern: 'hour', bounds: [17, 8] }
            breaks from 5pm to 8am (i.e. skips non-work hours).
        templateitemname
            Used to refer to a named item in this array in the
            template. Named items from the template will be created
            even without a matching item in the input figure, but
            you can modify one by making an item with
            `templateitemname` matching its `name`, alongside your
            modifications (including `visible: false` or `enabled:
            false` to hide it). If there is no template or no
            matching item, this item will be hidden unless you
            explicitly show it with `visible: true`.
        values
            Sets the coordinate values corresponding to the
            rangebreaks. An alternative to `bounds`. Use `dvalue`
            to set the size of the values along the axis.
        Returns
        -------
        Rangebreak
        """
        super(Rangebreak, self).__init__("rangebreaks")
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.xaxis.Rangebreak
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.xaxis.Rangebreak`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("bounds", None)
        _v = bounds if bounds is not None else _v
        if _v is not None:
            self["bounds"] = _v
        _v = arg.pop("dvalue", None)
        _v = dvalue if dvalue is not None else _v
        if _v is not None:
            self["dvalue"] = _v
        _v = arg.pop("enabled", None)
        _v = enabled if enabled is not None else _v
        if _v is not None:
            self["enabled"] = _v
        _v = arg.pop("name", None)
        _v = name if name is not None else _v
        if _v is not None:
            self["name"] = _v
        _v = arg.pop("pattern", None)
        _v = pattern if pattern is not None else _v
        if _v is not None:
            self["pattern"] = _v
        _v = arg.pop("templateitemname", None)
        _v = templateitemname if templateitemname is not None else _v
        if _v is not None:
            self["templateitemname"] = _v
        _v = arg.pop("values", None)
        _v = values if values is not None else _v
        if _v is not None:
            self["values"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/api/indexers/__init__.py | <filename>env/lib/python3.8/site-packages/pandas/api/indexers/__init__.py
"""
Public API for Rolling Window Indexers.
"""
from pandas.core.indexers import check_array_indexer
from pandas.core.window.indexers import BaseIndexer
__all__ = ["check_array_indexer", "BaseIndexer"]
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/extension/arrow/arrays.py | """
Rudimentary Apache Arrow-backed ExtensionArray.
At the moment, just a boolean array / type is implemented.
Eventually, we'll want to parametrize the type and support
multiple dtypes. Not all methods are implemented yet, and the
current implementation is not efficient.
"""
import copy
import itertools
import operator
from typing import Type
import numpy as np
import pyarrow as pa
import pandas as pd
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
take,
)
@register_extension_dtype
class ArrowBoolDtype(ExtensionDtype):
    # Scalar type of each element plus registry metadata for pandas.
    type = np.bool_
    kind = "b"
    name = "arrow_bool"
    # pyarrow's NULL singleton stands in for missing values.
    na_value = pa.NULL
    @classmethod
    def construct_array_type(cls) -> Type["ArrowBoolArray"]:
        """
        Return the array type associated with this dtype.
        Returns
        -------
        type
        """
        return ArrowBoolArray
    @property
    def _is_boolean(self) -> bool:
        # Lets pandas treat arrays of this dtype as boolean masks.
        return True
@register_extension_dtype
class ArrowStringDtype(ExtensionDtype):
    # Scalar type of each element plus registry metadata for pandas.
    type = str
    kind = "U"
    name = "arrow_string"
    # pyarrow's NULL singleton stands in for missing values.
    na_value = pa.NULL
    @classmethod
    def construct_array_type(cls) -> Type["ArrowStringArray"]:
        """
        Return the array type associated with this dtype.
        Returns
        -------
        type
        """
        return ArrowStringArray
class ArrowExtensionArray(ExtensionArray):
    """
    Shared base for the Arrow-backed test arrays. ``self._data`` is a
    ``pa.ChunkedArray`` (set by subclasses' ``__init__``); many methods
    round-trip through ``to_pandas()`` rather than operating on the
    Arrow buffers directly — deliberately simple, not efficient.
    """
    @classmethod
    def from_scalars(cls, values):
        # Wrap a sequence of Python/numpy scalars in a one-chunk ChunkedArray.
        arr = pa.chunked_array([pa.array(np.asarray(values))])
        return cls(arr)
    @classmethod
    def from_array(cls, arr):
        # Promote a single pa.Array to the ChunkedArray the class requires.
        assert isinstance(arr, pa.Array)
        return cls(pa.chunked_array([arr]))
    @classmethod
    def _from_sequence(cls, scalars, dtype=None, copy=False):
        # ExtensionArray interface hook; dtype/copy are ignored here.
        return cls.from_scalars(scalars)
    def __repr__(self):
        return f"{type(self).__name__}({repr(self._data)})"
    def __getitem__(self, item):
        # Scalar index -> scalar value; slice/mask -> new array of same type.
        if pd.api.types.is_scalar(item):
            return self._data.to_pandas()[item]
        else:
            vals = self._data.to_pandas()[item]
            return type(self).from_scalars(vals)
    def __len__(self):
        return len(self._data)
    def astype(self, dtype, copy=True):
        # needed to fix this astype for the Series constructor.
        if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
            if copy:
                return self.copy()
            return self
        return super().astype(dtype, copy)
    @property
    def dtype(self):
        return self._dtype
    def _boolean_op(self, other, op):
        # Elementwise boolean op against another array of the same type;
        # the original nulls are re-applied as a mask on the result.
        if not isinstance(other, type(self)):
            raise NotImplementedError()
        result = op(np.array(self._data), np.array(other._data))
        return ArrowBoolArray(
            pa.chunked_array([pa.array(result, mask=pd.isna(self._data.to_pandas()))])
        )
    def __eq__(self, other):
        # NOTE(review): returns plain False for non-matching types rather
        # than NotImplemented — intentional for these test arrays.
        if not isinstance(other, type(self)):
            return False
        return self._boolean_op(other, operator.eq)
    def __and__(self, other):
        return self._boolean_op(other, operator.and_)
    def __or__(self, other):
        return self._boolean_op(other, operator.or_)
    @property
    def nbytes(self):
        # Sum the sizes of every non-null Arrow buffer across all chunks.
        return sum(
            x.size
            for chunk in self._data.chunks
            for x in chunk.buffers()
            if x is not None
        )
    def isna(self):
        nas = pd.isna(self._data.to_pandas())
        return type(self).from_scalars(nas)
    def take(self, indices, allow_fill=False, fill_value=None):
        data = self._data.to_pandas()
        if allow_fill and fill_value is None:
            fill_value = self.dtype.na_value
        result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)
        return self._from_sequence(result, dtype=self.dtype)
    def copy(self):
        # Shallow copy of the ChunkedArray; Arrow buffers are immutable.
        return type(self)(copy.copy(self._data))
    @classmethod
    def _concat_same_type(cls, to_concat):
        # Concatenate by flattening every input's chunk list, zero-copy.
        chunks = list(itertools.chain.from_iterable(x._data.chunks for x in to_concat))
        arr = pa.chunked_array(chunks)
        return cls(arr)
    def __invert__(self):
        return type(self).from_scalars(~self._data.to_pandas())
    def _reduce(self, name: str, skipna: bool = True, **kwargs):
        # Dispatch to the named reduction (e.g. 'any'/'all') on either the
        # NA-filtered array or self, per skipna.
        if skipna:
            arr = self[~self.isna()]
        else:
            arr = self
        try:
            op = getattr(arr, name)
        except AttributeError as err:
            raise TypeError from err
        return op(**kwargs)
    def any(self, axis=0, out=None):
        # Explicitly return a plain bool to reproduce GH-34660
        return bool(self._data.to_pandas().any())
    def all(self, axis=0, out=None):
        # Explicitly return a plain bool to reproduce GH-34660
        return bool(self._data.to_pandas().all())
class ArrowBoolArray(ArrowExtensionArray):
    """Arrow-backed boolean array used by the extension-array tests."""

    def __init__(self, values):
        # The backing store must be a pyarrow ChunkedArray of bool chunks.
        if isinstance(values, pa.ChunkedArray):
            assert values.type == pa.bool_()
        else:
            raise ValueError
        self._dtype = ArrowBoolDtype()
        self._data = values
class ArrowStringArray(ArrowExtensionArray):
    """Arrow-backed string array used by the extension-array tests."""

    def __init__(self, values):
        # The backing store must be a pyarrow ChunkedArray of string chunks.
        if isinstance(values, pa.ChunkedArray):
            assert values.type == pa.string()
        else:
            raise ValueError
        self._dtype = ArrowStringDtype()
        self._data = values
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/optional_imports.py | from _plotly_utils.optional_imports import get_module
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_contourcarpet.py | <gh_stars>1000+
from plotly.graph_objs import Contourcarpet
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/_style.py | import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``layout.mapbox.style`` property."""

    def __init__(self, plotly_name="style", parent_name="layout.mapbox", **kwargs):
        # Pop defaulted construction options out of kwargs first, so an
        # explicit caller-supplied value always wins over the default.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        values = kwargs.pop(
            "values",
            [
                "basic",
                "streets",
                "outdoors",
                "light",
                "dark",
                "satellite",
                "satellite-streets",
                "open-street-map",
                "white-bg",
                "carto-positron",
                "carto-darkmatter",
                "stamen-terrain",
                "stamen-toner",
                "stamen-watercolor",
            ],
        )
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/io/gcs.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1-10
""" GCS support for remote file interactivity """
from pandas.compat._optional import import_optional_dependency
# Resolve gcsfs at module import; raises ImportError carrying the
# ``extra`` message below when the optional dependency is absent.
gcsfs = import_optional_dependency(
    "gcsfs", extra="The gcsfs library is required to handle GCS files"
)
def get_fs():
    # A fresh GCSFileSystem per call; gcsfs performs credential discovery.
    return gcsfs.GCSFileSystem()
def get_filepath_or_buffer(
    filepath_or_buffer, encoding=None, compression=None, mode=None
):
    """Open a GCS path, returning (buffer, encoding, compression, should_close).

    ``encoding`` is accepted for interface parity but not used; the
    returned encoding slot is always None.
    """
    # Binary read is the default when no explicit mode is requested.
    effective_mode = "rb" if mode is None else mode
    handle = get_fs().open(filepath_or_buffer, effective_mode)
    return handle, None, compression, True
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/treemap/marker/_depthfade.py | import _plotly_utils.basevalidators
class DepthfadeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``treemap.marker.depthfade`` enumerated property."""

    def __init__(self, plotly_name="depthfade", parent_name="treemap.marker", **kwargs):
        # Pop defaulted options from kwargs so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "style")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", [True, False, "reversed"])
        super(DepthfadeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_treemap.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_treemap.py
from plotly.graph_objs import Treemap
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/lib/_version.py | <gh_stars>1000+
"""Utility to compare (NumPy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
from __future__ import division, absolute_import, print_function
import re
from numpy.compat import basestring
__all__ = ['NumpyVersion']
class NumpyVersion:
    """Parse and compare numpy version strings.

    NumPy has the following versioning scheme (numbers given are examples; they
    can be > 9) in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Comparing needs to be done against a valid version string or other
    `NumpyVersion` instance. Note that all development versions of the same
    (pre-)release compare equal.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    vstring : str
        NumPy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    >>> # skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
    Traceback (most recent call last):
        ...
    ValueError: Not a valid numpy version string
    """

    def __init__(self, vstring):
        self.vstring = vstring
        ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
        if not ver_main:
            raise ValueError("Not a valid numpy version string")

        self.version = ver_main.group()
        self.major, self.minor, self.bugfix = [int(x) for x in
                                               self.version.split('.')]
        if len(vstring) == ver_main.end():
            self.pre_release = 'final'
        else:
            # Whatever follows x.y.z must be an alpha/beta/rc tag (or a
            # dev suffix, handled below); otherwise pre_release is ''.
            alpha = re.match(r'a\d', vstring[ver_main.end():])
            beta = re.match(r'b\d', vstring[ver_main.end():])
            rc = re.match(r'rc\d', vstring[ver_main.end():])
            pre_rel = [m for m in [alpha, beta, rc] if m is not None]
            if pre_rel:
                self.pre_release = pre_rel[0].group()
            else:
                self.pre_release = ''

        # Escaped dot: the old pattern r'.dev' let ANY character precede
        # "dev"; only a literal ".dev" suffix marks a development build.
        self.is_devversion = bool(re.search(r'\.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix"""
        if self.major == other.major:
            if self.minor == other.minor:
                if self.bugfix == other.bugfix:
                    vercmp = 0
                elif self.bugfix > other.bugfix:
                    vercmp = 1
                else:
                    vercmp = -1
            elif self.minor > other.minor:
                vercmp = 1
            else:
                vercmp = -1
        elif self.major > other.major:
            vercmp = 1
        else:
            vercmp = -1
        return vercmp

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final."""
        # 'final' outranks any tag; otherwise plain string comparison works
        # because 'aN' < 'bN' < 'rcN' lexicographically.
        if self.pre_release == other.pre_release:
            vercmp = 0
        elif self.pre_release == 'final':
            vercmp = 1
        elif other.pre_release == 'final':
            vercmp = -1
        elif self.pre_release > other.pre_release:
            vercmp = 1
        else:
            vercmp = -1
        return vercmp

    def _compare(self, other):
        """Return -1, 0 or 1; accepts a version string or NumpyVersion."""
        # py2's `basestring` is gone; on py3 numpy.compat.basestring was an
        # alias of str, so accepting str here is behavior-identical.
        if not isinstance(other, (str, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, str):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp == 0:
            # Same x.y.z version, check for alpha/beta/rc
            vercmp = self._compare_pre_release(other)
            if vercmp == 0:
                # Same version and same pre-release, check if dev version
                if self.is_devversion is other.is_devversion:
                    vercmp = 0
                elif self.is_devversion:
                    vercmp = -1
                else:
                    vercmp = 1
        return vercmp

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        # BUG FIX: this method was misspelled `__repr` and therefore never
        # hooked into repr(); corrected to the real dunder name.
        return "NumpyVersion(%s)" % self.vstring
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/candlestick/_line.py | import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``candlestick.line`` property."""

    def __init__(self, plotly_name="line", parent_name="candlestick", **kwargs):
        # Pop the defaulted options out of kwargs so any caller-supplied
        # value takes precedence over the generated defaults below.
        data_class = kwargs.pop("data_class_str", "Line")
        docs = kwargs.pop(
            "data_docs",
            """
            width
                Sets the width (in px) of line bounding the
                box(es). Note that this style setting can also
                be set per direction via
                `increasing.line.width` and
                `decreasing.line.width`.
            """,
        )
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/_layerdefaults.py | import _plotly_utils.basevalidators
class LayerdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for ``layout.mapbox.layerdefaults`` (template defaults)."""

    def __init__(
        self, plotly_name="layerdefaults", parent_name="layout.mapbox", **kwargs
    ):
        # Defaults are popped from kwargs so explicit overrides win; the
        # generated data_docs for this defaults entry is intentionally empty.
        data_class = kwargs.pop("data_class_str", "Layer")
        docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(LayerdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/surface/contours/y/_project.py | import _plotly_utils.basevalidators
class ProjectValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``surface.contours.y.project`` property."""

    def __init__(
        self, plotly_name="project", parent_name="surface.contours.y", **kwargs
    ):
        # Pop defaulted options from kwargs so explicit overrides win.
        data_class = kwargs.pop("data_class_str", "Project")
        docs = kwargs.pop(
            "data_docs",
            """
            x
                Determines whether or not these contour lines
                are projected on the x plane. If `highlight` is
                set to True (the default), the projected lines
                are shown on hover. If `show` is set to True,
                the projected lines are shown in permanence.
            y
                Determines whether or not these contour lines
                are projected on the y plane. If `highlight` is
                set to True (the default), the projected lines
                are shown on hover. If `show` is set to True,
                the projected lines are shown in permanence.
            z
                Determines whether or not these contour lines
                are projected on the z plane. If `highlight` is
                set to True (the default), the projected lines
                are shown on hover. If `show` is set to True,
                the projected lines are shown in permanence.
            """,
        )
        super(ProjectValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/express/_imshow.py | <reponame>acrucetta/Chicago_COVI_WebApp
import plotly.graph_objs as go
from _plotly_utils.basevalidators import ColorscaleValidator
from ._core import apply_default_cascade
import numpy as np
# xarray is optional: when present, imshow can pull axis labels and
# coordinates from DataArray inputs.
try:
    import xarray
    xarray_imported = True
except ImportError:
    xarray_imported = False
# NOTE(review): appears unused within this chunk — presumably kept for
# parity with skimage's dtype tables; confirm before removing.
_float_types = []
# Adapted from skimage.util.dtype
_integer_types = (
    np.byte,
    np.ubyte,  # 8 bits
    np.short,
    np.ushort,  # 16 bits
    np.intc,
    np.uintc,  # 16 or 32 or 64 bits
    np.int_,
    np.uint,  # 32 or 64 bits
    np.longlong,
    np.ulonglong,
)  # 64 bits
# Map each integer dtype to its (min, max) representable range.
_integer_ranges = {t: (np.iinfo(t).min, np.iinfo(t).max) for t in _integer_types}
def _vectorize_zvalue(z):
if z is None:
return z
elif np.isscalar(z):
return [z] * 3 + [1]
elif len(z) == 1:
return list(z) * 3 + [1]
elif len(z) == 3:
return list(z) + [1]
elif len(z) == 4:
return z
else:
raise ValueError(
"zmax can be a scalar, or an iterable of length 1, 3 or 4. "
"A value of %s was passed for zmax." % str(z)
)
def _infer_zmax_from_type(img):
    """Infer a zmax for *img* from its dtype (or, for floats, its values).

    Integer dtypes use the dtype's own maximum; float images pick the
    smallest conventional full-scale value (1, 255 or 65535, each with a
    5% tolerance) that covers the finite maximum, else 2**32.
    """
    dtype = img.dtype.type
    if dtype in _integer_types:
        return _integer_ranges[dtype][1]
    tolerance = 1.05
    finite_max = img[np.isfinite(img)].max()
    for full_scale in (1, 255, 65535):
        if finite_max <= full_scale * tolerance:
            return full_scale
    return 2 ** 32
def imshow(
    img,
    zmin=None,
    zmax=None,
    origin=None,
    labels={},
    x=None,
    y=None,
    color_continuous_scale=None,
    color_continuous_midpoint=None,
    range_color=None,
    title=None,
    template=None,
    width=None,
    height=None,
    aspect=None,
):
    """
    Display an image, i.e. data on a 2D regular raster.

    Parameters
    ----------
    img: array-like image, or xarray
        The image data. Supported array shapes are

        - (M, N): an image with scalar data. The data is visualized
          using a colormap.
        - (M, N, 3): an image with RGB values.
        - (M, N, 4): an image with RGBA values, i.e. including transparency.

    zmin, zmax : scalar or iterable, optional
        zmin and zmax define the scalar range that the colormap covers. By default,
        zmin and zmax correspond to the min and max values of the datatype for integer
        datatypes (ie [0-255] for uint8 images, [0, 65535] for uint16 images, etc.). For
        a multichannel image of floats, the max of the image is computed and zmax is the
        smallest power of 256 (1, 255, 65535) greater than this max value,
        with a 5% tolerance. For a single-channel image, the max of the image is used.
        Overridden by range_color.

    origin : str, 'upper' or 'lower' (default 'upper')
        position of the [0, 0] pixel of the image array, in the upper left or lower left
        corner. The convention 'upper' is typically used for matrices and images.

    labels : dict with str keys and str values (default `{}`)
        Sets names used in the figure for axis titles (keys ``x`` and ``y``),
        colorbar title and hoverlabel (key ``color``). The values should correspond
        to the desired label to be displayed. If ``img`` is an xarray, dimension
        names are used for axis titles, and long name for the colorbar title
        (unless overridden in ``labels``). Possible keys are: x, y, and color.

    x, y: list-like, optional
        x and y are used to label the axes of single-channel heatmap visualizations and
        their lengths must match the lengths of the second and first dimensions of the
        img argument. They are auto-populated if the input is an xarray.

    color_continuous_scale : str or list of str
        colormap used to map scalar data to colors (for a 2D image). This parameter is
        not used for RGB or RGBA images. If a string is provided, it should be the name
        of a known color scale, and if a list is provided, it should be a list of CSS-
        compatible colors.

    color_continuous_midpoint : number
        If set, computes the bounds of the continuous color scale to have the desired
        midpoint. Overridden by range_color or zmin and zmax.

    range_color : list of two numbers
        If provided, overrides auto-scaling on the continuous color scale, including
        overriding `color_continuous_midpoint`. Also overrides zmin and zmax. Used only
        for single-channel images.

    title : str
        The figure title.

    template : str or dict or plotly.graph_objects.layout.Template instance
        The figure template name or definition.

    width : number
        The figure width in pixels.

    height: number
        The figure height in pixels.

    aspect: 'equal', 'auto', or None
        - 'equal': Ensures an aspect ratio of 1 or pixels (square pixels)
        - 'auto': The axes is kept fixed and the aspect ratio of pixels is
          adjusted so that the data fit in the axes. In general, this will
          result in non-square pixels.
        - if None, 'equal' is used for numpy arrays and 'auto' for xarrays
          (which have typically heterogeneous coordinates)

    Returns
    -------
    fig : graph_objects.Figure containing the displayed image

    See also
    --------
    plotly.graph_objects.Image : image trace
    plotly.graph_objects.Heatmap : heatmap trace

    Notes
    -----
    In order to update and customize the returned figure, use
    `go.Figure.update_traces` or `go.Figure.update_layout`.

    If an xarray is passed, dimensions names and coordinates are used for
    axes labels and ticks.
    """
    # locals() must be captured first: `args` is expected to contain exactly
    # the keyword arguments above, before any helper variables are bound.
    args = locals()
    apply_default_cascade(args)
    # Work on a copy so the shared mutable `{}` default is never modified.
    labels = labels.copy()
    if xarray_imported and isinstance(img, xarray.DataArray):
        y_label, x_label = img.dims[0], img.dims[1]
        # np.datetime64 is not handled correctly by go.Heatmap
        for ax in [x_label, y_label]:
            if np.issubdtype(img.coords[ax].dtype, np.datetime64):
                img.coords[ax] = img.coords[ax].astype(str)
        if x is None:
            x = img.coords[x_label]
        if y is None:
            y = img.coords[y_label]
        if aspect is None:
            aspect = "auto"
        if labels.get("x", None) is None:
            labels["x"] = x_label
        if labels.get("y", None) is None:
            labels["y"] = y_label
        if labels.get("color", None) is None:
            labels["color"] = xarray.plot.utils.label_from_attrs(img)
            labels["color"] = labels["color"].replace("\n", "<br>")
    else:
        if labels.get("x", None) is None:
            labels["x"] = ""
        if labels.get("y", None) is None:
            labels["y"] = ""
        if labels.get("color", None) is None:
            labels["color"] = ""
        if aspect is None:
            aspect = "equal"
    img = np.asanyarray(img)
    # Cast bools to uint8 (also one byte).  Use np.bool_ rather than the
    # deprecated alias np.bool, which was removed in numpy >= 1.24.
    if img.dtype == np.bool_:
        img = 255 * img.astype(np.uint8)
    # For 2d data, use Heatmap trace
    if img.ndim == 2:
        if y is not None and img.shape[0] != len(y):
            raise ValueError(
                "The length of the y vector must match the length of the first "
                + "dimension of the img matrix."
            )
        if x is not None and img.shape[1] != len(x):
            raise ValueError(
                "The length of the x vector must match the length of the second "
                + "dimension of the img matrix."
            )
        trace = go.Heatmap(x=x, y=y, z=img, coloraxis="coloraxis1")
        autorange = True if origin == "lower" else "reversed"
        layout = dict(yaxis=dict(autorange=autorange))
        if aspect == "equal":
            layout["xaxis"] = dict(scaleanchor="y", constrain="domain")
            layout["yaxis"]["constrain"] = "domain"
        colorscale_validator = ColorscaleValidator("colorscale", "imshow")
        # When only one of zmin/zmax is given, complete the pair from the data.
        if zmin is not None and zmax is None:
            zmax = img.max()
        if zmax is not None and zmin is None:
            zmin = img.min()
        range_color = range_color or [zmin, zmax]
        layout["coloraxis1"] = dict(
            colorscale=colorscale_validator.validate_coerce(
                args["color_continuous_scale"]
            ),
            cmid=color_continuous_midpoint,
            cmin=range_color[0],
            cmax=range_color[1],
        )
        if labels["color"]:
            layout["coloraxis1"]["colorbar"] = dict(title_text=labels["color"])
    # For 2D+RGB data, use Image trace
    elif img.ndim == 3 and img.shape[-1] in [3, 4]:
        # BUG FIX: the original tested `img.dtype is not np.uint8`, which is
        # always True because a dtype instance is never identical to the type
        # object np.uint8; compare with != so uint8 images keep their
        # default [0, 255] range instead of an inferred one.
        if zmax is None and img.dtype != np.uint8:
            zmax = _infer_zmax_from_type(img)
        zmin, zmax = _vectorize_zvalue(zmin), _vectorize_zvalue(zmax)
        trace = go.Image(z=img, zmin=zmin, zmax=zmax)
        layout = {}
        if origin == "lower":
            layout["yaxis"] = dict(autorange=True)
    else:
        raise ValueError(
            "px.imshow only accepts 2D single-channel, RGB or RGBA images. "
            "An image of shape %s was provided" % str(img.shape)
        )
    layout_patch = dict()
    for attr_name in ["height", "width"]:
        if args[attr_name]:
            layout_patch[attr_name] = args[attr_name]
    if args["title"]:
        layout_patch["title_text"] = args["title"]
    elif args["template"].layout.margin.t is None:
        layout_patch["margin"] = {"t": 60}
    fig = go.Figure(data=trace, layout=layout)
    fig.update_layout(layout_patch)
    fig.update_traces(
        hovertemplate="%s: %%{x}<br>%s: %%{y}<br>%s: %%{z}<extra></extra>"
        % (labels["x"] or "x", labels["y"] or "y", labels["color"] or "color",)
    )
    if labels["x"]:
        fig.update_xaxes(title_text=labels["x"])
    if labels["y"]:
        fig.update_yaxes(title_text=labels["y"])
    fig.update_layout(template=args["template"], overwrite=True)
    return fig
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/io/_renderers.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import absolute_import, division
import textwrap
from copy import copy
import six
import os
from distutils.version import LooseVersion
from plotly import optional_imports
from plotly.io._base_renderers import (
MimetypeRenderer,
ExternalRenderer,
PlotlyRenderer,
NotebookRenderer,
KaggleRenderer,
AzureRenderer,
ColabRenderer,
JsonRenderer,
PngRenderer,
JpegRenderer,
SvgRenderer,
PdfRenderer,
BrowserRenderer,
IFrameRenderer,
SphinxGalleryHtmlRenderer,
SphinxGalleryOrcaRenderer,
CoCalcRenderer,
DatabricksRenderer,
)
from plotly.io._utils import validate_coerce_fig_to_dict
ipython = optional_imports.get_module("IPython")
ipython_display = optional_imports.get_module("IPython.display")
nbformat = optional_imports.get_module("nbformat")
# Renderer configuration class
# -----------------------------
class RenderersConfig(object):
    """
    Singleton object containing the current renderer configurations
    """

    def __init__(self):
        # Registered renderers by name
        self._renderers = {}
        # '+'-joined name(s) of the current default renderer(s) ("" once set
        # and cleared; None only before first assignment)
        self._default_name = None
        self._default_renderers = []
        self._render_on_display = False
        # Renderers awaiting activation before their next use
        self._to_activate = []

    # ### Magic methods ###
    # Make this act as a dict of renderers
    def __len__(self):
        return len(self._renderers)

    def __contains__(self, item):
        return item in self._renderers

    def __iter__(self):
        return iter(self._renderers)

    def __getitem__(self, item):
        renderer = self._renderers[item]
        return renderer

    def __setitem__(self, key, value):
        if not isinstance(value, (MimetypeRenderer, ExternalRenderer)):
            raise ValueError(
                """\
Renderer must be a subclass of MimetypeRenderer or ExternalRenderer.
    Received value with type: {typ}""".format(
                    typ=type(value)
                )
            )

        self._renderers[key] = value

    def __delitem__(self, key):
        # Remove renderer
        del self._renderers[key]

        # BUG FIX: the original compared `self._default` to key, but no
        # `_default` attribute is ever assigned (the default is stored in
        # `_default_name`), so deleting any renderer raised AttributeError.
        # Clear the default when the deleted renderer was the default.
        if self._default_name == key:
            self.default = None

    def keys(self):
        return self._renderers.keys()

    def items(self):
        return self._renderers.items()

    def update(self, d={}, **kwargs):
        """
        Update one or more renderers from a dict or from input keyword
        arguments.

        Parameters
        ----------
        d: dict
            Dictionary from renderer names to new renderer objects.
        kwargs
            Named argument value pairs where the name is a renderer name
            and the value is a new renderer object
        """
        # The mutable default is safe here: `d` is only read, never mutated.
        for k, v in dict(d, **kwargs).items():
            self[k] = v

    # ### Properties ###
    @property
    def default(self):
        """
        The default renderer, or None if no there is no default

        If not None, the default renderer is used to render
        figures when the `plotly.io.show` function is called on a Figure.

        If `plotly.io.renderers.render_on_display` is True, then the default
        renderer will also be used to display Figures automatically when
        displayed in the Jupyter Notebook

        Multiple renderers may be registered by separating their names with
        '+' characters. For example, to specify rendering compatible with
        the classic Jupyter Notebook, JupyterLab, and PDF export:

        >>> import plotly.io as pio
        >>> pio.renderers.default = 'notebook+jupyterlab+pdf'

        The names of available renderers may be retrieved with:

        >>> import plotly.io as pio
        >>> list(pio.renderers)

        Returns
        -------
        str
        """
        return self._default_name

    @default.setter
    def default(self, value):
        # Handle None
        if not value:
            # _default_name should always be a string so we can do
            # pio.renderers.default.split('+')
            self._default_name = ""
            self._default_renderers = []
            return

        # Store defaults name and list of renderer(s)
        renderer_names = self._validate_coerce_renderers(value)
        self._default_name = value
        self._default_renderers = [self[name] for name in renderer_names]

        # Register renderers for activation before their next use
        self._to_activate = list(self._default_renderers)

    @property
    def render_on_display(self):
        """
        If True, the default mimetype renderers will be used to render
        figures when they are displayed in an IPython context.

        Returns
        -------
        bool
        """
        return self._render_on_display

    @render_on_display.setter
    def render_on_display(self, val):
        self._render_on_display = bool(val)

    def _activate_pending_renderers(self, cls=object):
        """
        Activate all renderers that are waiting in the _to_activate list

        Parameters
        ----------
        cls
            Only activate renders that are subclasses of this class
        """
        to_activate_with_cls = [
            r for r in self._to_activate if cls and isinstance(r, cls)
        ]

        while to_activate_with_cls:
            # Activate renderers from left to right so that right-most
            # renderers take precedence
            renderer = to_activate_with_cls.pop(0)
            renderer.activate()

        self._to_activate = [
            r for r in self._to_activate if not (cls and isinstance(r, cls))
        ]

    def _validate_coerce_renderers(self, renderers_string):
        """
        Input a string and validate that it contains the names of one or more
        valid renderers separated on '+' characters. If valid, return
        a list of the renderer names

        Parameters
        ----------
        renderers_string: str

        Returns
        -------
        list of str
        """
        # Validate value
        if not isinstance(renderers_string, six.string_types):
            raise ValueError("Renderer must be specified as a string")

        renderer_names = renderers_string.split("+")
        invalid = [name for name in renderer_names if name not in self]
        if invalid:
            raise ValueError(
                """
Invalid named renderer(s) received: {}""".format(
                    str(invalid)
                )
            )

        return renderer_names

    def __repr__(self):
        return """\
Renderers configuration
-----------------------
    Default renderer: {default}
    Available renderers:
{available}
""".format(
            default=repr(self.default), available=self._available_renderers_str()
        )

    def _available_renderers_str(self):
        """
        Return nicely wrapped string representation of all
        available renderer names
        """
        available = "\n".join(
            textwrap.wrap(
                repr(list(self)),
                width=79 - 8,
                initial_indent=" " * 8,
                subsequent_indent=" " * 9,
            )
        )
        return available

    def _build_mime_bundle(self, fig_dict, renderers_string=None, **kwargs):
        """
        Build a mime bundle dict containing a key/value pair for each
        MimetypeRenderer specified in either the default renderer string,
        or in the supplied renderers_string argument.

        Note that this method skips any renderers that are not subclasses
        of MimetypeRenderer.

        Parameters
        ----------
        fig_dict: dict
            Figure dictionary
        renderers_string: str or None (default None)
            Renderer string to process rather than the current default
            renderer string

        Returns
        -------
        dict
        """
        if renderers_string:
            renderer_names = self._validate_coerce_renderers(renderers_string)
            renderers_list = [self[name] for name in renderer_names]

            # Activate these non-default renderers
            for renderer in renderers_list:
                if isinstance(renderer, MimetypeRenderer):
                    renderer.activate()
        else:
            # Activate any pending default renderers
            self._activate_pending_renderers(cls=MimetypeRenderer)
            renderers_list = self._default_renderers

        bundle = {}
        for renderer in renderers_list:
            if isinstance(renderer, MimetypeRenderer):
                # Copy so per-call kwargs never mutate the registered instance
                renderer = copy(renderer)
                for k, v in kwargs.items():
                    if hasattr(renderer, k):
                        setattr(renderer, k, v)

                bundle.update(renderer.to_mimebundle(fig_dict))

        return bundle

    def _perform_external_rendering(self, fig_dict, renderers_string=None, **kwargs):
        """
        Perform external rendering for each ExternalRenderer specified
        in either the default renderer string, or in the supplied
        renderers_string argument.

        Note that this method skips any renderers that are not subclasses
        of ExternalRenderer.

        Parameters
        ----------
        fig_dict: dict
            Figure dictionary
        renderers_string: str or None (default None)
            Renderer string to process rather than the current default
            renderer string

        Returns
        -------
        None
        """
        if renderers_string:
            renderer_names = self._validate_coerce_renderers(renderers_string)
            renderers_list = [self[name] for name in renderer_names]

            # Activate these non-default renderers
            for renderer in renderers_list:
                if isinstance(renderer, ExternalRenderer):
                    renderer.activate()
        else:
            self._activate_pending_renderers(cls=ExternalRenderer)
            renderers_list = self._default_renderers

        for renderer in renderers_list:
            if isinstance(renderer, ExternalRenderer):
                renderer = copy(renderer)
                for k, v in kwargs.items():
                    if hasattr(renderer, k):
                        setattr(renderer, k, v)

                renderer.render(fig_dict)
# Make renderers a singleton object
# ---------------------------------
# A single module-level RenderersConfig instance acts as the registry; the
# class itself is then deleted so importers cannot create a second instance.
renderers = RenderersConfig()
del RenderersConfig
# Show
def show(fig, renderer=None, validate=True, **kwargs):
    """
    Show a figure using either the default renderer(s) or the renderer(s)
    specified by the renderer argument

    Parameters
    ----------
    fig: dict of Figure
        The Figure object or figure dict to display
    renderer: str or None (default None)
        A string containing the names of one or more registered renderers
        (separated by '+' characters) or None.  If None, then the default
        renderers specified in plotly.io.renderers.default are used.
    validate: bool (default True)
        True if the figure should be validated before being shown,
        False otherwise.

    Returns
    -------
    None
    """
    figure_dict = validate_coerce_fig_to_dict(fig, validate)

    # First pass: every MimetypeRenderer in the active renderer spec
    # contributes to a mime bundle displayed through IPython.
    mime_bundle = renderers._build_mime_bundle(
        figure_dict, renderers_string=renderer, **kwargs
    )
    if mime_bundle:
        if not ipython_display:
            raise ValueError(
                "Mime type rendering requires ipython but it is not installed"
            )
        nbformat_ok = nbformat and LooseVersion(nbformat.__version__) >= LooseVersion(
            "4.2.0"
        )
        if not nbformat_ok:
            raise ValueError(
                "Mime type rendering requires nbformat>=4.2.0 but it is not installed"
            )
        ipython_display.display(mime_bundle, raw=True)

    # Second pass: hand the figure to every ExternalRenderer (browser, etc.).
    renderers._perform_external_rendering(
        figure_dict, renderers_string=renderer, **kwargs
    )
# Register renderers
# ------------------
# Populate the singleton registry.  Several names deliberately share one
# renderer instance (e.g. jupyterlab/nteract/vscode all use the plotly
# mimetype renderer).

# Plotly mime type
plotly_renderer = PlotlyRenderer()
renderers["plotly_mimetype"] = plotly_renderer
renderers["jupyterlab"] = plotly_renderer
renderers["nteract"] = plotly_renderer
renderers["vscode"] = plotly_renderer

# HTML-based
# NOTE: the same (empty) config dict object is shared by all HTML renderers.
config = {}
renderers["notebook"] = NotebookRenderer(config=config)
renderers["notebook_connected"] = NotebookRenderer(config=config, connected=True)
renderers["kaggle"] = KaggleRenderer(config=config)
renderers["azure"] = AzureRenderer(config=config)
renderers["colab"] = ColabRenderer(config=config)
renderers["cocalc"] = CoCalcRenderer()
renderers["databricks"] = DatabricksRenderer()

# JSON
renderers["json"] = JsonRenderer()

# Static Image
img_kwargs = dict(height=450, width=700)
renderers["png"] = PngRenderer(**img_kwargs)
jpeg_renderer = JpegRenderer(**img_kwargs)
renderers["jpeg"] = jpeg_renderer
renderers["jpg"] = jpeg_renderer
renderers["svg"] = SvgRenderer(**img_kwargs)
renderers["pdf"] = PdfRenderer(**img_kwargs)

# External
renderers["browser"] = BrowserRenderer(config=config)
renderers["firefox"] = BrowserRenderer(config=config, using="firefox")
renderers["chrome"] = BrowserRenderer(config=config, using="chrome")
renderers["chromium"] = BrowserRenderer(config=config, using="chromium")
renderers["iframe"] = IFrameRenderer(config=config, include_plotlyjs=True)
renderers["iframe_connected"] = IFrameRenderer(config=config, include_plotlyjs="cdn")
renderers["sphinx_gallery"] = SphinxGalleryHtmlRenderer()
renderers["sphinx_gallery_png"] = SphinxGalleryOrcaRenderer()

# Set default renderer
# --------------------
# Version 4 renderer configuration
default_renderer = None

# Handle the PLOTLY_RENDERER environment variable
# An explicit PLOTLY_RENDERER value always wins over auto-detection below.
env_renderer = os.environ.get("PLOTLY_RENDERER", None)
if env_renderer:
    try:
        renderers._validate_coerce_renderers(env_renderer)
    except ValueError:
        raise ValueError(
            """
Invalid named renderer(s) specified in the 'PLOTLY_RENDERER'
environment variable: {env_renderer}""".format(
                env_renderer=env_renderer
            )
        )

    default_renderer = env_renderer
elif ipython and ipython.get_ipython():
    # Try to detect environment so that we can enable a useful
    # default renderer
    # Each probe below only runs if no earlier probe matched; the order
    # therefore encodes detection priority.
    if not default_renderer:
        try:
            import google.colab

            default_renderer = "colab"
        except ImportError:
            pass

    # Check if we're running in a Kaggle notebook
    if not default_renderer and os.path.exists("/kaggle/input"):
        default_renderer = "kaggle"

    # Check if we're running in an Azure Notebook
    if not default_renderer and "AZURE_NOTEBOOKS_HOST" in os.environ:
        default_renderer = "azure"

    # Check if we're running in VSCode
    if not default_renderer and "VSCODE_PID" in os.environ:
        default_renderer = "vscode"

    # Check if we're running in nteract
    if not default_renderer and "NTERACT_EXE" in os.environ:
        default_renderer = "nteract"

    # Check if we're running in CoCalc
    if not default_renderer and "COCALC_PROJECT_ID" in os.environ:
        default_renderer = "cocalc"

    if not default_renderer and "DATABRICKS_RUNTIME_VERSION" in os.environ:
        default_renderer = "databricks"

    # Check if we're running in spyder and orca is installed
    if not default_renderer and "SPYDER_ARGS" in os.environ:
        try:
            from plotly.io.orca import validate_executable

            validate_executable()
            default_renderer = "svg"
        except ValueError:
            # orca not found
            pass

    # Check if we're running in ipython terminal
    if not default_renderer and (
        ipython.get_ipython().__class__.__name__ == "TerminalInteractiveShell"
    ):
        default_renderer = "browser"

    # Fallback to renderer combination that will work automatically
    # in the classic notebook (offline), jupyterlab, nteract, vscode, and
    # nbconvert HTML export.
    if not default_renderer:
        default_renderer = "plotly_mimetype+notebook"
else:
    # If ipython isn't available, try to display figures in the default
    # browser
    import webbrowser

    try:
        webbrowser.get()
        default_renderer = "browser"
    except webbrowser.Error:
        # Default browser could not be loaded
        pass

renderers.render_on_display = True
renderers.default = default_renderer
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_histogram2d.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_histogram2d.py
from plotly.graph_objs import Histogram2d
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/exceptions.py | from __future__ import absolute_import
from _plotly_utils.exceptions import *
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/config_init.py | <gh_stars>100-1000
"""
This module is imported from the pandas package __init__.py file
in order to ensure that the core.config options registered here will
be available as soon as the user loads the package. If register_option
is invoked inside specific modules, they will not be registered until that
module is imported, which may or may not be a problem.
If you need to make sure options are available even before a certain
module is imported, register them here rather than in the module.
"""
import warnings
import pandas._config.config as cf
from pandas._config.config import (
is_bool,
is_callable,
is_instance_factory,
is_int,
is_nonnegative_int,
is_one_of_factory,
is_text,
)
# compute
use_bottleneck_doc = """
: bool
Use the bottleneck library to accelerate if it is installed,
the default is True
Valid values: False,True
"""
def use_bottleneck_cb(key):
    # Option callback: push the freshly-set value into pandas.core.nanops.
    from pandas.core import nanops

    enabled = cf.get_option(key)
    nanops.set_use_bottleneck(enabled)
use_numexpr_doc = """
: bool
Use the numexpr library to accelerate computation if it is installed,
the default is True
Valid values: False,True
"""
def use_numexpr_cb(key):
    # Option callback: push the freshly-set value into
    # pandas.core.computation.expressions.
    from pandas.core.computation import expressions

    enabled = cf.get_option(key)
    expressions.set_use_numexpr(enabled)
use_numba_doc = """
: bool
Use the numba engine option for select operations if it is installed,
the default is False
Valid values: False,True
"""
def use_numba_cb(key):
    # Option callback: push the freshly-set value into pandas.core.util.numba_.
    from pandas.core.util import numba_

    enabled = cf.get_option(key)
    numba_.set_use_numba(enabled)
# Register the compute.* acceleration options; each callback immediately
# propagates the new value into the relevant pandas internals.
with cf.config_prefix("compute"):
    cf.register_option(
        "use_bottleneck",
        True,
        use_bottleneck_doc,
        validator=is_bool,
        cb=use_bottleneck_cb,
    )
    cf.register_option(
        "use_numexpr", True, use_numexpr_doc, validator=is_bool, cb=use_numexpr_cb
    )
    cf.register_option(
        "use_numba", False, use_numba_doc, validator=is_bool, cb=use_numba_cb
    )
#
# options from the "display" namespace
pc_precision_doc = """
: int
Floating point output precision (number of significant digits). This is
only a suggestion
"""
pc_colspace_doc = """
: int
Default space for DataFrame columns.
"""
pc_max_rows_doc = """
: int
If max_rows is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the height of the terminal and print a truncated object which fits
the screen height. The IPython notebook, IPython qtconsole, or
IDLE do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_min_rows_doc = """
: int
The numbers of rows to show in a truncated view (when `max_rows` is
exceeded). Ignored when `max_rows` is set to None or 0. When set to
None, follows the value of `max_rows`.
"""
pc_max_cols_doc = """
: int
If max_cols is exceeded, switch to truncate view. Depending on
`large_repr`, objects are either centrally truncated or printed as
a summary view. 'None' value means unlimited.
In case python/IPython is running in a terminal and `large_repr`
equals 'truncate' this can be set to 0 and pandas will auto-detect
the width of the terminal and print a truncated object which fits
the screen width. The IPython notebook, IPython qtconsole, or IDLE
do not run in a terminal and hence it is not possible to do
correct auto-detection.
"""
pc_max_categories_doc = """
: int
This sets the maximum number of categories pandas should output when
printing out a `Categorical` or a Series of dtype "category".
"""
pc_max_info_cols_doc = """
: int
max_info_columns is used in DataFrame.info method to decide if
per column information will be printed.
"""
pc_nb_repr_h_doc = """
: boolean
When True, IPython notebook will use html representation for
pandas objects (if it is available).
"""
pc_pprint_nest_depth = """
: int
Controls the number of nested levels to process when pretty-printing
"""
pc_multi_sparse_doc = """
: boolean
"sparsify" MultiIndex display (don't display repeated
elements in outer levels within groups)
"""
float_format_doc = """
: callable
The callable should accept a floating point number and return
a string with the desired format of the number. This is used
in some places like SeriesFormatter.
See formats.format.EngFormatter for an example.
"""
max_colwidth_doc = """
: int or None
The maximum width in characters of a column in the repr of
a pandas data structure. When the column overflows, a "..."
placeholder is embedded in the output. A 'None' value means unlimited.
"""
colheader_justify_doc = """
: 'left'/'right'
Controls the justification of column headers. used by DataFrameFormatter.
"""
pc_expand_repr_doc = """
: boolean
Whether to print out the full DataFrame repr for wide DataFrames across
multiple lines, `max_columns` is still respected, but the output will
wrap-around across multiple "pages" if its width exceeds `display.width`.
"""
pc_show_dimensions_doc = """
: boolean or 'truncate'
Whether to print out dimensions at the end of DataFrame repr.
If 'truncate' is specified, only print out the dimensions if the
frame is truncated (e.g. not display all rows and/or columns)
"""
pc_east_asian_width_doc = """
: boolean
Whether to use the Unicode East Asian Width to calculate the display text
width.
Enabling this may affect to the performance (default: False)
"""
pc_ambiguous_as_wide_doc = """
: boolean
Whether to handle Unicode characters belong to Ambiguous as Wide (width=2)
(default: False)
"""
pc_latex_repr_doc = """
: boolean
Whether to produce a latex DataFrame representation for jupyter
environments that support it.
(default: False)
"""
pc_table_schema_doc = """
: boolean
Whether to publish a Table Schema representation for frontends
that support it.
(default: False)
"""
pc_html_border_doc = """
: int
A ``border=value`` attribute is inserted in the ``<table>`` tag
for the DataFrame HTML repr.
"""
pc_html_use_mathjax_doc = """\
: boolean
When True, Jupyter notebook will process table contents using MathJax,
rendering mathematical expressions enclosed by the dollar symbol.
(default: True)
"""
pc_width_doc = """
: int
Width of the display in characters. In case python/IPython is running in
a terminal this can be set to None and pandas will correctly auto-detect
the width.
Note that the IPython notebook, IPython qtconsole, or IDLE do not run in a
terminal and hence it is not possible to correctly detect the width.
"""
# User-facing option docs; typo fixes: "smaller then" / "no more then"
# corrected to "than".
pc_chop_threshold_doc = """
: float or None
    if set to a float value, all float values smaller than the given threshold
    will be displayed as exactly 0 by repr and friends.
"""

pc_max_seq_items = """
: int or None
    when pretty-printing a long sequence, no more than `max_seq_items`
    will be printed. If items are omitted, they will be denoted by the
    addition of "..." to the resulting string.
    If set to None, the number of items to be printed is unlimited.
"""
pc_max_info_rows_doc = """
: int or None
df.info() will usually show null-counts for each column.
For large frames this can be quite slow. max_info_rows and max_info_cols
limit this null check only to frames with smaller dimensions than
specified.
"""
pc_large_repr_doc = """
: 'truncate'/'info'
For DataFrames exceeding max_rows/max_cols, the repr (and HTML repr) can
show a truncated table (the default from 0.13), or switch to the view from
df.info() (the behaviour in earlier versions of pandas).
"""
pc_memory_usage_doc = """
: bool, string or None
This specifies if the memory usage of a DataFrame should be displayed when
df.info() is called. Valid values True,False,'deep'
"""
# User-facing option doc; grammar fix: "uses escapes special characters"
# corrected to "escapes special characters".
pc_latex_escape = """
: bool
    This specifies if the to_latex method of a Dataframe escapes special
    characters.
    Valid values: False,True
"""
pc_latex_longtable = """
:bool
This specifies if the to_latex method of a Dataframe uses the longtable
format.
Valid values: False,True
"""
pc_latex_multicolumn = """
: bool
This specifies if the to_latex method of a Dataframe uses multicolumns
to pretty-print MultiIndex columns.
Valid values: False,True
"""
pc_latex_multicolumn_format = """
: string
This specifies the format for multicolumn headers.
Can be surrounded with '|'.
Valid values: 'l', 'c', 'r', 'p{<width>}'
"""
pc_latex_multirow = """
: bool
This specifies if the to_latex method of a Dataframe uses multirows
to pretty-print MultiIndex rows.
Valid values: False,True
"""
def table_schema_cb(key):
    # Option callback: enable/disable the Table Schema repr formatter to
    # match the new option value.
    from pandas.io.formats.printing import _enable_data_resource_formatter

    enable = cf.get_option(key)
    _enable_data_resource_formatter(enable)
def is_terminal() -> bool:
    """
    Detect if Python is running in a terminal.

    Returns True if Python is running in a terminal or False if not.
    """
    try:
        # `get_ipython` only exists when running under IPython; a NameError
        # therefore means a plain interpreter, which we treat as a terminal.
        ip = get_ipython()  # type: ignore
    except NameError:
        return True
    # A "kernel" attribute marks Jupyter-style frontends, not terminals.
    return not hasattr(ip, "kernel")
# Register the display.* options.  Two registrations below were wired to the
# wrong doc constants; see the BUG FIX comments.
with cf.config_prefix("display"):
    cf.register_option("precision", 6, pc_precision_doc, validator=is_nonnegative_int)
    cf.register_option(
        "float_format",
        None,
        float_format_doc,
        validator=is_one_of_factory([None, is_callable]),
    )
    cf.register_option("column_space", 12, validator=is_int)
    cf.register_option(
        "max_info_rows",
        1690785,
        pc_max_info_rows_doc,
        validator=is_instance_factory((int, type(None))),
    )
    cf.register_option("max_rows", 60, pc_max_rows_doc, validator=is_nonnegative_int)
    cf.register_option(
        "min_rows",
        10,
        pc_min_rows_doc,
        validator=is_instance_factory([type(None), int]),
    )
    cf.register_option("max_categories", 8, pc_max_categories_doc, validator=is_int)

    def _deprecate_negative_int_max_colwidth(key):
        # Transitional callback: warn (don't fail) on deprecated negative
        # values until validation is tightened (see TODO below).
        value = cf.get_option(key)
        if value is not None and value < 0:
            warnings.warn(
                "Passing a negative integer is deprecated in version 1.0 and "
                "will not be supported in future version. Instead, use None "
                "to not limit the column width.",
                FutureWarning,
                stacklevel=4,
            )

    cf.register_option(
        # TODO(2.0): change `validator=is_nonnegative_int` see GH#31569
        "max_colwidth",
        50,
        max_colwidth_doc,
        validator=is_instance_factory([type(None), int]),
        cb=_deprecate_negative_int_max_colwidth,
    )
    if is_terminal():
        max_cols = 0  # automatically determine optimal number of columns
    else:
        max_cols = 20  # cannot determine optimal number of columns
    cf.register_option(
        "max_columns", max_cols, pc_max_cols_doc, validator=is_nonnegative_int
    )
    cf.register_option(
        "large_repr",
        "truncate",
        pc_large_repr_doc,
        validator=is_one_of_factory(["truncate", "info"]),
    )
    cf.register_option("max_info_columns", 100, pc_max_info_cols_doc, validator=is_int)
    cf.register_option(
        "colheader_justify", "right", colheader_justify_doc, validator=is_text
    )
    cf.register_option("notebook_repr_html", True, pc_nb_repr_h_doc, validator=is_bool)
    cf.register_option("pprint_nest_depth", 3, pc_pprint_nest_depth, validator=is_int)
    cf.register_option("multi_sparse", True, pc_multi_sparse_doc, validator=is_bool)
    cf.register_option("expand_frame_repr", True, pc_expand_repr_doc)
    cf.register_option(
        "show_dimensions",
        "truncate",
        pc_show_dimensions_doc,
        validator=is_one_of_factory([True, False, "truncate"]),
    )
    cf.register_option("chop_threshold", None, pc_chop_threshold_doc)
    cf.register_option("max_seq_items", 100, pc_max_seq_items)
    cf.register_option(
        "width", 80, pc_width_doc, validator=is_instance_factory([type(None), int])
    )
    cf.register_option(
        "memory_usage",
        True,
        pc_memory_usage_doc,
        validator=is_one_of_factory([None, True, False, "deep"]),
    )
    cf.register_option(
        "unicode.east_asian_width", False, pc_east_asian_width_doc, validator=is_bool
    )
    # BUG FIX: this option was registered with pc_east_asian_width_doc; use
    # the dedicated (previously unused) pc_ambiguous_as_wide_doc constant.
    cf.register_option(
        "unicode.ambiguous_as_wide", False, pc_ambiguous_as_wide_doc, validator=is_bool
    )
    cf.register_option("latex.repr", False, pc_latex_repr_doc, validator=is_bool)
    cf.register_option("latex.escape", True, pc_latex_escape, validator=is_bool)
    cf.register_option("latex.longtable", False, pc_latex_longtable, validator=is_bool)
    cf.register_option(
        "latex.multicolumn", True, pc_latex_multicolumn, validator=is_bool
    )
    # BUG FIX: this option was registered with pc_latex_multicolumn; use the
    # dedicated (previously unused) pc_latex_multicolumn_format constant.
    cf.register_option(
        "latex.multicolumn_format", "l", pc_latex_multicolumn_format, validator=is_text
    )
    cf.register_option("latex.multirow", False, pc_latex_multirow, validator=is_bool)
    cf.register_option(
        "html.table_schema",
        False,
        pc_table_schema_doc,
        validator=is_bool,
        cb=table_schema_cb,
    )
    cf.register_option("html.border", 1, pc_html_border_doc, validator=is_int)
    cf.register_option(
        "html.use_mathjax", True, pc_html_use_mathjax_doc, validator=is_bool
    )
tc_sim_interactive_doc = """
: boolean
Whether to simulate interactive mode for purposes of testing
"""
# mode.sim_interactive simulates an interactive console; used by the tests.
with cf.config_prefix("mode"):
    cf.register_option("sim_interactive", False, tc_sim_interactive_doc)
use_inf_as_null_doc = """
: boolean
use_inf_as_null had been deprecated and will be removed in a future
version. Use `use_inf_as_na` instead.
"""
use_inf_as_na_doc = """
: boolean
True means treat None, NaN, INF, -INF as NA (old way),
False means None and NaN are null, but INF, -INF are not NA
(new way).
"""
# We don't want to start importing everything at the global context level
# or we'll hit circular deps.
def use_inf_as_na_cb(key):
    """Option callback: re-sync pandas' inf-as-NA handling when the option flips.

    The import happens inside the function body so that module import time
    does not hit circular-dependency problems.
    """
    from pandas.core.dtypes.missing import _use_inf_as_na as _sync_inf_handling

    _sync_inf_handling(key)
with cf.config_prefix("mode"):
cf.register_option("use_inf_as_na", False, use_inf_as_na_doc, cb=use_inf_as_na_cb)
cf.register_option(
"use_inf_as_null", False, use_inf_as_null_doc, cb=use_inf_as_na_cb
)
cf.deprecate_option(
"mode.use_inf_as_null", msg=use_inf_as_null_doc, rkey="mode.use_inf_as_na"
)
# user warnings
chained_assignment = """
: string
Raise an exception, warn, or no action if trying to use chained assignment,
The default is warn
"""
with cf.config_prefix("mode"):
cf.register_option(
"chained_assignment",
"warn",
chained_assignment,
validator=is_one_of_factory([None, "warn", "raise"]),
)
# Set up the io.excel specific reader configuration.
reader_engine_doc = """
: string
The default Excel reader engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ["xlrd"]
_xlsm_options = ["xlrd", "openpyxl"]
_xlsx_options = ["xlrd", "openpyxl"]
_ods_options = ["odf"]
_xlsb_options = ["pyxlsb"]
with cf.config_prefix("io.excel.xls"):
cf.register_option(
"reader",
"auto",
reader_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
validator=str,
)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option(
"reader",
"auto",
reader_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
validator=str,
)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option(
"reader",
"auto",
reader_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
validator=str,
)
with cf.config_prefix("io.excel.ods"):
cf.register_option(
"reader",
"auto",
reader_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
validator=str,
)
with cf.config_prefix("io.excel.xlsb"):
cf.register_option(
"reader",
"auto",
reader_engine_doc.format(ext="xlsb", others=", ".join(_xlsb_options)),
validator=str,
)
# Set up the io.excel specific writer configuration.
writer_engine_doc = """
: string
The default Excel writer engine for '{ext}' files. Available options:
auto, {others}.
"""
_xls_options = ["xlwt"]
_xlsm_options = ["openpyxl"]
_xlsx_options = ["openpyxl", "xlsxwriter"]
_ods_options = ["odf"]
with cf.config_prefix("io.excel.xls"):
cf.register_option(
"writer",
"auto",
writer_engine_doc.format(ext="xls", others=", ".join(_xls_options)),
validator=str,
)
with cf.config_prefix("io.excel.xlsm"):
cf.register_option(
"writer",
"auto",
writer_engine_doc.format(ext="xlsm", others=", ".join(_xlsm_options)),
validator=str,
)
with cf.config_prefix("io.excel.xlsx"):
cf.register_option(
"writer",
"auto",
writer_engine_doc.format(ext="xlsx", others=", ".join(_xlsx_options)),
validator=str,
)
with cf.config_prefix("io.excel.ods"):
cf.register_option(
"writer",
"auto",
writer_engine_doc.format(ext="ods", others=", ".join(_ods_options)),
validator=str,
)
# Set up the io.parquet specific configuration.
parquet_engine_doc = """
: string
The default parquet reader/writer engine. Available options:
'auto', 'pyarrow', 'fastparquet', the default is 'auto'
"""
with cf.config_prefix("io.parquet"):
cf.register_option(
"engine",
"auto",
parquet_engine_doc,
validator=is_one_of_factory(["auto", "pyarrow", "fastparquet"]),
)
# --------
# Plotting
# ---------
plotting_backend_doc = """
: str
The plotting backend to use. The default value is "matplotlib", the
backend provided with pandas. Other backends can be specified by
providing the name of the module that implements the backend.
"""
def register_plotting_backend_cb(key):
    """Validate a plotting-backend name by attempting to load it.

    The default backend, "matplotlib", is accepted without any import:
    validating it eagerly would pull in matplotlib at option-registration
    time, so its validation is deferred.
    """
    if key != "matplotlib":
        from pandas.plotting._core import _get_plot_backend

        _get_plot_backend(key)
with cf.config_prefix("plotting"):
cf.register_option(
"backend",
defval="matplotlib",
doc=plotting_backend_doc,
validator=register_plotting_backend_cb,
)
register_converter_doc = """
: bool or 'auto'.
Whether to register converters with matplotlib's units registry for
dates, times, datetimes, and Periods. Toggling to False will remove
the converters, restoring any converters that pandas overwrote.
"""
def register_converter_cb(key):
    """Option callback: (de)register pandas' matplotlib unit converters.

    Imports are local so matplotlib is only touched when the option is
    actually toggled.
    """
    from pandas.plotting import (
        deregister_matplotlib_converters,
        register_matplotlib_converters,
    )

    enable = cf.get_option(key)
    if enable:
        register_matplotlib_converters()
    else:
        deregister_matplotlib_converters()
with cf.config_prefix("plotting.matplotlib"):
cf.register_option(
"register_converters",
"auto",
register_converter_doc,
validator=is_one_of_factory(["auto", True, False]),
cb=register_converter_cb,
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_mesh3d.py | <gh_stars>1000+
from plotly.graph_objs import Mesh3d
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_contour.py | <gh_stars>1000+
from plotly.graph_objs import Contour
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_scalarinherit.py | # -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_
# Helper hierarchy mixing a plain Python base class with the np.float64
# scalar type, in both MRO orders, to exercise subclassing of numpy scalars.
class A(object):
    pass
# A listed before np.float64 in the bases.
class B(A, np.float64):
    pass
class C(B):
    pass
# Diamond-shaped inheritance over B.
class D(C, B):
    pass
# np.float64 listed first in the bases (opposite MRO order to B).
class B0(np.float64, A):
    pass
class C0(B0):
    pass
class TestInherit(object):
    """str() of np.float64 subclasses should render like the plain float."""

    def test_init(self):
        # Variants whose MRO lists the Python base before np.float64.
        for cls, value, expected in ((B, 1.0, '1.0'), (C, 2.0, '2.0'), (D, 3.0, '3.0')):
            assert_(str(cls(value)) == expected)

    def test_init2(self):
        # Variants whose MRO lists np.float64 first.
        for cls, value, expected in ((B0, 1.0, '1.0'), (C0, 2.0, '2.0')):
            assert_(str(cls(value)) == expected)
class TestCharacter(object):
    """Behaviour of numpy string/unicode scalars under + and * operators.

    NOTE(review): ``np.string_`` / ``np.unicode_`` were removed in NumPy 2.0;
    this test targets the vendored NumPy 1.x — confirm before upgrading.
    """
    def test_char_radd(self):
        # GH issue 9620, reached gentype_add and raise TypeError
        np_s = np.string_('abc')
        np_u = np.unicode_('abc')
        s = b'def'
        u = u'def'
        # __radd__ must defer (return NotImplemented) so Python falls back to
        # the left operand's __add__.
        assert_(np_s.__radd__(np_s) is NotImplemented)
        assert_(np_s.__radd__(np_u) is NotImplemented)
        assert_(np_s.__radd__(s) is NotImplemented)
        assert_(np_s.__radd__(u) is NotImplemented)
        assert_(np_u.__radd__(np_s) is NotImplemented)
        assert_(np_u.__radd__(np_u) is NotImplemented)
        assert_(np_u.__radd__(s) is NotImplemented)
        assert_(np_u.__radd__(u) is NotImplemented)
        # Concatenation with a builtin left operand must still work.
        assert_(s + np_s == b'defabc')
        assert_(u + np_u == u'defabc')
        class Mystr(str, np.generic):
            # would segfault
            pass
        ret = s + Mystr('abc')
        assert_(type(ret) is type(s))
    def test_char_repeat(self):
        # Scalar * int should behave like builtin str/bytes repetition.
        np_s = np.string_('abc')
        np_u = np.unicode_('abc')
        res_s = b'abc' * 5
        res_u = u'abc' * 5
        assert_(np_s * 5 == res_s)
        assert_(np_u * 5 == res_u)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_utils/colors/plotlyjs.py | <gh_stars>10-100
# Copied from
# https://github.com/plotly/plotly.js/blob/master/src/components/colorscale/scales.js
_plotlyjs_scales = {
    "Greys": [[0, "rgb(0,0,0)"], [1, "rgb(255,255,255)"]],
    "YlGnBu": [
        [0, "rgb(8,29,88)"],
        [0.125, "rgb(37,52,148)"],
        [0.25, "rgb(34,94,168)"],
        [0.375, "rgb(29,145,192)"],
        [0.5, "rgb(65,182,196)"],
        [0.625, "rgb(127,205,187)"],
        [0.75, "rgb(199,233,180)"],
        [0.875, "rgb(237,248,217)"],
        [1, "rgb(255,255,217)"],
    ],
    "Greens": [
        [0, "rgb(0,68,27)"],
        [0.125, "rgb(0,109,44)"],
        [0.25, "rgb(35,139,69)"],
        [0.375, "rgb(65,171,93)"],
        [0.5, "rgb(116,196,118)"],
        [0.625, "rgb(161,217,155)"],
        [0.75, "rgb(199,233,192)"],
        [0.875, "rgb(229,245,224)"],
        [1, "rgb(247,252,245)"],
    ],
    "YlOrRd": [
        [0, "rgb(128,0,38)"],
        [0.125, "rgb(189,0,38)"],
        [0.25, "rgb(227,26,28)"],
        [0.375, "rgb(252,78,42)"],
        [0.5, "rgb(253,141,60)"],
        [0.625, "rgb(254,178,76)"],
        [0.75, "rgb(254,217,118)"],
        [0.875, "rgb(255,237,160)"],
        [1, "rgb(255,255,204)"],
    ],
    "Bluered": [[0, "rgb(0,0,255)"], [1, "rgb(255,0,0)"]],
    # modified RdBu based on
    # http:#www.kennethmoreland.com/color-maps/
    "RdBu": [
        [0, "rgb(5,10,172)"],
        [0.35, "rgb(106,137,247)"],
        [0.5, "rgb(190,190,190)"],
        [0.6, "rgb(220,170,132)"],
        [0.7, "rgb(230,145,90)"],
        [1, "rgb(178,10,28)"],
    ],
    # Scale for non-negative numeric values
    "Reds": [
        [0, "rgb(220,220,220)"],
        [0.2, "rgb(245,195,157)"],
        [0.4, "rgb(245,160,105)"],
        [1, "rgb(178,10,28)"],
    ],
    # Scale for non-positive numeric values
    "Blues": [
        [0, "rgb(5,10,172)"],
        [0.35, "rgb(40,60,190)"],
        [0.5, "rgb(70,100,245)"],
        [0.6, "rgb(90,120,245)"],
        [0.7, "rgb(106,137,247)"],
        [1, "rgb(220,220,220)"],
    ],
    "Picnic": [
        [0, "rgb(0,0,255)"],
        [0.1, "rgb(51,153,255)"],
        [0.2, "rgb(102,204,255)"],
        [0.3, "rgb(153,204,255)"],
        [0.4, "rgb(204,204,255)"],
        [0.5, "rgb(255,255,255)"],
        [0.6, "rgb(255,204,255)"],
        [0.7, "rgb(255,153,255)"],
        [0.8, "rgb(255,102,204)"],
        [0.9, "rgb(255,102,102)"],
        [1, "rgb(255,0,0)"],
    ],
    "Rainbow": [
        [0, "rgb(150,0,90)"],
        [0.125, "rgb(0,0,200)"],
        [0.25, "rgb(0,25,255)"],
        [0.375, "rgb(0,152,255)"],
        [0.5, "rgb(44,255,150)"],
        [0.625, "rgb(151,255,0)"],
        [0.75, "rgb(255,234,0)"],
        [0.875, "rgb(255,111,0)"],
        [1, "rgb(255,0,0)"],
    ],
    "Portland": [
        [0, "rgb(12,51,131)"],
        [0.25, "rgb(10,136,186)"],
        [0.5, "rgb(242,211,56)"],
        [0.75, "rgb(242,143,56)"],
        [1, "rgb(217,30,30)"],
    ],
    "Jet": [
        [0, "rgb(0,0,131)"],
        [0.125, "rgb(0,60,170)"],
        [0.375, "rgb(5,255,255)"],
        [0.625, "rgb(255,255,0)"],
        [0.875, "rgb(250,0,0)"],
        [1, "rgb(128,0,0)"],
    ],
    "Hot": [
        [0, "rgb(0,0,0)"],
        [0.3, "rgb(230,0,0)"],
        [0.6, "rgb(255,210,0)"],
        [1, "rgb(255,255,255)"],
    ],
    "Blackbody": [
        [0, "rgb(0,0,0)"],
        [0.2, "rgb(230,0,0)"],
        [0.4, "rgb(230,210,0)"],
        [0.7, "rgb(255,255,255)"],
        [1, "rgb(160,200,255)"],
    ],
    "Earth": [
        [0, "rgb(0,0,130)"],
        [0.1, "rgb(0,180,180)"],
        [0.2, "rgb(40,210,40)"],
        [0.4, "rgb(230,230,50)"],
        [0.6, "rgb(120,70,20)"],
        [1, "rgb(255,255,255)"],
    ],
    "Electric": [
        [0, "rgb(0,0,0)"],
        [0.15, "rgb(30,0,100)"],
        [0.4, "rgb(120,0,100)"],
        [0.6, "rgb(160,90,0)"],
        [0.8, "rgb(230,200,0)"],
        [1, "rgb(255,250,220)"],
    ],
    "Viridis": [
        [0, "#440154"],
        [0.06274509803921569, "#48186a"],
        [0.12549019607843137, "#472d7b"],
        [0.18823529411764706, "#424086"],
        [0.25098039215686274, "#3b528b"],
        [0.3137254901960784, "#33638d"],
        [0.3764705882352941, "#2c728e"],
        [0.4392156862745098, "#26828e"],
        [0.5019607843137255, "#21918c"],
        [0.5647058823529412, "#1fa088"],
        [0.6274509803921569, "#28ae80"],
        [0.6901960784313725, "#3fbc73"],
        [0.7529411764705882, "#5ec962"],
        [0.8156862745098039, "#84d44b"],
        [0.8784313725490196, "#addc30"],
        [0.9411764705882353, "#d8e219"],
        [1, "#fde725"],
    ],
    "Cividis": [
        [0.000000, "rgb(0,32,76)"],
        [0.058824, "rgb(0,42,102)"],
        [0.117647, "rgb(0,52,110)"],
        [0.176471, "rgb(39,63,108)"],
        [0.235294, "rgb(60,74,107)"],
        [0.294118, "rgb(76,85,107)"],
        [0.352941, "rgb(91,95,109)"],
        [0.411765, "rgb(104,106,112)"],
        [0.470588, "rgb(117,117,117)"],
        [0.529412, "rgb(131,129,120)"],
        [0.588235, "rgb(146,140,120)"],
        [0.647059, "rgb(161,152,118)"],
        [0.705882, "rgb(176,165,114)"],
        [0.764706, "rgb(192,177,109)"],
        [0.823529, "rgb(209,191,102)"],
        [0.882353, "rgb(225,204,92)"],
        [0.941176, "rgb(243,219,79)"],
        [1.000000, "rgb(255,233,69)"],
    ],
}
# For each scale, publish a module-level variable named after the scale that
# holds just the color sequence (the numeric stops dropped), plus a "<name>_r"
# variant with the colors reversed. Assigning through globals() replaces the
# exec() the original used, and keeping every helper name underscore-prefixed
# fixes a leak in the original: its un-prefixed loop variables (scale_name,
# scale_pairs, scale_sequence) were themselves picked up by the reversal pass,
# producing bogus globals such as ``scale_name_r``.
for _scale_name, _scale_pairs in _plotlyjs_scales.items():
    _colors = [_pair[1] for _pair in _scale_pairs]
    globals()[_scale_name] = _colors
    globals()[_scale_name + "_r"] = _colors[::-1]
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_treemapcolorway.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/_treemapcolorway.py
import _plotly_utils.basevalidators
class TreemapcolorwayValidator(_plotly_utils.basevalidators.ColorlistValidator):
    """Validator for the ``layout.treemapcolorway`` color-list attribute.

    Delegates all validation to ColorlistValidator; this subclass only pins
    the attribute name, parent path, and default edit_type/role metadata.
    """
    def __init__(self, plotly_name="treemapcolorway", parent_name="layout", **kwargs):
        super(TreemapcolorwayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop() lets a caller override the defaults via **kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/datetimes/test_datetimelike.py | """ generic tests from the Datetimelike class """
import pytest
from pandas import DatetimeIndex, date_range
import pandas._testing as tm
from ..datetimelike import DatetimeLike
class TestDatetimeIndex(DatetimeLike):
    """Run the shared DatetimeLike test suite against DatetimeIndex.

    Methods overridden with ``pass`` are covered by dedicated test modules
    (see the inline comments) and are disabled here to avoid duplication.
    """
    # Index subclass under test, consumed by the DatetimeLike base suite.
    _holder = DatetimeIndex
    @pytest.fixture(
        params=[tm.makeDateIndex(10), date_range("20130110", periods=10, freq="-1D")],
        ids=["index_inc", "index_dec"],
    )
    def index(self, request):
        # Parametrized over an increasing and a decreasing DatetimeIndex.
        return request.param
    def create_index(self) -> DatetimeIndex:
        # Factory used by the base-class tests.
        return date_range("20130101", periods=5)
    def test_shift(self):
        pass # handled in test_ops
    def test_pickle_compat_construction(self):
        pass
    def test_intersection(self):
        pass # handled in test_setops
    def test_union(self):
        pass # handled in test_setops
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_surface.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_surface.py
import _plotly_utils.basevalidators
class SurfaceValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``layout.template.data.surface`` trace-template array.

    Thin subclass of CompoundArrayValidator that binds the attribute name,
    parent path, and the Surface data class.
    """
    def __init__(
        self, plotly_name="surface", parent_name="layout.template.data", **kwargs
    ):
        super(SurfaceValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Surface"),
            data_docs=kwargs.pop(
                "data_docs",
                """
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/arrays/_arrow_utils.py | from distutils.version import LooseVersion
import json
import numpy as np
import pyarrow
from pandas.core.arrays.interval import _VALID_CLOSED
_pyarrow_version_ge_015 = LooseVersion(pyarrow.__version__) >= LooseVersion("0.15")
def pyarrow_array_to_numpy_and_mask(arr, dtype):
    """
    Convert a primitive pyarrow.Array to a numpy array and boolean mask based
    on the buffers of the Array.
    Parameters
    ----------
    arr : pyarrow.Array
    dtype : numpy.dtype
    Returns
    -------
    (data, mask)
        Tuple of two numpy arrays with the raw data (with specified dtype) and
        a boolean mask (validity mask, so False means missing)
    """
    # buffers() of a primitive array is [validity bitmap, data buffer].
    buflist = arr.buffers()
    # Slice by arr.offset so sliced arrays view the correct window of data.
    data = np.frombuffer(buflist[1], dtype=dtype)[arr.offset : arr.offset + len(arr)]
    bitmask = buflist[0]
    if bitmask is not None:
        # Reinterpret the validity bitmap as a boolean array of length len(arr).
        # NOTE(review): the bitmask is passed without arr.offset; for a sliced
        # array with a non-zero offset the mask may be misaligned — confirm.
        mask = pyarrow.BooleanArray.from_buffers(
            pyarrow.bool_(), len(arr), [None, bitmask]
        )
        mask = np.asarray(mask)
    else:
        # No validity buffer means every element is valid.
        mask = np.ones(len(arr), dtype=bool)
    return data, mask
if _pyarrow_version_ge_015:
# the pyarrow extension types are only available for pyarrow 0.15+
    class ArrowPeriodType(pyarrow.ExtensionType):
        """Arrow extension type storing pandas Period data as int64 ordinals."""
        def __init__(self, freq):
            # attributes need to be set first before calling
            # super init (as that calls serialize)
            self._freq = freq
            pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period")
        @property
        def freq(self):
            # Period frequency string, e.g. "D".
            return self._freq
        def __arrow_ext_serialize__(self):
            # Persist the freq as JSON metadata alongside the storage type.
            metadata = {"freq": self.freq}
            return json.dumps(metadata).encode()
        @classmethod
        def __arrow_ext_deserialize__(cls, storage_type, serialized):
            metadata = json.loads(serialized.decode())
            return ArrowPeriodType(metadata["freq"])
        def __eq__(self, other):
            # Equal only to another ArrowPeriodType with the same freq.
            if isinstance(other, pyarrow.BaseExtensionType):
                return type(self) == type(other) and self.freq == other.freq
            else:
                return NotImplemented
        def __hash__(self):
            return hash((str(self), self.freq))
        def to_pandas_dtype(self):
            # Local import: keep pyarrow importable without pandas loaded.
            import pandas as pd
            return pd.PeriodDtype(freq=self.freq)
    # register the type with a dummy instance
    _period_type = ArrowPeriodType("D")
    pyarrow.register_extension_type(_period_type)
    class ArrowIntervalType(pyarrow.ExtensionType):
        """Arrow extension type storing pandas Interval data as (left, right) structs."""
        def __init__(self, subtype, closed):
            # attributes need to be set first before calling
            # super init (as that calls serialize)
            assert closed in _VALID_CLOSED
            self._closed = closed
            if not isinstance(subtype, pyarrow.DataType):
                # Accept string aliases like "int64" as well as DataType objects.
                subtype = pyarrow.type_for_alias(str(subtype))
            self._subtype = subtype
            storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
            pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")
        @property
        def subtype(self):
            # Arrow DataType of the interval endpoints.
            return self._subtype
        @property
        def closed(self):
            # Which side(s) the interval is closed on (see _VALID_CLOSED).
            return self._closed
        def __arrow_ext_serialize__(self):
            # Persist subtype and closed as JSON metadata.
            metadata = {"subtype": str(self.subtype), "closed": self.closed}
            return json.dumps(metadata).encode()
        @classmethod
        def __arrow_ext_deserialize__(cls, storage_type, serialized):
            metadata = json.loads(serialized.decode())
            subtype = pyarrow.type_for_alias(metadata["subtype"])
            closed = metadata["closed"]
            return ArrowIntervalType(subtype, closed)
        def __eq__(self, other):
            # Equal only to another ArrowIntervalType with same subtype and closed.
            if isinstance(other, pyarrow.BaseExtensionType):
                return (
                    type(self) == type(other)
                    and self.subtype == other.subtype
                    and self.closed == other.closed
                )
            else:
                return NotImplemented
        def __hash__(self):
            return hash((str(self), str(self.subtype), self.closed))
        def to_pandas_dtype(self):
            # Local import: keep pyarrow importable without pandas loaded.
            import pandas as pd
            return pd.IntervalDtype(self.subtype.to_pandas_dtype())
    # register the type with a dummy instance
    _interval_type = ArrowIntervalType(pyarrow.int64(), "left")
    pyarrow.register_extension_type(_interval_type)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/express/_special_inputs.py | <filename>env/lib/python3.8/site-packages/plotly/express/_special_inputs.py
class IdentityMap(object):
    """
    A read-only, dict-like mapping in which every key maps to itself.

    Passing an instance as e.g. `color_discrete_map` makes Plotly Express use
    the data values themselves as colors instead of cycling through
    `color_discrete_sequence`; the same trick works for any `_map` argument
    to Plotly Express functions, such as `line_dash_map` and `symbol_map`.
    """

    def __getitem__(self, key):
        # Identity lookup: whatever key is requested is the value returned.
        return key

    def __contains__(self, key):
        # Reports membership for every possible key.
        return True

    def copy(self):
        # The object is stateless, so a "copy" can be the object itself.
        return self
class Constant(object):
    """
    Marker passed to Plotly Express in place of a column identifier or
    list-like to indicate that the attribute should take on a single constant
    value for every row. An optional display label may be attached.
    """

    def __init__(self, value, label=None):
        # The constant value applied to every row.
        self.value = value
        # Optional human-readable label for legends/axes.
        self.label = label
class Range(object):
    """
    Marker passed to Plotly Express in place of a column identifier or
    list-like to indicate that the attribute should be mapped onto integers
    starting at 0. An optional display label may be attached.
    """

    def __init__(self, label=None):
        # Optional human-readable label for legends/axes.
        self.label = label
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/waitress/trigger.py | ##############################################################################
#
# Copyright (c) 2001-2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
import socket
import errno
import threading
from . import wasyncore
# Wake up a call to select() running in the main thread.
#
# This is useful in a context where you are using Medusa's I/O
# subsystem to deliver data, but the data is generated by another
# thread. Normally, if Medusa is in the middle of a call to
# select(), new output data generated by another thread will have
# to sit until the call to select() either times out or returns.
# If the trigger is 'pulled' by another thread, it should immediately
# generate a READ event on the trigger object, which will force the
# select() invocation to return.
#
# A common use for this facility: letting Medusa manage I/O for a
# large number of connections; but routing each request through a
# thread chosen from a fixed-size thread pool. When a thread is
# acquired, a transaction is performed, but output data is
# accumulated into buffers that will be emptied more efficiently
# by Medusa. [picture a server that can process database queries
# rapidly, but doesn't want to tie up threads waiting to send data
# to low-bandwidth connections]
#
# The other major feature provided by this class is the ability to
# move work back into the main thread: if you call pull_trigger()
# with a thunk argument, when select() wakes up and receives the
# event it will call your thunk from within that thread. The main
# purpose of this is to remove the need to wrap thread locks around
# Medusa's data structures, which normally do not need them. [To see
# why this is true, imagine this scenario: A thread tries to push some
# new data onto a channel's outgoing data queue at the same time that
# the main thread is trying to remove some]
class _triggerbase(object):
    """OS-independent base class for OS-dependent trigger class."""
    kind = None # subclass must set to "pipe" or "loopback"; used by repr
    def __init__(self):
        self._closed = False
        # `lock` protects the `thunks` list from being traversed and
        # appended to simultaneously.
        self.lock = threading.Lock()
        # List of no-argument callbacks to invoke when the trigger is
        # pulled. These run in the thread running the wasyncore mainloop,
        # regardless of which thread pulls the trigger.
        self.thunks = []
    def readable(self):
        # Always selectable for read; the wakeup byte arrives here.
        return True
    def writable(self):
        # Never has pending output of its own.
        return False
    def handle_connect(self):
        pass
    def handle_close(self):
        self.close()
    # Override the wasyncore close() method, because it doesn't know about
    # (so can't close) all the gimmicks we have open. Subclass must
    # supply a _close() method to do platform-specific closing work. _close()
    # will be called iff we're not already closed.
    def close(self):
        if not self._closed:
            self._closed = True
            self.del_channel()
            self._close() # subclass does OS-specific stuff
    def pull_trigger(self, thunk=None):
        """Wake the select() loop; optionally queue `thunk` to run there."""
        if thunk:
            with self.lock:
                self.thunks.append(thunk)
        self._physical_pull()
    def handle_read(self):
        """Drain wakeup byte(s), then run and clear all queued thunks."""
        try:
            self.recv(8192)
        except (OSError, socket.error):
            return
        with self.lock:
            for thunk in self.thunks:
                try:
                    thunk()
                # NOTE(review): bare except also swallows KeyboardInterrupt /
                # SystemExit raised inside a thunk — confirm this is intended.
                except:
                    nil, t, v, tbinfo = wasyncore.compact_traceback()
                    self.log_info(
                        "exception in trigger thunk: (%s:%s %s)" % (t, v, tbinfo)
                    )
            self.thunks = []
if os.name == "posix":
    class trigger(_triggerbase, wasyncore.file_dispatcher):
        """POSIX trigger: a pipe whose read end sits in the wasyncore map."""
        kind = "pipe"
        def __init__(self, map):
            _triggerbase.__init__(self)
            # Read end goes to the dispatcher; write end is the trigger.
            r, self.trigger = self._fds = os.pipe()
            wasyncore.file_dispatcher.__init__(self, r, map=map)
        def _close(self):
            # Close both pipe fds, then the dispatcher's dup'd fd.
            for fd in self._fds:
                os.close(fd)
            self._fds = []
            wasyncore.file_dispatcher.close(self)
        def _physical_pull(self):
            # One byte is enough to make select() report the read end ready.
            os.write(self.trigger, b"x")
else: # pragma: no cover
# Windows version; uses just sockets, because a pipe isn't select'able
# on Windows.
    class trigger(_triggerbase, wasyncore.dispatcher):
        """Windows trigger: a loopback socket pair (pipes aren't select'able)."""
        kind = "loopback"
        def __init__(self, map):
            _triggerbase.__init__(self)
            # Get a pair of connected sockets. The trigger is the 'w'
            # end of the pair, which is connected to 'r'. 'r' is put
            # in the wasyncore socket map. "pulling the trigger" then
            # means writing something on w, which will wake up r.
            w = socket.socket()
            # Disable buffering -- pulling the trigger sends 1 byte,
            # and we want that sent immediately, to wake up wasyncore's
            # select() ASAP.
            w.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
            count = 0
            while True:
                count += 1
                # Bind to a local port; for efficiency, let the OS pick
                # a free port for us.
                # Unfortunately, stress tests showed that we may not
                # be able to connect to that port ("Address already in
                # use") despite that the OS picked it. This appears
                # to be a race bug in the Windows socket implementation.
                # So we loop until a connect() succeeds (almost always
                # on the first try). See the long thread at
                # http://mail.zope.org/pipermail/zope/2005-July/160433.html
                # for hideous details.
                a = socket.socket()
                a.bind(("127.0.0.1", 0))
                connect_address = a.getsockname() # assigned (host, port) pair
                a.listen(1)
                try:
                    w.connect(connect_address)
                    break # success
                except socket.error as detail:
                    # NOTE(review): `detail[0]` is Python 2-style exception
                    # indexing; on Python 3 OSError is not subscriptable —
                    # should be `detail.errno`. Confirm (path is no-cover).
                    if detail[0] != errno.WSAEADDRINUSE:
                        # "Address already in use" is the only error
                        # I've seen on two WinXP Pro SP2 boxes, under
                        # Pythons 2.3.5 and 2.4.1.
                        raise
                    # (10048, 'Address already in use')
                    # assert count <= 2 # never triggered in Tim's tests
                    if count >= 10: # I've never seen it go above 2
                        a.close()
                        w.close()
                        raise RuntimeError("Cannot bind trigger!")
                    # Close `a` and try again. Note: I originally put a short
                    # sleep() here, but it didn't appear to help or hurt.
                    a.close()
            r, addr = a.accept() # r becomes wasyncore's (self.)socket
            a.close()
            self.trigger = w
            wasyncore.dispatcher.__init__(self, r, map=map)
        def _close(self):
            # self.socket is r, and self.trigger is w, from __init__
            self.socket.close()
            self.trigger.close()
        def _physical_pull(self):
            self.trigger.send(b"x")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/area/marker/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
# On Python < 3.7 (no module-level __getattr__, PEP 562) import all validator
# submodules eagerly; on 3.7+ defer them until first attribute access.
if sys.version_info < (3, 7):
    from ._symbolsrc import SymbolsrcValidator
    from ._symbol import SymbolValidator
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._opacitysrc import OpacitysrcValidator
    from ._opacity import OpacityValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    # relative_import wires up lazy loading via module __getattr__/__dir__.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._symbolsrc.SymbolsrcValidator",
            "._symbol.SymbolValidator",
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._opacitysrc.OpacitysrcValidator",
            "._opacity.OpacityValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexing/multiindex/conftest.py | <filename>env/lib/python3.8/site-packages/pandas/tests/indexing/multiindex/conftest.py<gh_stars>1000+
import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
@pytest.fixture
def multiindex_dataframe_random_data():
    """DataFrame of shape (10, 3) with a 2-level MultiIndex and random data."""
    # Levels/codes spell out rows like ("foo", "one"), ("foo", "two"), ...
    index = MultiIndex(
        levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
        codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
        names=["first", "second"],
    )
    return DataFrame(
        np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
    )
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
    """DataFrame with 3 level MultiIndex (year, month, day) covering
    first 100 business days from 2000-01-01 with random data"""
    tdf = tm.makeTimeDataFrame(100)
    # Group the datetime-indexed frame into (year, month, day) levels.
    ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
    # use Int64Index, to make sure things work
    # NOTE(review): `inplace=` for set_levels/set_names was removed in later
    # pandas; this targets the vendored 1.x — confirm before upgrading.
    ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels], inplace=True)
    ymd.index.set_names(["year", "month", "day"], inplace=True)
    return ymd
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/treemap/_tiling.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class TilingValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``treemap.tiling`` compound attribute.

    Thin subclass of CompoundValidator that binds the attribute name, parent
    path, the Tiling data class, and its generated property documentation.
    """
    def __init__(self, plotly_name="tiling", parent_name="treemap", **kwargs):
        super(TilingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tiling"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            flip
                Determines if the positions obtained from
                solver are flipped on each axis.
            packing
                Determines d3 treemap solver. For more info
                please refer to
                https://github.com/d3/d3-hierarchy#treemap-
                tiling
            pad
                Sets the inner padding (in px).
            squarifyratio
                When using "squarify" `packing` algorithm,
                according to https://github.com/d3/d3-hierarchy
                /blob/master/README.md#squarify_ratio this
                option specifies the desired aspect ratio of
                the generated rectangles. The ratio must be
                specified as a number greater than or equal to
                one. Note that the orientation of the generated
                rectangles (tall or wide) is not implied by the
                ratio; for example, a ratio of two will attempt
                to produce a mixture of rectangles whose
                width:height ratio is either 2:1 or 1:2. When
                using "squarify", unlike d3 which uses the
                Golden Ratio i.e. 1.618034, Plotly applies 1 to
                increase squares in treemap layouts.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/resample/test_timedelta.py | <gh_stars>1000+
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
from pandas.core.indexes.timedeltas import timedelta_range
def test_asfreq_bug():
    """asfreq over a sparse TimedeltaIndex inserts NaN rows for missing 1T bins."""
    df = DataFrame(data=[1, 3], index=[timedelta(), timedelta(minutes=3)])
    result = df.resample("1T").asfreq()
    expected = DataFrame(
        data=[1, np.nan, np.nan, 3],
        index=timedelta_range("0 day", periods=4, freq="1T"),
    )
    tm.assert_frame_equal(result, expected)
def test_resample_with_nat():
    """NaT entries in a TimedeltaIndex are dropped; their bins come back NaN."""
    # GH 13223
    index = pd.to_timedelta(["0s", pd.NaT, "2s"])
    result = DataFrame({"value": [2, 3, 5]}, index).resample("1s").mean()
    expected = DataFrame(
        {"value": [2.5, np.nan, 5.0]},
        index=timedelta_range("0 day", periods=3, freq="1S"),
    )
    tm.assert_frame_equal(result, expected)
def test_resample_as_freq_with_subperiod():
    """asfreq to a frequency finer than the index spacing fills gaps with NaN."""
    # GH 13022
    index = timedelta_range("00:00:00", "00:10:00", freq="5T")
    df = DataFrame(data={"value": [1, 5, 10]}, index=index)
    result = df.resample("2T").asfreq()
    expected_data = {"value": [1, np.nan, np.nan, np.nan, np.nan, 10]}
    expected = DataFrame(
        data=expected_data, index=timedelta_range("00:00:00", "00:10:00", freq="2T")
    )
    tm.assert_frame_equal(result, expected)
def test_resample_with_timedeltas():
    """Summing into 30-minute bins matches a groupby of the minute index // 30."""
    expected = DataFrame({"A": np.arange(1480)})
    expected = expected.groupby(expected.index // 30).sum()
    expected.index = pd.timedelta_range("0 days", freq="30T", periods=50)
    df = DataFrame(
        {"A": np.arange(1480)}, index=pd.to_timedelta(np.arange(1480), unit="T")
    )
    result = df.resample("30T").sum()
    tm.assert_frame_equal(result, expected)
    # Same aggregation through the Series path.
    s = df["A"]
    result = s.resample("30T").sum()
    tm.assert_series_equal(result, expected["A"])
def test_resample_single_period_timedelta():
    """A second-spaced series starting at '1 day' sums pairwise into 2s bins."""
    s = Series(list(range(5)), index=pd.timedelta_range("1 day", freq="s", periods=5))
    result = s.resample("2s").sum()
    expected = Series(
        [1, 5, 4], index=pd.timedelta_range("1 day", freq="2s", periods=3)
    )
    tm.assert_series_equal(result, expected)
def test_resample_timedelta_idempotency():
    """Resampling at the index's own frequency returns the series unchanged."""
    # GH 12072
    index = pd.timedelta_range("0", periods=9, freq="10L")
    series = Series(range(9), index=index)
    result = series.resample("10L").mean()
    expected = series
    tm.assert_series_equal(result, expected)
def test_resample_base_with_timedeltaindex():
    """`base=` shifts the bin origin of a TimedeltaIndex resample.

    NOTE(review): ``base`` was removed in later pandas (use ``offset``/
    ``origin``); this targets the vendored 1.x — confirm before upgrading.
    """
    # GH 10530
    rng = timedelta_range(start="0s", periods=25, freq="s")
    ts = Series(np.random.randn(len(rng)), index=rng)
    with_base = ts.resample("2s", base=5).mean()
    without_base = ts.resample("2s").mean()
    exp_without_base = timedelta_range(start="0s", end="25s", freq="2s")
    exp_with_base = timedelta_range(start="5s", end="29s", freq="2s")
    tm.assert_index_equal(without_base.index, exp_without_base)
    tm.assert_index_equal(with_base.index, exp_with_base)
def test_resample_categorical_data_with_timedeltaindex():
    """Categorical columns survive a mode-style agg over a TimedeltaIndex."""
    # GH #12169
    df = DataFrame({"Group_obj": "A"}, index=pd.to_timedelta(list(range(20)), unit="s"))
    df["Group"] = df["Group_obj"].astype("category")
    # agg picks the most frequent value per 10s bin.
    result = df.resample("10s").agg(lambda x: (x.value_counts().index[0]))
    expected = DataFrame(
        {"Group_obj": ["A", "A"], "Group": ["A", "A"]},
        index=pd.to_timedelta([0, 10], unit="s"),
    )
    expected = expected.reindex(["Group_obj", "Group"], axis=1)
    expected["Group"] = expected["Group_obj"]
    tm.assert_frame_equal(result, expected)
def test_resample_timedelta_values():
    """timedelta64 dtype is preserved when resampling introduces NaT bins."""
    # GH 13119
    # check that timedelta dtype is preserved when NaT values are
    # introduced by the resampling
    times = timedelta_range("1 day", "4 day", freq="4D")
    df = DataFrame({"time": times}, index=times)
    times2 = timedelta_range("1 day", "4 day", freq="2D")
    exp = Series(times2, index=times2, name="time")
    # The 2-day bin at position 1 has no source rows -> NaT.
    exp.iloc[1] = pd.NaT
    res = df.resample("2D").first()["time"]
    tm.assert_series_equal(res, exp)
    # Same result through the Series path.
    res = df["time"].resample("2D").first()
    tm.assert_series_equal(res, exp)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/histogram/cumulative/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/histogram/cumulative/__init__.py
import sys

# Python < 3.7 has no module-level __getattr__ (PEP 562), so the
# validator classes must be imported eagerly.  On newer interpreters
# they are exposed lazily via ``relative_import`` to keep plotly's
# import time down.
if sys.version_info < (3, 7):
    from ._enabled import EnabledValidator
    from ._direction import DirectionValidator
    from ._currentbin import CurrentbinValidator
else:
    from _plotly_utils.importers import relative_import

    # ``relative_import`` returns the lazy-loading hooks that make
    # ``from . import EnabledValidator`` etc. work on demand.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._enabled.EnabledValidator",
            "._direction.DirectionValidator",
            "._currentbin.CurrentbinValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/isosurface/_caps.py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Caps(_BaseTraceHierarchyType):
    """Per-axis cap settings for an isosurface trace.

    Each of the ``x``, ``y`` and ``z`` properties holds a compound
    object controlling whether the cap plane orthogonal to that axis is
    drawn (``show``) and how heavily it is shaded (``fill``).
    """

    # Location of this node inside the trace hierarchy.
    _parent_path_str = "isosurface"
    _path_str = "isosurface.caps"
    _valid_props = {"x", "y", "z"}

    @property
    def x(self):
        """
        Cap settings along the x axis.

        The 'x' property is an instance of X that may be specified as
        an instance of :class:`plotly.graph_objs.isosurface.caps.X` or
        as a dict of string/value properties passed to the X
        constructor.  Supported dict properties are ``fill`` (the fill
        ratio of the caps, 1 meaning entirely shaded, values below one
        opening gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.X
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    @property
    def y(self):
        """
        Cap settings along the y axis.

        The 'y' property is an instance of Y that may be specified as
        an instance of :class:`plotly.graph_objs.isosurface.caps.Y` or
        as a dict of string/value properties passed to the Y
        constructor.  Supported dict properties are ``fill`` (the fill
        ratio of the caps, 1 meaning entirely shaded, values below one
        opening gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.Y
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    @property
    def z(self):
        """
        Cap settings along the z axis.

        The 'z' property is an instance of Z that may be specified as
        an instance of :class:`plotly.graph_objs.isosurface.caps.Z` or
        as a dict of string/value properties passed to the Z
        constructor.  Supported dict properties are ``fill`` (the fill
        ratio of the caps, 1 meaning entirely shaded, values below one
        opening gaps parallel to the edges) and ``show``.

        Returns
        -------
        plotly.graph_objs.isosurface.caps.Z
        """
        return self["z"]

    @z.setter
    def z(self, val):
        self["z"] = val

    @property
    def _prop_descriptions(self):
        return """\
        x
            :class:`plotly.graph_objects.isosurface.caps.X`
            instance or dict with compatible properties
        y
            :class:`plotly.graph_objects.isosurface.caps.Y`
            instance or dict with compatible properties
        z
            :class:`plotly.graph_objects.isosurface.caps.Z`
            instance or dict with compatible properties
        """

    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Caps object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.isosurface.Caps`
        x
            :class:`plotly.graph_objects.isosurface.caps.X`
            instance or dict with compatible properties
        y
            :class:`plotly.graph_objects.isosurface.caps.Y`
            instance or dict with compatible properties
        z
            :class:`plotly.graph_objects.isosurface.caps.Z`
            instance or dict with compatible properties

        Returns
        -------
        Caps
        """
        super(Caps, self).__init__("caps")

        # Internal construction path: the parent figure is handed in
        # directly and no further initialization is required.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize ``arg`` into a plain dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.isosurface.Caps
constructor must be a dict or
an instance of :class:`plotly.graph_objs.isosurface.Caps`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments override entries from ``arg``.
        for prop, override in (("x", x), ("y", y), ("z", z)):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Forward any remaining (unknown) properties/keywords.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset the skip-invalid flag once construction is complete.
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_box.py | <reponame>acrucetta/Chicago_COVI_WebApp
from plotly.graph_objs import Box
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/util/test_assert_extension_array_equal.py | <gh_stars>100-1000
import numpy as np
import pytest
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray
@pytest.mark.parametrize(
"kwargs",
[
dict(), # Default is check_exact=False
dict(check_exact=False),
dict(check_exact=True),
],
)
def test_assert_extension_array_equal_not_exact(kwargs):
# see gh-23709
arr1 = SparseArray([-0.17387645482451206, 0.3414148016424936])
arr2 = SparseArray([-0.17387645482451206, 0.3414148016424937])
if kwargs.get("check_exact", False):
msg = """\
ExtensionArray are different
ExtensionArray values are different \\(50\\.0 %\\)
\\[left\\]: \\[-0\\.17387645482.*, 0\\.341414801642.*\\]
\\[right\\]: \\[-0\\.17387645482.*, 0\\.341414801642.*\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_extension_array_equal(arr1, arr2, **kwargs)
else:
tm.assert_extension_array_equal(arr1, arr2, **kwargs)
@pytest.mark.parametrize("decimals", range(10))
def test_assert_extension_array_equal_less_precise(decimals):
rtol = 0.5 * 10 ** -decimals
arr1 = SparseArray([0.5, 0.123456])
arr2 = SparseArray([0.5, 0.123457])
if decimals >= 5:
msg = """\
ExtensionArray are different
ExtensionArray values are different \\(50\\.0 %\\)
\\[left\\]: \\[0\\.5, 0\\.123456\\]
\\[right\\]: \\[0\\.5, 0\\.123457\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)
else:
tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)
def test_assert_extension_array_equal_dtype_mismatch(check_dtype):
end = 5
kwargs = dict(check_dtype=check_dtype)
arr1 = SparseArray(np.arange(end, dtype="int64"))
arr2 = SparseArray(np.arange(end, dtype="int32"))
if check_dtype:
msg = """\
ExtensionArray are different
Attribute "dtype" are different
\\[left\\]: Sparse\\[int64, 0\\]
\\[right\\]: Sparse\\[int32, 0\\]"""
with pytest.raises(AssertionError, match=msg):
tm.assert_extension_array_equal(arr1, arr2, **kwargs)
else:
tm.assert_extension_array_equal(arr1, arr2, **kwargs)
def test_assert_extension_array_equal_missing_values():
    # The NA masks disagree in the final position, which the comparison
    # must report before comparing any values.
    left = SparseArray([np.nan, 1, 2, np.nan])
    right = SparseArray([np.nan, 1, 2, 3])

    msg = """\
ExtensionArray NA mask are different
ExtensionArray NA mask values are different \\(25\\.0 %\\)
\\[left\\]: \\[True, False, False, True\\]
\\[right\\]: \\[True, False, False, False\\]"""
    with pytest.raises(AssertionError, match=msg):
        tm.assert_extension_array_equal(left, right)
@pytest.mark.parametrize("side", ["left", "right"])
def test_assert_extension_array_equal_non_extension_array(side):
    # Passing a bare ndarray on either side must be rejected with a
    # message naming the offending side.
    plain = np.arange(5)
    wrapped = SparseArray(plain)
    msg = f"{side} is not an ExtensionArray"

    if side == "left":
        args = (plain, wrapped)
    else:
        args = (wrapped, plain)

    with pytest.raises(AssertionError, match=msg):
        tm.assert_extension_array_equal(*args)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tseries/holiday.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>100-1000
from datetime import datetime, timedelta
from typing import List
import warnings
from dateutil.relativedelta import FR, MO, SA, SU, TH, TU, WE # noqa
import numpy as np
from pandas.errors import PerformanceWarning
from pandas import DateOffset, DatetimeIndex, Series, Timestamp, concat, date_range
from pandas.tseries.offsets import Day, Easter
def next_monday(dt):
    """
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday, use Monday instead
    """
    # Saturday (5) jumps two days forward, Sunday (6) one.
    shift = {5: 2, 6: 1}.get(dt.weekday())
    return dt if shift is None else dt + timedelta(shift)
def next_monday_or_tuesday(dt):
    """
    For second holiday of two adjacent ones!
    If holiday falls on Saturday, use following Monday instead;
    if holiday falls on Sunday or Monday, use following Tuesday instead
    (because Monday is already taken by adjacent holiday on the day before)
    """
    dow = dt.weekday()
    if dow in (5, 6):  # weekend -> following Monday/Tuesday
        return dt + timedelta(2)
    if dow == 0:  # Monday is already taken by the adjacent holiday
        return dt + timedelta(1)
    return dt
def previous_friday(dt):
    """
    If holiday falls on Saturday or Sunday, use previous Friday instead.
    """
    # Saturday (5) steps back one day, Sunday (6) two.
    shift = {5: 1, 6: 2}.get(dt.weekday())
    return dt if shift is None else dt - timedelta(shift)
def sunday_to_monday(dt):
    """
    If holiday falls on Sunday, use day thereafter (Monday) instead.
    """
    return dt + timedelta(1) if dt.weekday() == 6 else dt
def weekend_to_monday(dt):
    """
    If holiday falls on Sunday or Saturday,
    use day thereafter (Monday) instead.
    Needed for holidays such as Christmas observation in Europe
    """
    # Sunday (6) advances one day, Saturday (5) two, both landing on Monday.
    shift = {6: 1, 5: 2}.get(dt.weekday())
    return dt if shift is None else dt + timedelta(shift)
def nearest_workday(dt):
    """
    If holiday falls on Saturday, use day before (Friday) instead;
    if holiday falls on Sunday, use day thereafter (Monday) instead.
    """
    dow = dt.weekday()
    if dow == 5:  # Saturday -> preceding Friday
        return dt - timedelta(1)
    return dt + timedelta(1) if dow == 6 else dt
def next_workday(dt):
    """
    returns next weekday used for observances
    """
    candidate = dt + timedelta(days=1)
    # Mon-Fri are 0-4; keep advancing while on a weekend day.
    while candidate.weekday() > 4:
        candidate += timedelta(days=1)
    return candidate
def previous_workday(dt):
    """
    returns previous weekday used for observances
    """
    candidate = dt - timedelta(days=1)
    # Mon-Fri are 0-4; keep stepping back while on a weekend day.
    while candidate.weekday() > 4:
        candidate -= timedelta(days=1)
    return candidate
def before_nearest_workday(dt):
    """
    returns the workday preceding the nearest workday
    """
    observed = nearest_workday(dt)
    return previous_workday(observed)
def after_nearest_workday(dt):
    """
    returns next workday after nearest workday
    needed for Boxing day or multiple holidays in a series
    """
    observed = nearest_workday(dt)
    return next_workday(observed)
class Holiday:
    """
    Class that defines a holiday with start/end dates and rules
    for observance.
    """

    def __init__(
        self,
        name,
        year=None,
        month=None,
        day=None,
        offset=None,
        observance=None,
        start_date=None,
        end_date=None,
        days_of_week=None,
    ):
        """
        Parameters
        ----------
        name : str
            Name of the holiday , defaults to class name
        offset : array of pandas.tseries.offsets or
            class from pandas.tseries.offsets
            computes offset from date
        observance: function
            computes when holiday is given a pandas Timestamp
        days_of_week:
            provide a tuple of days e.g (0,1,2,3,) for Monday Through Thursday
            Monday=0,..,Sunday=6

        Examples
        --------
        >>> from pandas.tseries.holiday import Holiday, nearest_workday
        >>> from dateutil.relativedelta import MO

        >>> USMemorialDay = Holiday(
        ...     "Memorial Day", month=5, day=31, offset=pd.DateOffset(weekday=MO(-1))
        ... )
        >>> USMemorialDay
        Holiday: Memorial Day (month=5, day=31, offset=<DateOffset: weekday=MO(-1)>)

        >>> USLaborDay = Holiday(
        ...     "Labor Day", month=9, day=1, offset=pd.DateOffset(weekday=MO(1))
        ... )
        >>> USLaborDay
        Holiday: Labor Day (month=9, day=1, offset=<DateOffset: weekday=MO(+1)>)

        >>> July3rd = Holiday("July 3rd", month=7, day=3)
        >>> July3rd
        Holiday: July 3rd (month=7, day=3, )

        >>> NewYears = Holiday(
        ...     "New Years Day", month=1, day=1, observance=nearest_workday
        ... )
        >>> NewYears  # doctest: +SKIP
        Holiday: New Years Day (
            month=1, day=1, observance=<function nearest_workday at 0x66545e9bc440>
        )

        >>> July3rd = Holiday("July 3rd", month=7, day=3, days_of_week=(0, 1, 2, 3))
        >>> July3rd
        Holiday: July 3rd (month=7, day=3, )
        """
        # ``offset`` and ``observance`` are mutually exclusive ways of
        # adjusting the nominal (month, day) holiday date.
        if offset is not None and observance is not None:
            raise NotImplementedError("Cannot use both offset and observance.")
        self.name = name
        self.year = year
        self.month = month
        self.day = day
        self.offset = offset
        # Normalize optional validity-window bounds to Timestamps.
        self.start_date = (
            Timestamp(start_date) if start_date is not None else start_date
        )
        self.end_date = Timestamp(end_date) if end_date is not None else end_date
        self.observance = observance
        assert days_of_week is None or type(days_of_week) == tuple
        self.days_of_week = days_of_week

    def __repr__(self) -> str:
        info = ""
        if self.year is not None:
            info += f"year={self.year}, "
        info += f"month={self.month}, day={self.day}, "

        if self.offset is not None:
            info += f"offset={self.offset}"

        if self.observance is not None:
            info += f"observance={self.observance}"

        repr = f"Holiday: {self.name} ({info})"
        return repr

    def dates(self, start_date, end_date, return_name=False):
        """
        Calculate holidays observed between start date and end date

        Parameters
        ----------
        start_date : starting date, datetime-like, optional
        end_date : ending date, datetime-like, optional
        return_name : bool, optional, default=False
            If True, return a series that has dates and holiday names.
            False will only return dates.
        """
        start_date = Timestamp(start_date)
        end_date = Timestamp(end_date)

        filter_start_date = start_date
        filter_end_date = end_date

        # Fixed-year holidays occur exactly once, so no rule machinery
        # is required.
        if self.year is not None:
            dt = Timestamp(datetime(self.year, self.month, self.day))
            if return_name:
                return Series(self.name, index=[dt])
            else:
                return [dt]

        dates = self._reference_dates(start_date, end_date)
        holiday_dates = self._apply_rule(dates)
        if self.days_of_week is not None:
            # Keep only occurrences that fall on the allowed weekdays.
            holiday_dates = holiday_dates[
                np.in1d(holiday_dates.dayofweek, self.days_of_week)
            ]

        # Clip the requested window to the holiday's own validity range
        # (localizing to the caller's timezone before comparing).
        if self.start_date is not None:
            filter_start_date = max(
                self.start_date.tz_localize(filter_start_date.tz), filter_start_date
            )

        if self.end_date is not None:
            filter_end_date = min(
                self.end_date.tz_localize(filter_end_date.tz), filter_end_date
            )
        holiday_dates = holiday_dates[
            (holiday_dates >= filter_start_date) & (holiday_dates <= filter_end_date)
        ]
        if return_name:
            return Series(self.name, index=holiday_dates)
        return holiday_dates

    def _reference_dates(self, start_date, end_date):
        """
        Get reference dates for the holiday.

        Return reference dates for the holiday also returning the year
        prior to the start_date and year following the end_date. This ensures
        that any offsets to be applied will yield the holidays within
        the passed in dates.
        """
        if self.start_date is not None:
            start_date = self.start_date.tz_localize(start_date.tz)

        if self.end_date is not None:
            end_date = self.end_date.tz_localize(start_date.tz)

        year_offset = DateOffset(years=1)
        # Pad by one year on either side so offsets/observances cannot
        # push a holiday out of the generated reference range.
        reference_start_date = Timestamp(
            datetime(start_date.year - 1, self.month, self.day)
        )

        reference_end_date = Timestamp(
            datetime(end_date.year + 1, self.month, self.day)
        )
        # Don't process unnecessary holidays
        dates = date_range(
            start=reference_start_date,
            end=reference_end_date,
            freq=year_offset,
            tz=start_date.tz,
        )

        return dates

    def _apply_rule(self, dates):
        """
        Apply the given offset/observance to a DatetimeIndex of dates.

        Parameters
        ----------
        dates : DatetimeIndex
            Dates to apply the given offset/observance rule

        Returns
        -------
        Dates with rules applied
        """
        if self.observance is not None:
            # An observance maps each nominal date individually.
            return dates.map(lambda d: self.observance(d))

        if self.offset is not None:
            # One or several offsets are applied in sequence.
            if not isinstance(self.offset, list):
                offsets = [self.offset]
            else:
                offsets = self.offset
            for offset in offsets:

                # if we are adding a non-vectorized value
                # ignore the PerformanceWarnings:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", PerformanceWarning)
                    dates += offset
        return dates
# Module-level registry mapping calendar names to calendar classes.
holiday_calendars = {}


def register(cls):
    """Record *cls* in the calendar registry, keyed by its ``name``
    attribute when present, otherwise by the class name."""
    key = cls.name if hasattr(cls, "name") else cls.__name__
    holiday_calendars[key] = cls
def get_calendar(name):
    """
    Return an instance of a calendar based on its name.

    Parameters
    ----------
    name : str
        Calendar name to return an instance of
    """
    # Raises KeyError for unknown names, matching dict semantics.
    calendar_cls = holiday_calendars[name]
    return calendar_cls()
class HolidayCalendarMetaClass(type):
    """Metaclass that auto-registers every calendar class it creates."""

    def __new__(cls, clsname, bases, attrs):
        new_cls = super().__new__(cls, clsname, bases, attrs)
        register(new_cls)
        return new_cls
class AbstractHolidayCalendar(metaclass=HolidayCalendarMetaClass):
    """
    Abstract interface to create holidays following certain rules.
    """

    # Subclasses populate ``rules`` with Holiday instances.
    rules: List[Holiday] = []
    # Default window over which ``holidays`` is computed when the caller
    # supplies no explicit bounds.
    start_date = Timestamp(datetime(1970, 1, 1))
    end_date = Timestamp(datetime(2200, 12, 31))
    # Cached (start, end, holidays-series) triple from the last call.
    _cache = None

    def __init__(self, name=None, rules=None):
        """
        Initializes holiday object with a given set a rules. Normally
        classes just have the rules defined within them.

        Parameters
        ----------
        name : str
            Name of the holiday calendar, defaults to class name
        rules : array of Holiday objects
            A set of rules used to create the holidays.
        """
        super().__init__()
        if name is None:
            name = type(self).__name__
        self.name = name

        if rules is not None:
            self.rules = rules

    def rule_from_name(self, name):
        # Linear scan over the rules; returns None when nothing matches.
        for rule in self.rules:
            if rule.name == name:
                return rule

        return None

    def holidays(self, start=None, end=None, return_name=False):
        """
        Returns a curve with holidays between start_date and end_date

        Parameters
        ----------
        start : starting date, datetime-like, optional
        end : ending date, datetime-like, optional
        return_name : bool, optional
            If True, return a series that has dates and holiday names.
            False will only return a DatetimeIndex of dates.

        Returns
        -------
        DatetimeIndex of holidays
        """
        if self.rules is None:
            raise Exception(
                f"Holiday Calendar {self.name} does not have any rules specified"
            )

        if start is None:
            start = AbstractHolidayCalendar.start_date

        if end is None:
            end = AbstractHolidayCalendar.end_date

        start = Timestamp(start)
        end = Timestamp(end)

        # If we don't have a cache or the dates are outside the prior cache, we
        # get them again
        if self._cache is None or start < self._cache[0] or end > self._cache[1]:
            holidays = [rule.dates(start, end, return_name=True) for rule in self.rules]
            if holidays:
                holidays = concat(holidays)
            else:
                # No rules: an empty object-dtype series keeps the
                # slicing below well-defined.
                holidays = Series(index=DatetimeIndex([]), dtype=object)

            self._cache = (start, end, holidays.sort_index())

        holidays = self._cache[2]
        # Narrow the cached (possibly wider) range to the request.
        holidays = holidays[start:end]

        if return_name:
            return holidays
        else:
            return holidays.index

    @staticmethod
    def merge_class(base, other):
        """
        Merge holiday calendars together. The base calendar
        will take precedence to other. The merge will be done
        based on each holiday's name.

        Parameters
        ----------
        base : AbstractHolidayCalendar
            instance/subclass or array of Holiday objects
        other : AbstractHolidayCalendar
            instance/subclass or array of Holiday objects
        """
        # Accept calendar classes/instances as well as bare Holiday
        # objects or lists of them.
        try:
            other = other.rules
        except AttributeError:
            pass

        if not isinstance(other, list):
            other = [other]
        other_holidays = {holiday.name: holiday for holiday in other}

        try:
            base = base.rules
        except AttributeError:
            pass

        if not isinstance(base, list):
            base = [base]
        base_holidays = {holiday.name: holiday for holiday in base}

        # ``base`` entries overwrite same-named entries from ``other``.
        other_holidays.update(base_holidays)
        return list(other_holidays.values())

    def merge(self, other, inplace=False):
        """
        Merge holiday calendars together. The caller's class
        rules take precedence. The merge will be done
        based on each holiday's name.

        Parameters
        ----------
        other : holiday calendar
        inplace : bool (default=False)
            If True set rule_table to holidays, else return array of Holidays
        """
        holidays = self.merge_class(self, other)
        if inplace:
            self.rules = holidays
        else:
            return holidays
# Commonly used US holiday rules, shared by the calendars below.
USMemorialDay = Holiday(
    "Memorial Day", month=5, day=31, offset=DateOffset(weekday=MO(-1))
)
USLaborDay = Holiday("Labor Day", month=9, day=1, offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday(
    "Columbus Day", month=10, day=1, offset=DateOffset(weekday=MO(2))
)
USThanksgivingDay = Holiday(
    "Thanksgiving", month=11, day=1, offset=DateOffset(weekday=TH(4))
)
# Fix: the holiday's display name had been replaced by a "<NAME>"
# placeholder; restore the upstream pandas value.
USMartinLutherKingJr = Holiday(
    "Martin Luther King Jr. Day",
    start_date=datetime(1986, 1, 1),  # first federal observance was 1986
    month=1,
    day=1,
    offset=DateOffset(weekday=MO(3)),  # third Monday of January
)
USPresidentsDay = Holiday(
    "Presidents Day", month=2, day=1, offset=DateOffset(weekday=MO(3))
)
# Easter-relative holidays: offsets are applied in sequence.
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])

EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
    """
    US Federal Government Holiday Calendar based on rules specified by:
    https://www.opm.gov/policy-data-oversight/
    snow-dismissal-procedures/federal-holidays/
    """

    # Observed federal holidays; ``observance=nearest_workday`` shifts
    # weekend holidays to the closest weekday.
    rules = [
        Holiday("New Years Day", month=1, day=1, observance=nearest_workday),
        USMartinLutherKingJr,
        USPresidentsDay,
        USMemorialDay,
        Holiday("July 4th", month=7, day=4, observance=nearest_workday),
        USLaborDay,
        USColumbusDay,
        Holiday("Veterans Day", month=11, day=11, observance=nearest_workday),
        USThanksgivingDay,
        Holiday("Christmas", month=12, day=25, observance=nearest_workday),
    ]
def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar):
    """Build and return a new calendar class named *name* whose rules are
    the merge of *base* and *other* (with *base* taking precedence)."""
    merged_rules = AbstractHolidayCalendar.merge_class(base, other)
    return type(name, (base_class,), {"rules": merged_rules, "name": name})
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_to_records.py | from collections import abc
import numpy as np
import pytest
from pandas import (
CategoricalDtype,
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
)
import pandas._testing as tm
class TestDataFrameToRecords:
def test_to_records_dt64(self):
df = DataFrame(
[["one", "two", "three"], ["four", "five", "six"]],
index=date_range("2012-01-01", "2012-01-02"),
)
expected = df.index.values[0]
result = df.to_records()["index"][0]
assert expected == result
    def test_to_records_dt64tz_column(self):
        # GH#32535 dont less tz in to_records
        df = DataFrame({"A": date_range("2012-01-01", "2012-01-02", tz="US/Eastern")})

        result = df.to_records()

        # tz-aware column falls back to object dtype so that each entry
        # keeps its timezone information.
        assert result.dtype["A"] == object
        val = result[0][1]
        assert isinstance(val, Timestamp)
        assert val == df.loc[0, "A"]
    def test_to_records_with_multindex(self):
        # GH#3189
        index = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        data = np.zeros((8, 4))
        df = DataFrame(data, index=index)
        # Unnamed MultiIndex levels surface as "level_N" record fields,
        # holding the outer (level 0) labels only.
        r = df.to_records(index=True)["level_0"]
        assert "bar" in r
        assert "one" not in r
def test_to_records_with_Mapping_type(self):
import email
from email.parser import Parser
abc.Mapping.register(email.message.Message)
headers = Parser().parsestr(
"From: <<EMAIL>>\n"
"To: <<EMAIL>>\n"
"Subject: Test message\n"
"\n"
"Body would go here\n"
)
frame = DataFrame.from_records([headers])
all(x in frame for x in ["Type", "Subject", "From"])
def test_to_records_floats(self):
df = DataFrame(np.random.rand(10, 10))
df.to_records()
def test_to_records_index_name(self):
df = DataFrame(np.random.randn(3, 3))
df.index.name = "X"
rs = df.to_records()
assert "X" in rs.dtype.fields
df = DataFrame(np.random.randn(3, 3))
rs = df.to_records()
assert "index" in rs.dtype.fields
df.index = MultiIndex.from_tuples([("a", "x"), ("a", "y"), ("b", "z")])
df.index.names = ["A", None]
rs = df.to_records()
assert "level_0" in rs.dtype.fields
    def test_to_records_with_unicode_index(self):
        # GH#13172
        # unicode_literals conflict with to_records
        # A single-row frame with a string index round-trips into an
        # object-dtype record array.
        result = DataFrame([{"a": "x", "b": "y"}]).set_index("a").to_records()
        expected = np.rec.array([("x", "y")], dtype=[("a", "O"), ("b", "O")])
        tm.assert_almost_equal(result, expected)
    def test_to_records_with_unicode_column_names(self):
        # xref issue: https://github.com/numpy/numpy/issues/2407
        # Issue GH#11879. to_records used to raise an exception when used
        # with column names containing non-ascii characters in Python 2
        result = DataFrame(data={"accented_name_é": [1.0]}).to_records()

        # Note that numpy allows for unicode field names but dtypes need
        # to be specified using dictionary instead of list of tuples.
        expected = np.rec.array(
            [(0, 1.0)],
            dtype={"names": ["index", "accented_name_é"], "formats": ["=i8", "=f8"]},
        )
        tm.assert_almost_equal(result, expected)
    def test_to_records_with_categorical(self):
        # GH#8626

        # dict creation
        df = DataFrame({"A": list("abc")}, dtype="category")
        expected = Series(list("abc"), dtype="category", name="A")
        tm.assert_series_equal(df["A"], expected)

        # list-like creation
        df = DataFrame(list("abc"), dtype="category")
        expected = Series(list("abc"), dtype="category", name=0)
        tm.assert_series_equal(df[0], expected)

        # to record array
        # this coerces: the categorical column becomes plain objects in
        # the resulting record array.
        result = df.to_records()
        expected = np.rec.array(
            [(0, "a"), (1, "b"), (2, "c")], dtype=[("index", "=i8"), ("0", "O")]
        )
        tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
# No dtypes --> default to array dtypes.
(
dict(),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Should have no effect in this case.
(
dict(index=True),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "<i8"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Column dtype applied across the board. Index unaffected.
(
dict(column_dtypes="<U4"),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U4"), ("B", "<U4"), ("C", "<U4")],
),
),
# Index dtype applied across the board. Columns unaffected.
(
dict(index_dtypes="<U1"),
np.rec.array(
[("0", 1, 0.2, "a"), ("1", 2, 1.5, "bc")],
dtype=[("index", "<U1"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Pass in a type instance.
(
dict(column_dtypes=str),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")],
),
),
# Pass in a dtype instance.
(
dict(column_dtypes=np.dtype("unicode")),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "<U"), ("B", "<U"), ("C", "<U")],
),
),
# Pass in a dictionary (name-only).
(
dict(column_dtypes={"A": np.int8, "B": np.float32, "C": "<U2"}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "<U2")],
),
),
# Pass in a dictionary (indices-only).
(
dict(index_dtypes={0: "int16"}),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Ignore index mappings if index is not True.
(
dict(index=False, index_dtypes="<U2"),
np.rec.array(
[(1, 0.2, "a"), (2, 1.5, "bc")],
dtype=[("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Non-existent names / indices in mapping should not error.
(
dict(index_dtypes={0: "int16", "not-there": "float32"}),
np.rec.array(
[(0, 1, 0.2, "a"), (1, 2, 1.5, "bc")],
dtype=[("index", "i2"), ("A", "<i8"), ("B", "<f8"), ("C", "O")],
),
),
# Names / indices not in mapping default to array dtype.
(
dict(column_dtypes={"A": np.int8, "B": np.float32}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
# Names / indices not in dtype mapping default to array dtype.
(
dict(column_dtypes={"A": np.dtype("int8"), "B": np.dtype("float32")}),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<i8"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
# Mixture of everything.
(
dict(column_dtypes={"A": np.int8, "B": np.float32}, index_dtypes="<U2"),
np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<U2"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
),
),
# Invalid dype values.
(
dict(index=False, column_dtypes=list()),
(ValueError, "Invalid dtype \\[\\] specified for column A"),
),
(
dict(index=False, column_dtypes={"A": "int32", "B": 5}),
(ValueError, "Invalid dtype 5 specified for column B"),
),
# Numpy can't handle EA types, so check error is raised
(
dict(
index=False,
column_dtypes={"A": "int32", "B": CategoricalDtype(["a", "b"])},
),
(ValueError, "Invalid dtype category specified for column B"),
),
# Check that bad types raise
(
dict(index=False, column_dtypes={"A": "int32", "B": "foo"}),
(TypeError, "data type [\"']foo[\"'] not understood"),
),
],
)
def test_to_records_dtype(self, kwargs, expected):
# see GH#18146
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
if not isinstance(expected, np.recarray):
with pytest.raises(expected[0], match=expected[1]):
df.to_records(**kwargs)
else:
result = df.to_records(**kwargs)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize(
"df,kwargs,expected",
[
# MultiIndex in the index.
(
DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=list("abc")
).set_index(["a", "b"]),
dict(column_dtypes="float64", index_dtypes={0: "int32", 1: "int8"}),
np.rec.array(
[(1, 2, 3.0), (4, 5, 6.0), (7, 8, 9.0)],
dtype=[("a", "<i4"), ("b", "i1"), ("c", "<f8")],
),
),
# MultiIndex in the columns.
(
DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=MultiIndex.from_tuples(
[("a", "d"), ("b", "e"), ("c", "f")]
),
),
dict(column_dtypes={0: "<U1", 2: "float32"}, index_dtypes="float32"),
np.rec.array(
[(0.0, "1", 2, 3.0), (1.0, "4", 5, 6.0), (2.0, "7", 8, 9.0)],
dtype=[
("index", "<f4"),
("('a', 'd')", "<U1"),
("('b', 'e')", "<i8"),
("('c', 'f')", "<f4"),
],
),
),
# MultiIndex in both the columns and index.
(
DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=MultiIndex.from_tuples(
[("a", "d"), ("b", "e"), ("c", "f")], names=list("ab")
),
index=MultiIndex.from_tuples(
[("d", -4), ("d", -5), ("f", -6)], names=list("cd")
),
),
dict(column_dtypes="float64", index_dtypes={0: "<U2", 1: "int8"}),
np.rec.array(
[
("d", -4, 1.0, 2.0, 3.0),
("d", -5, 4.0, 5.0, 6.0),
("f", -6, 7, 8, 9.0),
],
dtype=[
("c", "<U2"),
("d", "i1"),
("('a', 'd')", "<f8"),
("('b', 'e')", "<f8"),
("('c', 'f')", "<f8"),
],
),
),
],
)
def test_to_records_dtype_mi(self, df, kwargs, expected):
# see GH#18146
result = df.to_records(**kwargs)
tm.assert_almost_equal(result, expected)
def test_to_records_dict_like(self):
# see GH#18146
class DictLike:
def __init__(self, **kwargs):
self.d = kwargs.copy()
def __getitem__(self, key):
return self.d.__getitem__(key)
def __contains__(self, key) -> bool:
return key in self.d
def keys(self):
return self.d.keys()
df = DataFrame({"A": [1, 2], "B": [0.2, 1.5], "C": ["a", "bc"]})
dtype_mappings = dict(
column_dtypes=DictLike(**{"A": np.int8, "B": np.float32}),
index_dtypes="<U2",
)
result = df.to_records(**dtype_mappings)
expected = np.rec.array(
[("0", "1", "0.2", "a"), ("1", "2", "1.5", "bc")],
dtype=[("index", "<U2"), ("A", "i1"), ("B", "<f4"), ("C", "O")],
)
tm.assert_almost_equal(result, expected)
    @pytest.mark.parametrize("tz", ["UTC", "GMT", "US/Eastern"])
    def test_to_records_datetimeindex_with_tz(self, tz):
        # GH#13937
        dr = date_range("2016-01-01", periods=10, freq="S", tz=tz)
        df = DataFrame({"datetime": dr}, index=dr)

        expected = df.to_records()
        result = df.tz_convert("UTC").to_records()

        # both converted to UTC, so they are equal
        tm.assert_numpy_array_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/slices/_x.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``volume.slices.x`` attribute.

    Auto-generated by the plotly code generator; validation is delegated
    to the ``X`` data class named by ``data_class_str``, and ``data_docs``
    carries the user-facing property documentation.
    """

    def __init__(self, plotly_name="x", parent_name="volume.slices", **kwargs):
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "X"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `slices`. The
                default fill value of the `slices` is 1 meaning
                that they are entirely shaded. On the other
                hand Applying a `fill` ratio less than one
                would allow the creation of openings parallel
                to the edges.
            locations
                Specifies the location(s) of slices on the
                axis. When not specified slices would be
                created for all points of the axis x except
                start and end.
            locationssrc
                Sets the source reference on Chart Studio Cloud
                for locations .
            show
                Determines whether or not slice planes about
                the x dimension are drawn.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/test_reductions.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>.venv/lib/python3.8/site-packages/pandas/tests/series/test_reductions.py
import pandas as pd
from pandas import Series
def test_reductions_td64_with_nat():
    # GH#8617: reductions over a timedelta64 Series must skip NaT entries
    # rather than propagating them.
    ser = Series([0, pd.NaT], dtype="m8[ns]")
    expected = ser[0]

    assert ser.min() == expected
    assert ser.max() == expected
    assert ser.median() == expected
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/ternary/aaxis/_title.py | import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
    """Validator for the compound ``layout.ternary.aaxis.title`` property.

    Auto-generated plotly validator: delegates to the base TitleValidator,
    supplying the target data class name ("Title") and the attribute
    documentation below.
    """

    def __init__(
        self, plotly_name="title", parent_name="layout.ternary.aaxis", **kwargs
    ):
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated graph_objs class instances are coerced to.
            data_class_str=kwargs.pop("data_class_str", "Title"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            font
                Sets this axis' title font. Note that the
                title's font used to be customized by the now
                deprecated `titlefont` attribute.
            text
                Sets the title of this axis. Note that before
                the existence of `title.text`, the title's
                contents used to be defined as the `title`
                attribute itself. This behavior has been
                deprecated.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/core/tools/datetimes.py | <filename>.venv/lib/python3.8/site-packages/pandas/core/tools/datetimes.py
from collections import abc
from datetime import datetime
from functools import partial
from itertools import islice
from typing import (
TYPE_CHECKING,
Callable,
List,
Optional,
Tuple,
TypeVar,
Union,
overload,
)
import warnings
import numpy as np
from pandas._libs import tslib, tslibs
from pandas._libs.tslibs import Timestamp, conversion, parsing
from pandas._libs.tslibs.parsing import ( # noqa
DateParseError,
_format_is_iso,
_guess_datetime_format,
)
from pandas._libs.tslibs.strptime import array_strptime
from pandas._typing import ArrayLike, Label, Timezone
from pandas.core.dtypes.common import (
ensure_object,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64tz_dtype,
is_float,
is_integer,
is_integer_dtype,
is_list_like,
is_numeric_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.missing import notna
from pandas.arrays import DatetimeArray, IntegerArray
from pandas.core import algorithms
from pandas.core.algorithms import unique
from pandas.core.arrays.datetimes import (
maybe_convert_dtype,
objects_to_datetime64ns,
tz_to_dtype,
)
from pandas.core.indexes.base import Index
from pandas.core.indexes.datetimes import DatetimeIndex
if TYPE_CHECKING:
from pandas import Series # noqa:F401
from pandas._libs.tslibs.nattype import NaTType # noqa:F401
# ---------------------------------------------------------------------
# types used in annotations

# Anything convertible to a DatetimeIndex: python sequences, arrays, Series.
ArrayConvertible = Union[List, Tuple, ArrayLike, "Series"]
# Scalar inputs accepted by to_datetime (epoch numbers or date strings).
Scalar = Union[int, float, str]
# Constrained TypeVar so the overloads below can map a scalar argument type
# to a scalar (Timestamp/NaT) return type.
DatetimeScalar = TypeVar("DatetimeScalar", Scalar, datetime)

DatetimeScalarOrArrayConvertible = Union[DatetimeScalar, ArrayConvertible]
# ---------------------------------------------------------------------
def _guess_datetime_format_for_array(arr, **kwargs):
# Try to guess the format based on the first non-NaN element
non_nan_elements = notna(arr).nonzero()[0]
if len(non_nan_elements):
return _guess_datetime_format(arr[non_nan_elements[0]], **kwargs)
def should_cache(
    arg: ArrayConvertible, unique_share: float = 0.7, check_count: Optional[int] = None
) -> bool:
    """
    Decide whether caching date-conversion results is likely to pay off.

    Caching helps when the input is duplicate-heavy: we sample the first
    ``check_count`` elements and cache only if the share of unique values
    among them is at most ``unique_share``.

    Parameters
    ----------
    arg: listlike, tuple, 1-d array, Series
    unique_share: float, default=0.7, optional
        0 < unique_share < 1
    check_count: int, optional
        0 <= check_count <= len(arg)

    Returns
    -------
    do_caching: bool

    Notes
    -----
    Sequences shorter than 50 elements are never cached; up to 5000 elements
    we sample ten percent of them, and beyond that only the first 500.
    All thresholds were chosen empirically.
    """
    if check_count is None:
        # Derive the sample size from the heuristics described above.
        if len(arg) <= 50:
            # Too small: the gain from caching is negligible.
            return False
        check_count = int(len(arg) * 0.1) if len(arg) <= 5000 else 500
    else:
        assert (
            0 <= check_count <= len(arg)
        ), "check_count must be in next bounds: [0; len(arg)]"
        if check_count == 0:
            return False

    assert 0 < unique_share < 1, "unique_share must be in next bounds: (0; 1)"

    sampled_unique = set(islice(arg, check_count))
    # Cache only when the sampled prefix is sufficiently duplicate-heavy.
    return len(sampled_unique) <= check_count * unique_share
def _maybe_cache(
arg: ArrayConvertible,
format: Optional[str],
cache: bool,
convert_listlike: Callable,
) -> "Series":
"""
Create a cache of unique dates from an array of dates
Parameters
----------
arg : listlike, tuple, 1-d array, Series
format : string
Strftime format to parse time
cache : boolean
True attempts to create a cache of converted values
convert_listlike : function
Conversion function to apply on dates
Returns
-------
cache_array : Series
Cache of converted, unique dates. Can be empty
"""
from pandas import Series
cache_array = Series(dtype=object)
if cache:
# Perform a quicker unique check
if not should_cache(arg):
return cache_array
unique_dates = unique(arg)
if len(unique_dates) < len(arg):
cache_dates = convert_listlike(unique_dates, format)
cache_array = Series(cache_dates, index=unique_dates)
return cache_array
def _box_as_indexlike(
dt_array: ArrayLike, utc: Optional[bool] = None, name: Label = None
) -> Index:
"""
Properly boxes the ndarray of datetimes to DatetimeIndex
if it is possible or to generic Index instead
Parameters
----------
dt_array: 1-d array
Array of datetimes to be wrapped in an Index.
tz : object
None or 'utc'
name : string, default None
Name for a resulting index
Returns
-------
result : datetime of converted dates
- DatetimeIndex if convertible to sole datetime64 type
- general Index otherwise
"""
if is_datetime64_dtype(dt_array):
tz = "utc" if utc else None
return DatetimeIndex(dt_array, tz=tz, name=name)
return Index(dt_array, name=name)
def _convert_and_box_cache(
arg: DatetimeScalarOrArrayConvertible,
cache_array: "Series",
name: Optional[str] = None,
) -> "Index":
"""
Convert array of dates with a cache and wrap the result in an Index.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
cache_array : Series
Cache of converted, unique dates
name : string, default None
Name for a DatetimeIndex
Returns
-------
result : Index-like of converted dates
"""
from pandas import Series
result = Series(arg).map(cache_array)
return _box_as_indexlike(result, utc=None, name=name)
def _return_parsed_timezone_results(result, timezones, tz, name):
"""
Return results from array_strptime if a %z or %Z directive was passed.
Parameters
----------
result : ndarray
int64 date representations of the dates
timezones : ndarray
pytz timezone objects
tz : object
None or pytz timezone object
name : string, default None
Name for a DatetimeIndex
Returns
-------
tz_result : Index-like of parsed dates with timezone
"""
tz_results = np.array(
[Timestamp(res).tz_localize(zone) for res, zone in zip(result, timezones)]
)
if tz is not None:
# Convert to the same tz
tz_results = np.array([tz_result.tz_convert(tz) for tz_result in tz_results])
return Index(tz_results, name=name)
def _convert_listlike_datetimes(
    arg,
    format: Optional[str],
    name: Label = None,
    tz: Optional[Timezone] = None,
    unit: Optional[str] = None,
    errors: Optional[str] = None,
    infer_datetime_format: Optional[bool] = None,
    dayfirst: Optional[bool] = None,
    yearfirst: Optional[bool] = None,
    exact: Optional[bool] = None,
):
    """
    Helper function for to_datetime. Performs the conversions of 1D listlike
    of dates

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be parsed
    format : string
        None or strftime format string to parse with
    name : object
        None or string for the Index name
    tz : object
        None or 'utc'
    unit : string
        None or string of the frequency of the passed data
    errors : string
        error handing behaviors from to_datetime, 'raise', 'coerce', 'ignore'
    infer_datetime_format : boolean
        inferring format behavior from to_datetime
    dayfirst : boolean
        dayfirst parsing behavior from to_datetime
    yearfirst : boolean
        yearfirst parsing behavior from to_datetime
    exact : boolean
        exact format matching behavior from to_datetime

    Returns
    -------
    Index-like of parsed dates
    """
    if isinstance(arg, (list, tuple)):
        # Normalize python sequences to an object ndarray for the paths below.
        arg = np.array(arg, dtype="O")

    arg_dtype = getattr(arg, "dtype", None)
    # these are shortcutable
    if is_datetime64tz_dtype(arg_dtype):
        # Already tz-aware datetime64: at most re-localize/convert the tz.
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            return DatetimeIndex(arg, tz=tz, name=name)
        if tz == "utc":
            # error: Item "DatetimeIndex" of "Union[DatetimeArray, DatetimeIndex]" has
            # no attribute "tz_convert"
            arg = arg.tz_convert(None).tz_localize(tz)  # type: ignore
        return arg
    elif is_datetime64_ns_dtype(arg_dtype):
        # Already naive datetime64[ns]: only wrapping/localization is needed.
        if not isinstance(arg, (DatetimeArray, DatetimeIndex)):
            try:
                return DatetimeIndex(arg, tz=tz, name=name)
            except ValueError:
                pass
        elif tz:
            # DatetimeArray, DatetimeIndex
            return arg.tz_localize(tz)

        return arg

    elif unit is not None:
        # Numeric epochs with an explicit unit (e.g. unit="s").
        if format is not None:
            raise ValueError("cannot specify both format and unit")
        arg = getattr(arg, "_values", arg)

        # GH 30050 pass an ndarray to tslib.array_with_unit_to_datetime
        # because it expects an ndarray argument
        if isinstance(arg, IntegerArray):
            result = arg.astype(f"datetime64[{unit}]")
            tz_parsed = None
        else:
            result, tz_parsed = tslib.array_with_unit_to_datetime(
                arg, unit, errors=errors
            )

        if errors == "ignore":
            result = Index(result, name=name)
        else:
            result = DatetimeIndex(result, name=name)
        # GH 23758: We may still need to localize the result with tz
        # GH 25546: Apply tz_parsed first (from arg), then tz (from caller)
        # result will be naive but in UTC
        try:
            result = result.tz_localize("UTC").tz_convert(tz_parsed)
        except AttributeError:
            # Regular Index from 'ignore' path
            return result
        if tz is not None:
            if result.tz is None:
                result = result.tz_localize(tz)
            else:
                result = result.tz_convert(tz)
        return result
    elif getattr(arg, "ndim", 1) > 1:
        raise TypeError(
            "arg must be a string, datetime, list, tuple, 1-d array, or Series"
        )

    # warn if passing timedelta64, raise for PeriodDtype
    # NB: this must come after unit transformation
    orig_arg = arg
    try:
        arg, _ = maybe_convert_dtype(arg, copy=False)
    except TypeError:
        if errors == "coerce":
            result = np.array(["NaT"], dtype="datetime64[ns]").repeat(len(arg))
            return DatetimeIndex(result, name=name)
        elif errors == "ignore":
            result = Index(arg, name=name)
            return result
        raise

    arg = ensure_object(arg)
    require_iso8601 = False

    if infer_datetime_format and format is None:
        format = _guess_datetime_format_for_array(arg, dayfirst=dayfirst)

    if format is not None:
        # There is a special fast-path for iso8601 formatted
        # datetime strings, so in those cases don't use the inferred
        # format because this path makes process slower in this
        # special case
        format_is_iso8601 = _format_is_iso(format)
        if format_is_iso8601:
            require_iso8601 = not infer_datetime_format
            format = None

    tz_parsed = None
    result = None

    if format is not None:
        try:
            # shortcut formatting here
            if format == "%Y%m%d":
                try:
                    # pass orig_arg as float-dtype may have been converted to
                    # datetime64[ns]
                    orig_arg = ensure_object(orig_arg)
                    result = _attempt_YYYYMMDD(orig_arg, errors=errors)
                except (ValueError, TypeError, tslibs.OutOfBoundsDatetime) as err:
                    raise ValueError(
                        "cannot convert the input to '%Y%m%d' date format"
                    ) from err

            # fallback
            if result is None:
                try:
                    result, timezones = array_strptime(
                        arg, format, exact=exact, errors=errors
                    )
                    if "%Z" in format or "%z" in format:
                        return _return_parsed_timezone_results(
                            result, timezones, tz, name
                        )
                except tslibs.OutOfBoundsDatetime:
                    if errors == "raise":
                        raise
                    elif errors == "coerce":
                        result = np.empty(arg.shape, dtype="M8[ns]")
                        iresult = result.view("i8")
                        iresult.fill(tslibs.iNaT)
                    else:
                        result = arg
                except ValueError:
                    # if format was inferred, try falling back
                    # to array_to_datetime - terminate here
                    # for specified formats
                    if not infer_datetime_format:
                        if errors == "raise":
                            raise
                        elif errors == "coerce":
                            result = np.empty(arg.shape, dtype="M8[ns]")
                            iresult = result.view("i8")
                            iresult.fill(tslibs.iNaT)
                        else:
                            result = arg
        except ValueError as e:
            # Fallback to try to convert datetime objects if timezone-aware
            # datetime objects are found without passing `utc=True`
            try:
                values, tz = conversion.datetime_to_datetime64(arg)
                dta = DatetimeArray(values, dtype=tz_to_dtype(tz))
                return DatetimeIndex._simple_new(dta, name=name)
            except (ValueError, TypeError):
                raise e

    if result is None:
        # No usable format: parse element-by-element (object path).
        assert format is None or infer_datetime_format
        utc = tz == "utc"
        result, tz_parsed = objects_to_datetime64ns(
            arg,
            dayfirst=dayfirst,
            yearfirst=yearfirst,
            utc=utc,
            errors=errors,
            require_iso8601=require_iso8601,
            allow_object=True,
        )

    if tz_parsed is not None:
        # We can take a shortcut since the datetime64 numpy array
        # is in UTC
        dta = DatetimeArray(result, dtype=tz_to_dtype(tz_parsed))
        return DatetimeIndex._simple_new(dta, name=name)

    utc = tz == "utc"
    return _box_as_indexlike(result, utc=utc, name=name)
def _adjust_to_origin(arg, origin, unit):
    """
    Helper function for to_datetime.
    Adjust input argument to the specified origin

    Parameters
    ----------
    arg : list, tuple, ndarray, Series, Index
        date to be adjusted
    origin : 'julian' or Timestamp
        origin offset for the arg
    unit : string
        passed unit from to_datetime, must be 'D'

    Returns
    -------
    ndarray or scalar of adjusted date(s)

    Raises
    ------
    ValueError
        If the origin/unit/arg combination is invalid.
    tslibs.OutOfBoundsDatetime
        If the adjusted values fall outside the Timestamp-representable range.
    """
    if origin == "julian":
        # Julian day numbers: subtract the julian day of the unix epoch.
        original = arg
        j0 = Timestamp(0).to_julian_date()
        if unit != "D":
            raise ValueError("unit must be 'D' for origin='julian'")
        try:
            arg = arg - j0
        except TypeError as err:
            raise ValueError(
                "incompatible 'arg' type for given 'origin'='julian'"
            ) from err

        # preemptively check this for a nice range
        j_max = Timestamp.max.to_julian_date() - j0
        j_min = Timestamp.min.to_julian_date() - j0
        if np.any(arg > j_max) or np.any(arg < j_min):
            raise tslibs.OutOfBoundsDatetime(
                f"{original} is Out of Bounds for origin='julian'"
            )
    else:
        # arg must be numeric
        if not (
            (is_scalar(arg) and (is_integer(arg) or is_float(arg)))
            or is_numeric_dtype(np.asarray(arg))
        ):
            raise ValueError(
                f"'{arg}' is not compatible with origin='{origin}'; "
                "it must be numeric with a unit specified"
            )

        # we are going to offset back to unix / epoch time
        try:
            offset = Timestamp(origin)
        except tslibs.OutOfBoundsDatetime as err:
            raise tslibs.OutOfBoundsDatetime(
                f"origin {origin} is Out of Bounds"
            ) from err
        except ValueError as err:
            raise ValueError(
                f"origin {origin} cannot be converted to a Timestamp"
            ) from err

        if offset.tz is not None:
            raise ValueError(f"origin offset {offset} must be tz-naive")
        offset -= Timestamp(0)

        # convert the offset to the unit of the arg
        # this should be lossless in terms of precision
        offset = offset // tslibs.Timedelta(1, unit=unit)

        # scalars & ndarray-like can handle the addition
        if is_list_like(arg) and not isinstance(arg, (ABCSeries, Index, np.ndarray)):
            arg = np.asarray(arg)
        arg = arg + offset
    return arg
# The overloads below give ``to_datetime`` an input-dependent return type:
# a scalar yields a Timestamp (or NaT), a Series yields a Series, and a
# list/tuple yields a DatetimeIndex.
@overload
def to_datetime(
    arg: DatetimeScalar,
    errors: str = ...,
    dayfirst: bool = ...,
    yearfirst: bool = ...,
    utc: Optional[bool] = ...,
    format: Optional[str] = ...,
    exact: bool = ...,
    unit: Optional[str] = ...,
    infer_datetime_format: bool = ...,
    origin=...,
    cache: bool = ...,
) -> Union[DatetimeScalar, "NaTType"]:
    ...


@overload
def to_datetime(
    arg: "Series",
    errors: str = ...,
    dayfirst: bool = ...,
    yearfirst: bool = ...,
    utc: Optional[bool] = ...,
    format: Optional[str] = ...,
    exact: bool = ...,
    unit: Optional[str] = ...,
    infer_datetime_format: bool = ...,
    origin=...,
    cache: bool = ...,
) -> "Series":
    ...


@overload
def to_datetime(
    arg: Union[List, Tuple],
    errors: str = ...,
    dayfirst: bool = ...,
    yearfirst: bool = ...,
    utc: Optional[bool] = ...,
    format: Optional[str] = ...,
    exact: bool = ...,
    unit: Optional[str] = ...,
    infer_datetime_format: bool = ...,
    origin=...,
    cache: bool = ...,
) -> DatetimeIndex:
    ...
def to_datetime(
    arg: DatetimeScalarOrArrayConvertible,
    errors: str = "raise",
    dayfirst: bool = False,
    yearfirst: bool = False,
    utc: Optional[bool] = None,
    format: Optional[str] = None,
    exact: bool = True,
    unit: Optional[str] = None,
    infer_datetime_format: bool = False,
    origin="unix",
    cache: bool = True,
) -> Union[DatetimeIndex, "Series", DatetimeScalar, "NaTType"]:
    """
    Convert argument to datetime.

    Parameters
    ----------
    arg : int, float, str, datetime, list, tuple, 1-d array, Series, DataFrame/dict-like
        The object to convert to a datetime.
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'
        - If 'raise', then invalid parsing will raise an exception.
        - If 'coerce', then invalid parsing will be set as NaT.
        - If 'ignore', then invalid parsing will return the input.
    dayfirst : bool, default False
        Specify a date parse order if `arg` is str or its list-likes.
        If True, parses dates with the day first, eg 10/11/12 is parsed as
        2012-11-10.
        Warning: dayfirst=True is not strict, but will prefer to parse
        with day first (this is a known bug, based on dateutil behavior).
    yearfirst : bool, default False
        Specify a date parse order if `arg` is str or its list-likes.

        - If True parses dates with the year first, eg 10/11/12 is parsed as
          2010-11-12.
        - If both dayfirst and yearfirst are True, yearfirst is preceded (same
          as dateutil).

        Warning: yearfirst=True is not strict, but will prefer to parse
        with year first (this is a known bug, based on dateutil behavior).
    utc : bool, default None
        Return UTC DatetimeIndex if True (converting any tz-aware
        datetime.datetime objects as well).
    format : str, default None
        The strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
        all the way up to nanoseconds.
        See strftime documentation for more information on choices:
        https://docs.python.org/3/library/datetime.html#strftime-and-strptime-behavior.
    exact : bool, True by default
        Behaves as:
        - If True, require an exact format match.
        - If False, allow the format to match anywhere in the target string.
    unit : str, default 'ns'
        The unit of the arg (D,s,ms,us,ns) denote the unit, which is an
        integer or float number. This will be based off the origin.
        Example, with unit='ms' and origin='unix' (the default), this
        would calculate the number of milliseconds to the unix epoch start.
    infer_datetime_format : bool, default False
        If True and no `format` is given, attempt to infer the format of the
        datetime strings based on the first non-NaN element,
        and if it can be inferred, switch to a faster method of parsing them.
        In some cases this can increase the parsing speed by ~5-10x.
    origin : scalar, default 'unix'
        Define the reference date. The numeric values would be parsed as number
        of units (defined by `unit`) since this reference date.

        - If 'unix' (or POSIX) time; origin is set to 1970-01-01.
        - If 'julian', unit must be 'D', and origin is set to beginning of
          Julian Calendar. Julian day number 0 is assigned to the day starting
          at noon on January 1, 4713 BC.
        - If Timestamp convertible, origin is set to Timestamp identified by
          origin.
    cache : bool, default True
        If True, use a cache of unique, converted dates to apply the datetime
        conversion. May produce significant speed-up when parsing duplicate
        date strings, especially ones with timezone offsets. The cache is only
        used when there are at least 50 values. The presence of out-of-bounds
        values will render the cache unusable and may slow down parsing.

        .. versionadded:: 0.23.0

        .. versionchanged:: 0.25.0
            - changed default value from False to True.

    Returns
    -------
    datetime
        If parsing succeeded.
        Return type depends on input:

        - list-like: DatetimeIndex
        - Series: Series of datetime64 dtype
        - scalar: Timestamp

        In case when it is not possible to return designated types (e.g. when
        any element of input is before Timestamp.min or after Timestamp.max)
        return will have datetime.datetime type (or corresponding
        array/Series).

    See Also
    --------
    DataFrame.astype : Cast argument to a specified dtype.
    to_timedelta : Convert argument to timedelta.
    convert_dtypes : Convert dtypes.

    Examples
    --------
    Assembling a datetime from multiple columns of a DataFrame. The keys can be
    common abbreviations like ['year', 'month', 'day', 'minute', 'second',
    'ms', 'us', 'ns']) or plurals of the same

    >>> df = pd.DataFrame({'year': [2015, 2016],
    ...                    'month': [2, 3],
    ...                    'day': [4, 5]})
    >>> pd.to_datetime(df)
    0   2015-02-04
    1   2016-03-05
    dtype: datetime64[ns]

    If a date does not meet the `timestamp limitations
    <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
    #timeseries-timestamp-limits>`_, passing errors='ignore'
    will return the original input instead of raising any exception.

    Passing errors='coerce' will force an out-of-bounds date to NaT,
    in addition to forcing non-dates (or non-parseable dates) to NaT.

    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='ignore')
    datetime.datetime(1300, 1, 1, 0, 0)
    >>> pd.to_datetime('13000101', format='%Y%m%d', errors='coerce')
    NaT

    Passing infer_datetime_format=True can often-times speedup a parsing
    if its not an ISO8601 format exactly, but in a regular format.

    >>> s = pd.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
    >>> s.head()
    0    3/11/2000
    1    3/12/2000
    2    3/13/2000
    3    3/11/2000
    4    3/12/2000
    dtype: object

    >>> %timeit pd.to_datetime(s, infer_datetime_format=True)  # doctest: +SKIP
    100 loops, best of 3: 10.4 ms per loop

    >>> %timeit pd.to_datetime(s, infer_datetime_format=False)  # doctest: +SKIP
    1 loop, best of 3: 471 ms per loop

    Using a unix epoch time

    >>> pd.to_datetime(1490195805, unit='s')
    Timestamp('2017-03-22 15:16:45')
    >>> pd.to_datetime(1490195805433502912, unit='ns')
    Timestamp('2017-03-22 15:16:45.433502912')

    .. warning:: For float arg, precision rounding might happen. To prevent
        unexpected behavior use a fixed-width exact type.

    Using a non-unix epoch origin

    >>> pd.to_datetime([1, 2, 3], unit='D',
    ...                origin=pd.Timestamp('1960-01-01'))
    DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], \
dtype='datetime64[ns]', freq=None)
    """
    if arg is None:
        return None

    if origin != "unix":
        # Shift numeric values so they are relative to the unix epoch.
        arg = _adjust_to_origin(arg, origin, unit)

    tz = "utc" if utc else None
    # Pre-bind everything except the data so each branch below only needs to
    # supply (values, format).
    convert_listlike = partial(
        _convert_listlike_datetimes,
        tz=tz,
        unit=unit,
        dayfirst=dayfirst,
        yearfirst=yearfirst,
        errors=errors,
        exact=exact,
        infer_datetime_format=infer_datetime_format,
    )

    if isinstance(arg, Timestamp):
        # Scalar Timestamp: only tz handling is needed.
        result = arg
        if tz is not None:
            if arg.tz is not None:
                result = result.tz_convert(tz)
            else:
                result = result.tz_localize(tz)
    elif isinstance(arg, ABCSeries):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = arg.map(cache_array)
        else:
            values = convert_listlike(arg._values, format)
            result = arg._constructor(values, index=arg.index, name=arg.name)
    elif isinstance(arg, (ABCDataFrame, abc.MutableMapping)):
        # Assemble a datetime from per-unit columns (year/month/day/...).
        result = _assemble_from_unit_mappings(arg, errors, tz)
    elif isinstance(arg, Index):
        cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array, name=arg.name)
        else:
            result = convert_listlike(arg, format, name=arg.name)
    elif is_list_like(arg):
        try:
            cache_array = _maybe_cache(arg, format, cache, convert_listlike)
        except tslibs.OutOfBoundsDatetime:
            # caching attempts to create a DatetimeIndex, which may raise
            # an OOB. If that's the desired behavior, then just reraise...
            if errors == "raise":
                raise
            # ... otherwise, continue without the cache.
            from pandas import Series

            cache_array = Series([], dtype=object)  # just an empty array
        if not cache_array.empty:
            result = _convert_and_box_cache(arg, cache_array)
        else:
            result = convert_listlike(arg, format)
    else:
        # Remaining scalars (str/datetime/numeric): convert via a one-element
        # array and unbox the single result.
        result = convert_listlike(np.array([arg]), format)[0]

    return result
# mappings for assembling units
# Accepted column-name spellings (singular/plural, long/short forms) mapped
# to the canonical unit codes used by _assemble_from_unit_mappings.  Note
# "m"/"M" are matched case-sensitively there (minute vs. month).
_unit_map = {
    "year": "year",
    "years": "year",
    "month": "month",
    "months": "month",
    "day": "day",
    "days": "day",
    "hour": "h",
    "hours": "h",
    "minute": "m",
    "minutes": "m",
    "second": "s",
    "seconds": "s",
    "ms": "ms",
    "millisecond": "ms",
    "milliseconds": "ms",
    "us": "us",
    "microsecond": "us",
    "microseconds": "us",
    "ns": "ns",
    "nanosecond": "ns",
    "nanoseconds": "ns",
}
def _assemble_from_unit_mappings(arg, errors, tz):
    """
    assemble the unit specified fields from the arg (DataFrame)
    Return a Series for actual parsing

    Parameters
    ----------
    arg : DataFrame
    errors : {'ignore', 'raise', 'coerce'}, default 'raise'

        - If 'raise', then invalid parsing will raise an exception
        - If 'coerce', then invalid parsing will be set as NaT
        - If 'ignore', then invalid parsing will return the input
    tz : None or 'utc'

    Returns
    -------
    Series

    Raises
    ------
    ValueError
        If columns are duplicated, required units (year/month/day) are
        missing, unrecognized columns are present, or assembly fails.
    """
    from pandas import to_timedelta, to_numeric, DataFrame

    arg = DataFrame(arg)
    if not arg.columns.is_unique:
        raise ValueError("cannot assemble with duplicate keys")

    # replace passed unit with _unit_map
    def f(value):
        # Map a column name to its canonical unit code; lower-case fallback
        # handles spelled-out names, but "m"/"M" stay case significant.
        if value in _unit_map:
            return _unit_map[value]

        # m is case significant
        if value.lower() in _unit_map:
            return _unit_map[value.lower()]

        return value

    unit = {k: f(k) for k in arg.keys()}
    unit_rev = {v: k for k, v in unit.items()}

    # we require at least Ymd
    required = ["year", "month", "day"]
    req = sorted(set(required) - set(unit_rev.keys()))
    if len(req):
        _required = ",".join(req)
        raise ValueError(
            "to assemble mappings requires at least that "
            f"[year, month, day] be specified: [{_required}] is missing"
        )

    # keys we don't recognize
    excess = sorted(set(unit_rev.keys()) - set(_unit_map.values()))
    if len(excess):
        _excess = ",".join(excess)
        raise ValueError(
            f"extra keys have been passed to the datetime assemblage: [{_excess}]"
        )

    def coerce(values):
        # we allow coercion to if errors allows
        values = to_numeric(values, errors=errors)

        # prevent overflow in case of int8 or int16
        if is_integer_dtype(values):
            values = values.astype("int64", copy=False)
        return values

    # Assemble the date part as integer YYYYMMDD and parse it in one shot.
    values = (
        coerce(arg[unit_rev["year"]]) * 10000
        + coerce(arg[unit_rev["month"]]) * 100
        + coerce(arg[unit_rev["day"]])
    )
    try:
        values = to_datetime(values, format="%Y%m%d", errors=errors, utc=tz)
    except (TypeError, ValueError) as err:
        raise ValueError(f"cannot assemble the datetimes: {err}") from err

    # Add any sub-day components as timedeltas on top of the date part.
    for u in ["h", "m", "s", "ms", "us", "ns"]:
        value = unit_rev.get(u)
        if value is not None and value in arg:
            try:
                values += to_timedelta(coerce(arg[value]), unit=u, errors=errors)
            except (TypeError, ValueError) as err:
                raise ValueError(
                    f"cannot assemble the datetimes [{value}]: {err}"
                ) from err
    return values
def _attempt_YYYYMMDD(arg, errors):
    """
    try to parse the YYYYMMDD/%Y%m%d format, try to deal with NaT-like,
    arg is a passed in as an object dtype, but could really be ints/strings
    with nan-like/or floats (e.g. with nan)

    Parameters
    ----------
    arg : passed value
    errors : 'raise','ignore','coerce'

    Returns
    -------
    ndarray of datetime64[ns] values, or None if no parsing strategy worked
    (the caller then falls through to slower parsing paths).
    """

    def calc(carg):
        # calculate the actual result
        carg = carg.astype(object)
        # Split the integer YYYYMMDD into year/month/day components.
        parsed = parsing.try_parse_year_month_day(
            carg / 10000, carg / 100 % 100, carg % 100
        )
        return tslib.array_to_datetime(parsed, errors=errors)[0]

    def calc_with_mask(carg, mask):
        # Like calc, but only for positions where `mask` is True; all other
        # positions become NaT.
        result = np.empty(carg.shape, dtype="M8[ns]")
        iresult = result.view("i8")
        iresult[~mask] = tslibs.iNaT

        masked_result = calc(carg[mask].astype(np.float64).astype(np.int64))
        result[mask] = masked_result.astype("M8[ns]")
        return result

    # try intlike / strings that are ints
    try:
        return calc(arg.astype(np.int64))
    except (ValueError, OverflowError, TypeError):
        pass

    # a float with actual np.nan
    try:
        carg = arg.astype(np.float64)
        return calc_with_mask(carg, notna(carg))
    except (ValueError, OverflowError, TypeError):
        pass

    # string with NaN-like
    try:
        mask = ~algorithms.isin(arg, list(tslibs.nat_strings))
        return calc_with_mask(arg, mask)
    except (ValueError, OverflowError, TypeError):
        pass

    return None
def to_time(arg, format=None, infer_time_format=False, errors="raise"):
    """Deprecated alias for :func:`pandas.core.tools.times.to_time` (GH#34145)."""
    warnings.warn(
        "`to_time` has been moved, should be imported from pandas.core.tools.times. "
        "This alias will be removed in a future version.",
        FutureWarning,
        stacklevel=2,
    )
    from pandas.core.tools.times import to_time as _to_time

    return _to_time(arg, format, infer_time_format, errors)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/series/methods/test_truncate.py | <filename>env/lib/python3.8/site-packages/pandas/tests/series/methods/test_truncate.py
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.tseries.offsets import BDay
class TestTruncate:
    """Tests for Series.truncate."""

    def test_truncate(self, datetime_series):
        # Exercise truncate with both/either/neither bound, bounds that are
        # not present in the index, empty results, and before > after errors.
        offset = BDay()

        ts = datetime_series[::3]

        start, end = datetime_series.index[3], datetime_series.index[6]
        start_missing, end_missing = datetime_series.index[2], datetime_series.index[7]

        # neither specified
        truncated = ts.truncate()
        tm.assert_series_equal(truncated, ts)

        # both specified
        expected = ts[1:3]

        truncated = ts.truncate(start, end)
        tm.assert_series_equal(truncated, expected)

        truncated = ts.truncate(start_missing, end_missing)
        tm.assert_series_equal(truncated, expected)

        # start specified
        expected = ts[1:]

        truncated = ts.truncate(before=start)
        tm.assert_series_equal(truncated, expected)

        truncated = ts.truncate(before=start_missing)
        tm.assert_series_equal(truncated, expected)

        # end specified
        expected = ts[:3]

        truncated = ts.truncate(after=end)
        tm.assert_series_equal(truncated, expected)

        truncated = ts.truncate(after=end_missing)
        tm.assert_series_equal(truncated, expected)

        # corner case, empty series returned
        truncated = ts.truncate(after=datetime_series.index[0] - offset)
        assert len(truncated) == 0

        truncated = ts.truncate(before=datetime_series.index[-1] + offset)
        assert len(truncated) == 0

        msg = "Truncate: 1999-12-31 00:00:00 must be after 2000-02-14 00:00:00"
        with pytest.raises(ValueError, match=msg):
            ts.truncate(
                before=datetime_series.index[-1] + offset,
                after=datetime_series.index[0] - offset,
            )

    def test_truncate_nonsortedindex(self):
        # GH#17935: truncate must refuse to operate on an unsorted index.
        s = pd.Series(["a", "b", "c", "d", "e"], index=[5, 3, 2, 9, 0])
        msg = "truncate requires a sorted index"

        with pytest.raises(ValueError, match=msg):
            s.truncate(before=3, after=9)

        rng = pd.date_range("2011-01-01", "2012-01-01", freq="W")
        ts = pd.Series(np.random.randn(len(rng)), index=rng)
        msg = "truncate requires a sorted index"

        with pytest.raises(ValueError, match=msg):
            ts.sort_values(ascending=False).truncate(before="2011-11", after="2011-12")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/waitress/channel.py | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import socket
import threading
import time
import traceback
from waitress.buffers import (
OverflowableBuffer,
ReadOnlyFileBasedBuffer,
)
from waitress.parser import HTTPRequestParser
from waitress.task import (
ErrorTask,
WSGITask,
)
from waitress.utilities import InternalServerError
from . import wasyncore
class ClientDisconnected(Exception):
    """Raised when a task tries to write to a socket the client has closed."""
class HTTPChannel(wasyncore.dispatcher, object):
    """
    One instance per client connection.  The async main thread reads raw
    bytes off the socket, parses them into request objects and hands them
    to the server's worker threads; workers push response bytes back via
    ``write_soon``.

    Setting self.requests = [somerequest] prevents more requests from being
    received until the out buffers have been flushed.

    Setting self.requests = [] allows more requests to be received.
    """

    task_class = WSGITask  # task class used for ordinary requests
    error_task_class = ErrorTask  # task class used to render error responses
    parser_class = HTTPRequestParser  # incremental HTTP request parser
    request = None  # A request parser instance
    last_activity = 0  # Time of last activity
    will_close = False  # set to True to close the socket.
    close_when_flushed = False  # set to True to close the socket when flushed
    requests = ()  # currently pending requests
    sent_continue = False  # used as a latch after sending 100 continue
    total_outbufs_len = 0  # total bytes ready to send
    current_outbuf_count = 0  # total bytes written to current outbuf

    #
    # ASYNCHRONOUS METHODS (including __init__)
    #

    def __init__(
        self, server, sock, addr, adj, map=None,
    ):
        # server: owning server (supplies the task queue and trigger)
        # sock/addr: the accepted client socket and its remote address
        # adj: an Adjustments instance of tunable settings
        # map: optional wasyncore socket map (defaults to the global one)
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()
        # cache the kernel send-buffer size; used as the flush chunk size
        self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf (expected to use an RLock)
        self.outbuf_lock = threading.Condition()
        wasyncore.dispatcher.__init__(self, sock, map=map)
        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr

    def writable(self):
        """Tell the async loop whether handle_write should be invoked."""
        # if there's data in the out buffer or we've been instructed to close
        # the channel (possibly by our server maintenance logic), run
        # handle_write
        return self.total_outbufs_len or self.will_close or self.close_when_flushed

    def handle_write(self):
        """Flush buffered output to the socket (async main thread only)."""
        # Precondition: there's data in the out buffer to be sent, or
        # there's a pending will_close request
        if not self.connected:
            # we dont want to close the channel twice
            return
        # try to flush any pending output
        if not self.requests:
            # 1. There are no running tasks, so we don't need to try to lock
            #    the outbuf before sending
            # 2. The data in the out buffer should be sent as soon as possible
            #    because it's either data left over from task output
            #    or a 100 Continue line sent within "received".
            flush = self._flush_some
        elif self.total_outbufs_len >= self.adj.send_bytes:
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. Only try to send if the data in the out buffer is larger
            #    than self.adj_bytes to avoid TCP fragmentation
            flush = self._flush_some_if_lockable
        else:
            # 1. There's not enough data in the out buffer to bother to send
            #    right now.
            flush = None
        if flush:
            try:
                flush()
            except socket.error:
                if self.adj.log_socket_errors:
                    self.logger.exception("Socket error")
                self.will_close = True
            except Exception:
                self.logger.exception("Unexpected exception when flushing")
                self.will_close = True
        if self.close_when_flushed and not self.total_outbufs_len:
            # deferred close requested by a finished task; all output is out
            self.close_when_flushed = False
            self.will_close = True
        if self.will_close:
            self.handle_close()

    def readable(self):
        """Tell the async loop whether handle_read should be invoked."""
        # We might want to create a new task. We can only do this if:
        # 1. We're not already about to close the connection.
        # 2. There's no already currently running task(s).
        # 3. There's no data in the output buffer that needs to be sent
        #    before we potentially create a new task.
        return not (self.will_close or self.requests or self.total_outbufs_len)

    def handle_read(self):
        """Read a chunk from the socket and feed it to the request parser."""
        try:
            data = self.recv(self.adj.recv_bytes)
        except socket.error:
            if self.adj.log_socket_errors:
                self.logger.exception("Socket error")
            self.handle_close()
            return
        if data:
            self.last_activity = time.time()
            self.received(data)

    def received(self, data):
        """
        Receives input asynchronously and assigns one or more requests to the
        channel.

        Returns False when ``data`` is empty, True otherwise.  Complete,
        non-empty requests are queued on ``self.requests`` and handed to
        the server's task dispatcher.
        """
        # Preconditions: there's no task(s) already running
        request = self.request
        requests = []
        if not data:
            return False
        while data:
            if request is None:
                request = self.parser_class(self.adj)
            n = request.received(data)
            if request.expect_continue and request.headers_finished:
                # guaranteed by parser to be a 1.1 request
                request.expect_continue = False
                if not self.sent_continue:
                    # there's no current task, so we don't need to try to
                    # lock the outbuf to append to it.
                    outbuf_payload = b"HTTP/1.1 100 Continue\r\n\r\n"
                    num_bytes = len(outbuf_payload)
                    self.outbufs[-1].append(outbuf_payload)
                    self.current_outbuf_count += num_bytes
                    self.total_outbufs_len += num_bytes
                    self.sent_continue = True
                    self._flush_some()
                    request.completed = False
            if request.completed:
                # The request (with the body) is ready to use.
                self.request = None
                if not request.empty:
                    requests.append(request)
                request = None
            else:
                # partial request: remember it and wait for more input
                self.request = request
            if n >= len(data):
                break
            data = data[n:]
        if requests:
            self.requests = requests
            self.server.add_task(self)
        return True

    def _flush_some_if_lockable(self):
        """Flush output only if the outbuf lock can be taken without blocking."""
        # Since our task may be appending to the outbuf, we try to acquire
        # the lock, but we don't block if we can't.
        if self.outbuf_lock.acquire(False):
            try:
                self._flush_some()
                # wake any task blocked in _flush_outbufs_below_high_watermark
                if self.total_outbufs_len < self.adj.outbuf_high_watermark:
                    self.outbuf_lock.notify()
            finally:
                self.outbuf_lock.release()

    def _flush_some(self):
        """Write as much buffered output as the socket will accept.

        Returns True if any bytes were sent, False otherwise.
        """
        # Send as much data as possible to our client
        sent = 0
        dobreak = False
        while True:
            outbuf = self.outbufs[0]
            # use outbuf.__len__ rather than len(outbuf) FBO of not getting
            # OverflowError on 32-bit Python
            outbuflen = outbuf.__len__()
            while outbuflen > 0:
                chunk = outbuf.get(self.sendbuf_len)
                num_sent = self.send(chunk)
                if num_sent:
                    outbuf.skip(num_sent, True)
                    outbuflen -= num_sent
                    sent += num_sent
                    self.total_outbufs_len -= num_sent
                else:
                    # failed to write anything, break out entirely
                    dobreak = True
                    break
            else:
                # this outbuf is drained; retire it unless it's the last one
                # self.outbufs[-1] must always be a writable outbuf
                if len(self.outbufs) > 1:
                    toclose = self.outbufs.pop(0)
                    try:
                        toclose.close()
                    except Exception:
                        self.logger.exception("Unexpected error when closing an outbuf")
                else:
                    # caught up, done flushing for now
                    dobreak = True
            if dobreak:
                break
        if sent:
            self.last_activity = time.time()
            return True
        return False

    def handle_close(self):
        """Tear down the channel: close all outbufs and the socket."""
        with self.outbuf_lock:
            for outbuf in self.outbufs:
                try:
                    outbuf.close()
                except Exception:
                    self.logger.exception(
                        "Unknown exception while trying to close outbuf"
                    )
            self.total_outbufs_len = 0
            self.connected = False
            # release any task blocked waiting for buffer space
            self.outbuf_lock.notify()
        wasyncore.dispatcher.close(self)

    def add_channel(self, map=None):
        """See wasyncore.dispatcher

        This hook keeps track of opened channels.
        """
        wasyncore.dispatcher.add_channel(self, map)
        self.server.active_channels[self._fileno] = self

    def del_channel(self, map=None):
        """See wasyncore.dispatcher

        This hook keeps track of closed channels.
        """
        fd = self._fileno  # next line sets this to None
        wasyncore.dispatcher.del_channel(self, map)
        ac = self.server.active_channels
        if fd in ac:
            del ac[fd]

    #
    # SYNCHRONOUS METHODS
    #

    def write_soon(self, data):
        """Queue response ``data`` for sending (called from a task thread).

        Returns the number of bytes queued.  Raises ClientDisconnected if
        the socket has already been closed.
        """
        if not self.connected:
            # if the socket is closed then interrupt the task so that it
            # can cleanup possibly before the app_iter is exhausted
            raise ClientDisconnected
        if data:
            # the async mainloop might be popping data off outbuf; we can
            # block here waiting for it because we're in a task thread
            with self.outbuf_lock:
                self._flush_outbufs_below_high_watermark()
                if not self.connected:
                    raise ClientDisconnected
                num_bytes = len(data)
                if data.__class__ is ReadOnlyFileBasedBuffer:
                    # they used wsgi.file_wrapper
                    self.outbufs.append(data)
                    nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                    self.outbufs.append(nextbuf)
                    self.current_outbuf_count = 0
                else:
                    if self.current_outbuf_count >= self.adj.outbuf_high_watermark:
                        # rotate to a new buffer if the current buffer has hit
                        # the watermark to avoid it growing unbounded
                        nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                        self.outbufs.append(nextbuf)
                        self.current_outbuf_count = 0
                    self.outbufs[-1].append(data)
                    self.current_outbuf_count += num_bytes
                self.total_outbufs_len += num_bytes
                if self.total_outbufs_len >= self.adj.send_bytes:
                    # enough data buffered: wake the async loop to send it
                    self.server.pull_trigger()
            return num_bytes
        return 0

    def _flush_outbufs_below_high_watermark(self):
        """Block the calling task thread until buffered output drains.

        Waits on outbuf_lock while the async thread flushes; returns when
        the buffered total drops below the high watermark or the client
        disconnects.
        """
        # check first to avoid locking if possible
        if self.total_outbufs_len > self.adj.outbuf_high_watermark:
            with self.outbuf_lock:
                while (
                    self.connected
                    and self.total_outbufs_len > self.adj.outbuf_high_watermark
                ):
                    self.server.pull_trigger()
                    self.outbuf_lock.wait()

    def service(self):
        """Execute all pending requests (runs in a task thread)."""
        with self.task_lock:
            while self.requests:
                request = self.requests[0]
                if request.error:
                    task = self.error_task_class(self, request)
                else:
                    task = self.task_class(self, request)
                try:
                    task.service()
                except ClientDisconnected:
                    self.logger.info(
                        "Client disconnected while serving %s" % task.request.path
                    )
                    task.close_on_finish = True
                except Exception:
                    self.logger.exception(
                        "Exception while serving %s" % task.request.path
                    )
                    if not task.wrote_header:
                        # nothing sent yet: substitute a 500 error response
                        if self.adj.expose_tracebacks:
                            body = traceback.format_exc()
                        else:
                            body = (
                                "The server encountered an unexpected "
                                "internal server error"
                            )
                        req_version = request.version
                        req_headers = request.headers
                        request = self.parser_class(self.adj)
                        request.error = InternalServerError(body)
                        # copy some original request attributes to fulfill
                        # HTTP 1.1 requirements
                        request.version = req_version
                        try:
                            request.headers["CONNECTION"] = req_headers["CONNECTION"]
                        except KeyError:
                            pass
                        task = self.error_task_class(self, request)
                        try:
                            task.service()  # must not fail
                        except ClientDisconnected:
                            task.close_on_finish = True
                    else:
                        # headers already on the wire; just drop the connection
                        task.close_on_finish = True
                # we cannot allow self.requests to drop to empty til
                # here; otherwise the mainloop gets confused
                if task.close_on_finish:
                    self.close_when_flushed = True
                    for request in self.requests:
                        request.close()
                    self.requests = []
                else:
                    # before processing a new request, ensure there is not too
                    # much data in the outbufs waiting to be flushed
                    # NB: currently readable() returns False while we are
                    # flushing data so we know no new requests will come in
                    # that we need to account for, otherwise it'd be better
                    # to do this check at the start of the request instead of
                    # at the end to account for consecutive service() calls
                    if len(self.requests) > 1:
                        self._flush_outbufs_below_high_watermark()
                    # this is a little hacky but basically it's forcing the
                    # next request to create a new outbuf to avoid sharing
                    # outbufs across requests which can cause outbufs to
                    # not be deallocated regularly when a connection is open
                    # for a long time
                    if self.current_outbuf_count > 0:
                        self.current_outbuf_count = self.adj.outbuf_high_watermark
                    request = self.requests.pop(0)
                    request.close()
        if self.connected:
            self.server.pull_trigger()
        self.last_activity = time.time()

    def cancel(self):
        """ Cancels all pending / active requests """
        self.will_close = True
        self.connected = False
        self.last_activity = time.time()
        self.requests = []
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/lighting/__init__.py | import sys
if sys.version_info < (3, 7):
from ._vertexnormalsepsilon import VertexnormalsepsilonValidator
from ._specular import SpecularValidator
from ._roughness import RoughnessValidator
from ._fresnel import FresnelValidator
from ._facenormalsepsilon import FacenormalsepsilonValidator
from ._diffuse import DiffuseValidator
from ._ambient import AmbientValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._vertexnormalsepsilon.VertexnormalsepsilonValidator",
"._specular.SpecularValidator",
"._roughness.RoughnessValidator",
"._fresnel.FresnelValidator",
"._facenormalsepsilon.FacenormalsepsilonValidator",
"._diffuse.DiffuseValidator",
"._ambient.AmbientValidator",
],
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/testing/print_coercion_tables.py | <filename>env/lib/python3.8/site-packages/numpy/testing/print_coercion_tables.py
#!/usr/bin/env python
"""Prints type-coercion tables for the built-in NumPy types
"""
from __future__ import division, absolute_import, print_function
import numpy as np
# Generic object that can be added, but doesn't do anything else
class GenericObject(object):
    """Minimal object that supports addition (always returning itself) and
    nothing else; used to probe object-dtype coercion behaviour."""

    # Advertise an object dtype so the coercion-table code can query it.
    dtype = np.dtype('O')

    def __init__(self, v):
        self.v = v

    def __add__(self, other):
        return self

    # Right-hand addition behaves identically to left-hand addition.
    __radd__ = __add__
def print_cancast_table(ntypes):
    """Print a matrix of np.can_cast results for every pair of type codes.

    Each cell holds 1 when a cast from the row type to the column type is
    allowed and 0 otherwise; the first row and column show the type chars.
    """
    # Header row: an 'X' corner cell followed by one column per type code.
    print(' '.join(['X'] + list(ntypes)), end=' \n')
    for row in ntypes:
        cells = [str(int(np.can_cast(row, col))) for col in ntypes]
        print(' '.join([row] + cells), end=' \n')
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
    """Print the result dtype character for every pairwise type combination.

    For each (row, col) pair of type codes in ``ntypes`` the two operands
    are constructed from ``inputfirstvalue`` / ``inputsecondvalue`` and
    either added with np.add or fed to np.promote_types (when
    ``use_promote_types`` is True); the resulting dtype's character is
    printed.  ``firstarray`` wraps the first operand in a 1-element array.
    Exceptions are encoded in the cell: ValueError '!', OverflowError '@',
    TypeError '#'.
    """
    print('+', end=' ')
    for char in ntypes:
        print(char, end=' ')
    print()
    for row in ntypes:
        # 'O' gets the GenericObject probe instead of a real scalar type.
        if row == 'O':
            rowtype = GenericObject
        else:
            # NOTE(review): np.obj2sctype was removed in NumPy 2.0 --
            # confirm the targeted NumPy version.
            rowtype = np.obj2sctype(row)
        print(row, end=' ')
        for col in ntypes:
            if col == 'O':
                coltype = GenericObject
            else:
                coltype = np.obj2sctype(col)
            try:
                if firstarray:
                    rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
                else:
                    rowvalue = rowtype(inputfirstvalue)
                colvalue = coltype(inputsecondvalue)
                if use_promote_types:
                    char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
                else:
                    value = np.add(rowvalue, colvalue)
                    if isinstance(value, np.ndarray):
                        char = value.dtype.char
                    else:
                        char = np.dtype(type(value)).char
            except ValueError:
                char = '!'
            except OverflowError:
                char = '@'
            except TypeError:
                char = '#'
            print(char, end=' ')
        print()
if __name__ == '__main__':
    # Print every table variant over the full set of NumPy type codes.
    print("can cast")
    print_cancast_table(np.typecodes['All'])
    print()
    print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
    print()
    print("scalar + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, False)
    print()
    print("scalar + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, False)
    print()
    print("array + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, True)
    print()
    print("array + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, True)
    print()
    print("promote_types")
    print_coercion_table(np.typecodes['All'], 0, 0, False, True)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_ohlc.py | from plotly.graph_objs import Ohlc
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_future_/__init__.py | import warnings
import functools
# Initialize _future_flags with all future flags that are now always in
# effect.
# These names are historical opt-in flags from the plotly v3 -> v4
# transition; every one listed here is permanently enabled.
_future_flags = {
    "renderer_defaults",
    "template_defaults",
    "extract_chart_studio",
    "remove_deprecations",
    "v4_subplots",
    "orca_defaults",
    "timezones",
    "trace_uids",
}
def _assert_plotly_not_imported():
import sys
if "plotly" in sys.modules:
raise ImportError(
"""\
The _plotly_future_ module must be imported before the plotly module"""
)
# Re-enable (with the "default" action) the chart-studio deprecation
# warnings emitted below, even if the application has silenced
# DeprecationWarning globally.
warnings.filterwarnings(
    "default", ".*?is deprecated, please use chart_studio*", DeprecationWarning
)
def _chart_studio_warning(submodule):
warnings.warn(
"The plotly.{submodule} module is deprecated, "
"please use chart_studio.{submodule} instead".format(submodule=submodule),
DeprecationWarning,
stacklevel=2,
)
def _chart_studio_error(submodule):
raise ImportError(
"""
The plotly.{submodule} module is deprecated,
please install the chart-studio package and use the
chart_studio.{submodule} module instead.
""".format(
submodule=submodule
)
)
def _chart_studio_deprecation(fn):
fn_name = fn.__name__
fn_module = fn.__module__
plotly_name = ".".join(["plotly"] + fn_module.split(".")[1:] + [fn_name])
chart_studio_name = ".".join(
["chart_studio"] + fn_module.split(".")[1:] + [fn_name]
)
msg = """\
{plotly_name} is deprecated, please use {chart_studio_name}\
""".format(
plotly_name=plotly_name, chart_studio_name=chart_studio_name
)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
warnings.warn(msg, DeprecationWarning, stacklevel=2)
return fn(*args, **kwargs)
return wrapper
__all__ = ["_future_flags", "_chart_studio_error"]
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/mapbox/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
if sys.version_info < (3, 7):
from ._zoom import ZoomValidator
from ._uirevision import UirevisionValidator
from ._style import StyleValidator
from ._pitch import PitchValidator
from ._layerdefaults import LayerdefaultsValidator
from ._layers import LayersValidator
from ._domain import DomainValidator
from ._center import CenterValidator
from ._bearing import BearingValidator
from ._accesstoken import AccesstokenValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._zoom.ZoomValidator",
"._uirevision.UirevisionValidator",
"._style.StyleValidator",
"._pitch.PitchValidator",
"._layerdefaults.LayerdefaultsValidator",
"._layers.LayersValidator",
"._domain.DomainValidator",
"._center.CenterValidator",
"._bearing.BearingValidator",
"._accesstoken.AccesstokenValidator",
],
)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/core/__init__.py | """
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
Please note that this module is private. All functions and objects
are available in the main ``numpy`` namespace - use that instead.
"""
from numpy.version import version as __version__
import os
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
os.environ[envkey] = '1'
env_added.append(envkey)
try:
from . import multiarray
except ImportError as exc:
import sys
msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python%d.%d from "%s"
* The NumPy version is: "%s"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
__version__, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
del os.environ[envkey]
del envkey
del env_added
del os
from . import umath
# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
hasattr(umath, '_multiarray_umath')):
import sys
path = sys.modules['numpy'].__path__
msg = ("Something is wrong with the numpy installation. "
"While importing we detected an older version of "
"numpy in {}. One method of fixing this is to repeatedly uninstall "
"numpy until none is found, then reinstall this version.")
raise ImportError(msg.format(path))
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import *
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import machar
from .machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
# available for unpickling to work.
def _ufunc_reconstruct(module, name):
# The `fromlist` kwarg is required to ensure that `mod` points to the
# inner-most module rather than the parent package when module name is
# nested. This makes it possible to pickle non-toplevel ufuncs such as
# scipy.special.expit for instance.
mod = __import__(module, fromlist=[name])
return getattr(mod, name)
def _ufunc_reduce(func):
    """Pickle support: reduce a ufunc to (_ufunc_reconstruct, (module, name))."""
    from pickle import whichmodule

    fname = func.__name__
    return _ufunc_reconstruct, (whichmodule(func, fname), fname)
import copyreg
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del _ufunc_reduce
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/random/tests/test_seed_sequence.py | import numpy as np
from numpy.testing import assert_array_equal
from numpy.random import SeedSequence
def test_reference_data():
    """ Check that SeedSequence generates data the same as the C++ reference.

    Entropy inputs and expected 32/64-bit outputs are taken verbatim from
    https://gist.github.com/imneme/540829265469e673d045
    """
    inputs = [
        [3735928559, 195939070, 229505742, 305419896],
        [3668361503, 4165561550, 1661411377, 3634257570],
        [164546577, 4166754639, 1765190214, 1303880213],
        [446610472, 3941463886, 522937693, 1882353782],
        [1864922766, 1719732118, 3882010307, 1776744564],
        [4141682960, 3310988675, 553637289, 902896340],
        [1134851934, 2352871630, 3699409824, 2648159817],
        [1240956131, 3107113773, 1283198141, 1924506131],
        [2669565031, 579818610, 3042504477, 2774880435],
        [2766103236, 2883057919, 4029656435, 862374500],
    ]
    outputs = [
        [3914649087, 576849849, 3593928901, 2229911004],
        [2240804226, 3691353228, 1365957195, 2654016646],
        [3562296087, 3191708229, 1147942216, 3726991905],
        [1403443605, 3591372999, 1291086759, 441919183],
        [1086200464, 2191331643, 560336446, 3658716651],
        [3249937430, 2346751812, 847844327, 2996632307],
        [2584285912, 4034195531, 3523502488, 169742686],
        [959045797, 3875435559, 1886309314, 359682705],
        [3978441347, 432478529, 3223635119, 138903045],
        [296367413, 4262059219, 13109864, 3283683422],
    ]
    outputs64 = [
        [2477551240072187391, 9577394838764454085],
        [15854241394484835714, 11398914698975566411],
        [13708282465491374871, 16007308345579681096],
        [15424829579845884309, 1898028439751125927],
        [9411697742461147792, 15714068361935982142],
        [10079222287618677782, 12870437757549876199],
        [17326737873898640088, 729039288628699544],
        [16644868984619524261, 1544825456798124994],
        [1857481142255628931, 596584038813451439],
        [18305404959516669237, 14103312907920476776],
    ]
    for entropy, want32, want64 in zip(inputs, outputs, outputs64):
        ss = SeedSequence(entropy)
        # 32-bit stream must match the reference words exactly.
        got32 = ss.generate_state(len(want32))
        assert_array_equal(got32, np.array(want32, dtype=np.uint32))
        # The same sequence viewed as 64-bit words must match as well.
        got64 = ss.generate_state(len(want64), dtype=np.uint64)
        assert_array_equal(got64, want64)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/frame/test_period.py | import numpy as np
from pandas import DataFrame, Index, PeriodIndex, period_range
import pandas._testing as tm
class TestPeriodIndex:
    """Tests for DataFrames that use a PeriodIndex as columns or index."""

    def test_as_frame_columns(self):
        """Selecting a period column by Period object or by string works."""
        periods = period_range("1/1/2000", periods=5)
        frame = DataFrame(np.random.randn(10, 5), columns=periods)

        by_period = frame[periods[0]]
        tm.assert_series_equal(by_period, frame.iloc[:, 0])

        # GH # 1211: repr of a period-columned frame must not blow up.
        repr(frame)

        # String-based selection resolves to the same first column.
        by_string = frame["1/1/2000"]
        tm.assert_series_equal(by_string, frame.iloc[:, 0])

    def test_frame_setitem(self):
        """A PeriodIndex assigned as a column round-trips via set_index."""
        periods = period_range("1/1/2000", periods=5, name="index")
        frame = DataFrame(np.random.randn(5, 3), index=periods)

        frame["Index"] = periods
        as_index = Index(frame["Index"])
        tm.assert_index_equal(as_index, periods, check_names=False)
        assert as_index.name == "Index"
        assert periods.name == "index"

        restored = frame.reset_index().set_index("index")
        assert isinstance(restored.index, PeriodIndex)
        tm.assert_index_equal(restored.index, periods)

    def test_frame_index_to_string(self):
        """to_string on a PeriodIndex-indexed frame simply works."""
        monthly = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
        frame = DataFrame(np.random.randn(3, 4), index=monthly)
        # it works!
        frame.to_string()
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/histogram2dcontour/_histnorm.py | import _plotly_utils.basevalidators
class HistnormValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``histnorm`` attribute of ``histogram2dcontour``."""

    def __init__(
        self, plotly_name="histnorm", parent_name="histogram2dcontour", **kwargs
    ):
        # Allowed normalization modes; callers may override via kwargs.
        allowed_values = kwargs.pop(
            "values",
            ["", "percent", "probability", "density", "probability density"],
        )
        super(HistnormValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            values=allowed_values,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scattergeo/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
if sys.version_info < (3, 7):
from ._visible import VisibleValidator
from ._unselected import UnselectedValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._texttemplatesrc import TexttemplatesrcValidator
from ._texttemplate import TexttemplateValidator
from ._textsrc import TextsrcValidator
from ._textpositionsrc import TextpositionsrcValidator
from ._textposition import TextpositionValidator
from ._textfont import TextfontValidator
from ._text import TextValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._selectedpoints import SelectedpointsValidator
from ._selected import SelectedValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._mode import ModeValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._marker import MarkerValidator
from ._lonsrc import LonsrcValidator
from ._lon import LonValidator
from ._locationssrc import LocationssrcValidator
from ._locations import LocationsValidator
from ._locationmode import LocationmodeValidator
from ._line import LineValidator
from ._legendgroup import LegendgroupValidator
from ._latsrc import LatsrcValidator
from ._lat import LatValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hovertextsrc import HovertextsrcValidator
from ._hovertext import HovertextValidator
from ._hovertemplatesrc import HovertemplatesrcValidator
from ._hovertemplate import HovertemplateValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._geojson import GeojsonValidator
from ._geo import GeoValidator
from ._fillcolor import FillcolorValidator
from ._fill import FillValidator
from ._featureidkey import FeatureidkeyValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
from ._connectgaps import ConnectgapsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._visible.VisibleValidator",
"._unselected.UnselectedValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._texttemplatesrc.TexttemplatesrcValidator",
"._texttemplate.TexttemplateValidator",
"._textsrc.TextsrcValidator",
"._textpositionsrc.TextpositionsrcValidator",
"._textposition.TextpositionValidator",
"._textfont.TextfontValidator",
"._text.TextValidator",
"._stream.StreamValidator",
"._showlegend.ShowlegendValidator",
"._selectedpoints.SelectedpointsValidator",
"._selected.SelectedValidator",
"._opacity.OpacityValidator",
"._name.NameValidator",
"._mode.ModeValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._marker.MarkerValidator",
"._lonsrc.LonsrcValidator",
"._lon.LonValidator",
"._locationssrc.LocationssrcValidator",
"._locations.LocationsValidator",
"._locationmode.LocationmodeValidator",
"._line.LineValidator",
"._legendgroup.LegendgroupValidator",
"._latsrc.LatsrcValidator",
"._lat.LatValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._hovertextsrc.HovertextsrcValidator",
"._hovertext.HovertextValidator",
"._hovertemplatesrc.HovertemplatesrcValidator",
"._hovertemplate.HovertemplateValidator",
"._hoverlabel.HoverlabelValidator",
"._hoverinfosrc.HoverinfosrcValidator",
"._hoverinfo.HoverinfoValidator",
"._geojson.GeojsonValidator",
"._geo.GeoValidator",
"._fillcolor.FillcolorValidator",
"._fill.FillValidator",
"._featureidkey.FeatureidkeyValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
"._connectgaps.ConnectgapsValidator",
],
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_diff.py | <filename>env/lib/python3.8/site-packages/pandas/tests/frame/methods/test_diff.py
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, Timestamp, date_range
import pandas._testing as tm
class TestDataFrameDiff:
    """Tests for DataFrame.diff over numeric, datetime and mixed dtypes."""

    def test_diff(self, datetime_frame):
        """diff(1) equals subtraction of the shifted column, across dtypes."""
        the_diff = datetime_frame.diff(1)
        tm.assert_series_equal(
            the_diff["A"], datetime_frame["A"] - datetime_frame["A"].shift(1)
        )
        # int dtype
        a = 10000000000000000
        b = a + 1
        s = Series([a, b])
        rs = DataFrame({"s": s}).diff()
        assert rs.s[1] == 1
        # mixed numeric
        tf = datetime_frame.astype("float32")
        the_diff = tf.diff(1)
        tm.assert_series_equal(the_diff["A"], tf["A"] - tf["A"].shift(1))
        # GH#10907
        df = pd.DataFrame({"y": pd.Series([2]), "z": pd.Series([3])})
        df.insert(0, "x", 1)
        result = df.diff(axis=1)
        expected = pd.DataFrame(
            {"x": np.nan, "y": pd.Series(1), "z": pd.Series(1)}
        ).astype("float64")
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("tz", [None, "UTC"])
    def test_diff_datetime_axis0(self, tz):
        """Row-wise diff of datetime columns yields timedelta columns."""
        # GH#18578
        df = DataFrame(
            {
                0: date_range("2010", freq="D", periods=2, tz=tz),
                1: date_range("2010", freq="D", periods=2, tz=tz),
            }
        )
        result = df.diff(axis=0)
        expected = DataFrame(
            {
                0: pd.TimedeltaIndex(["NaT", "1 days"]),
                1: pd.TimedeltaIndex(["NaT", "1 days"]),
            }
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("tz", [None, "UTC"])
    def test_diff_datetime_axis1(self, tz):
        """Column-wise diff of datetimes: supported tz-naive, raises tz-aware."""
        # GH#18578
        df = DataFrame(
            {
                0: date_range("2010", freq="D", periods=2, tz=tz),
                1: date_range("2010", freq="D", periods=2, tz=tz),
            }
        )
        if tz is None:
            result = df.diff(axis=1)
            expected = DataFrame(
                {
                    0: pd.TimedeltaIndex(["NaT", "NaT"]),
                    1: pd.TimedeltaIndex(["0 days", "0 days"]),
                }
            )
            tm.assert_frame_equal(result, expected)
        else:
            with pytest.raises(NotImplementedError):
                result = df.diff(axis=1)

    def test_diff_timedelta(self):
        """diff of a datetime column produces NaT then the timedelta step."""
        # GH#4533
        df = DataFrame(
            dict(
                time=[Timestamp("20130101 9:01"), Timestamp("20130101 9:02")],
                value=[1.0, 2.0],
            )
        )
        res = df.diff()
        exp = DataFrame(
            [[pd.NaT, np.nan], [pd.Timedelta("00:01:00"), 1]], columns=["time", "value"]
        )
        tm.assert_frame_equal(res, exp)

    def test_diff_mixed_dtype(self):
        """Object-dtype columns do not corrupt the float result dtype."""
        df = DataFrame(np.random.randn(5, 3))
        df["A"] = np.array([1, 2, 3, 4, 5], dtype=object)
        result = df.diff()
        assert result[0].dtype == np.float64

    def test_diff_neg_n(self, datetime_frame):
        """A negative period diffs against the forward-shifted frame."""
        rs = datetime_frame.diff(-1)
        xp = datetime_frame - datetime_frame.shift(-1)
        tm.assert_frame_equal(rs, xp)

    def test_diff_float_n(self, datetime_frame):
        """A float period is accepted and treated as its integer value."""
        rs = datetime_frame.diff(1.0)
        xp = datetime_frame.diff(1)
        tm.assert_frame_equal(rs, xp)

    def test_diff_axis(self):
        """axis=1 diffs across columns, axis=0 across rows."""
        # GH#9727
        df = DataFrame([[1.0, 2.0], [3.0, 4.0]])
        tm.assert_frame_equal(
            df.diff(axis=1), DataFrame([[np.nan, 1.0], [np.nan, 1.0]])
        )
        tm.assert_frame_equal(
            df.diff(axis=0), DataFrame([[np.nan, np.nan], [2.0, 2.0]])
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/data/_scattermapbox.py | <gh_stars>1000+
import _plotly_utils.basevalidators
# Auto-generated plotly validator: accepts an array of Scattermapbox trace
# templates under layout.template.data.
class ScattermapboxValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    def __init__(
        self, plotly_name="scattermapbox", parent_name="layout.template.data", **kwargs
    ):
        super(ScattermapboxValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Scattermapbox"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            """,
            ),
            **kwargs
        )
|
ToughBishop/censor-fix | censorfix/__init__.py | <reponame>ToughBishop/censor-fix
from censorfix.censorfix import *
from censorfix.compile_stan import *
__all__=['censorfix','compile_stan','test_censor']
|
ToughBishop/censor-fix | censorfix/compile_stan.py | import pystan
import hashlib
import os
import joblib
stan_compiled = os.path.dirname(__file__) + '/../stan_compiled/'
stan_code = os.path.dirname(__file__) + '/../stan_code/'
def compile_code(code, code_name):
    """
    Compile a Stan model and cache it in the stan_compiled directory.

    A short md5 digest of the source is dropped next to the model, so an
    unchanged model is not recompiled on subsequent calls.

    Parameters
    ----------
    code : string
        the Stan model source code
    code_name : string
        base name used for the cached model file
    """
    digest = hashlib.md5(code.encode('utf-8')).hexdigest()[0:10]
    marker = stan_compiled + digest + '.stanc'

    # the marker file's existence means this exact source was already built
    if os.path.exists(marker):
        print('already compiled ' + code_name)
        return

    print('compiled ' + code_name)
    model = pystan.StanModel(model_code=code)
    joblib.dump(model, stan_compiled + code_name + '.stan')
    joblib.dump(' ', marker)
def compile_all():
    """
    Compile every Stan source found in the stan_code directory.

    Only file names containing '.st' are considered; models that are
    already compiled are skipped by compile_code's hash check.
    """
    print('This may take several minutes')
    print('The compilation only happens the first time the program is run')
    for fname in os.listdir(stan_code):
        if '.st' not in fname:
            continue
        with open(stan_code + fname, "r") as handle:
            source = ''.join(handle.readlines())
        compile_code(source, fname[:-3])
    print('all done')
|
ToughBishop/censor-fix | censorfix/test_censor.py | <filename>censorfix/test_censor.py
import numpy as np
import pandas as pd
import joblib
from censorfix import censorfix
def create_data():
    """
    Draw a 100x3 sample from a correlated multivariate normal (pairwise
    correlation 0.5, unit variance) and return two identical DataFrames;
    the second is an untouched reference copy.
    """
    corr = 0.5
    dim = 3
    cov = corr + np.identity(dim) * (1 - corr)
    draws = np.random.multivariate_normal(
        [0 for _ in range(dim)], cov, size=100)
    original = pd.DataFrame(draws)
    return original, original.copy()
def single_dim_test():
    """
    Test censorfix in one dimension with a gaussian distribution of data.

    Censors column 0 outside [-0.5, 1.5], runs a single imputation of it
    against columns 1 and 2, plots imputed vs. original points, and
    returns (df, df2) -- the imputed frame and the uncensored reference.
    """
    # BUG FIX: plt was used below but never imported anywhere in this
    # module; import it locally so the plotting section works.
    import matplotlib.pyplot as plt

    df, df2 = create_data()
    censor_high = 1.5
    censor_low = -0.5

    # clip column 0 at the censoring points
    df.loc[df[0] > censor_high, 0] = censor_high
    df.loc[df[0] < censor_low, 0] = censor_low

    imp = censorfix.censorImputer(
        debug=False, no_columns=2, sample_posterior=True)

    # impute_once expects the censored column sorted ascending
    df = df.sort_values(by=0, ascending=True)
    imp.impute_once(df[0], df[[1, 2]], censor_high, censor_low)

    fig, ax = plt.subplots(1, 1)
    df2.plot(kind='scatter', x=0, y=2, ax=ax, color='pink', label='imputed')
    df.plot(kind='scatter', x=0, y=2, ax=ax, label='true')
    plt.title('single imputation of censored values')
    plt.show()
    return df, df2
def multi_imp_test(plot=True):
    """
    Test the creation of multiple imputations on gaussian data.

    Censors columns 0 and 1, requests three imputations, optionally plots
    each against the uncensored original, and returns (df2, data_mi):
    the reference frame and the list of imputed frames.
    """
    df, df2 = create_data()

    # censor the first dataframe
    censor_high_1 = 0.8
    censor_high_2 = 1
    censor_low_1 = -0.6
    censor_low_2 = -2
    df.loc[df[0] > censor_high_1, 0] = censor_high_1
    df.loc[df[0] < censor_low_1, 0] = censor_low_1
    df.loc[df[1] > censor_high_2, 1] = censor_high_2
    df.loc[df[1] < censor_low_2, 1] = censor_low_2

    imp = censorfix.censorImputer(
        debug=False, sample_posterior=True, number_imputations=3)
    U = [censor_high_1, censor_high_2, 'NA']  # the upper censor values
    L = [censor_low_1, censor_low_2, 'NA']    # the lower censor values
    data_mi = imp.impute(df, U, L, iter_val=2)

    if plot:
        # BUG FIX: plt was used here but never imported anywhere in this
        # module; import it locally so plotting works.
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 1)
        colours = ['red', 'yellow', 'green']
        for i, data in enumerate(data_mi):
            data.plot(kind='scatter', x=0, y=1, color=colours[i],
                      label='imputation {}'.format(i), ax=ax)
        df2.plot(kind='scatter', x=0, y=1, color='blue', label='original', ax=ax)
        plt.title('Multiple imputations comparison')
        plt.legend()
        plt.show()
    return df2, data_mi
def multi_dim_test():
    """
    Test censorfix multivariate imputation on gaussian data.

    Censors columns 0 and 1, imputes them jointly, plots censored /
    imputed / original points, and returns (df, df2) -- the imputed frame
    and the uncensored reference.
    """
    # BUG FIX: plt was used below but never imported anywhere in this
    # module; import it locally so the plotting section works.
    import matplotlib.pyplot as plt

    df, df2 = create_data()

    # censor the first dataframe
    censor_high_1 = 0.8
    censor_high_2 = 0.5
    censor_low_1 = -0.3
    censor_low_2 = -0.7
    df.loc[df[0] > censor_high_1, 0] = censor_high_1
    df.loc[df[0] < censor_low_1, 0] = censor_low_1
    df.loc[df[1] > censor_high_2, 1] = censor_high_2
    df.loc[df[1] < censor_low_2, 1] = censor_low_2

    imp = censorfix.censorImputer(
        debug=False, sample_posterior=True)
    U = [censor_high_1, censor_high_2, 'NA']  # the upper censor values
    L = [censor_low_1, censor_low_2, 'NA']    # the lower censor values

    fig, ax = plt.subplots(1, 1)
    df.plot(kind='scatter', x=0, y=1, ax=ax, color='yellow', label='censored')
    df = imp.impute(df, U, L, iter_val=2)
    df2.plot(
        kind='scatter',
        x=0,
        y=1,
        ax=ax,
        color='pink',
        label='imputed_values')
    df.plot(kind='scatter', x=0, y=1, ax=ax, label='actual')
    plt.legend()
    plt.title('Multivariate Censor Imputation')
    plt.show()
    return df, df2
|
ToughBishop/censor-fix | setup.py | from setuptools import setup
def readme():
    """Return the contents of README.md (used as the PyPI long description)."""
    with open('README.md') as handle:
        return handle.read()
# NOTE(review): package_name appears unused -- the literal is repeated in
# setup() below; confirm before removing.
package_name='censor-fix'
# Distribution metadata for pip/PyPI.
setup(
    name='censor-fix',
    version='0.0.2',
    description='A library for multiple imputation of censored data',
    # README.md doubles as the long description shown on PyPI
    long_description=readme(),
    long_description_content_type='text/markdown',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    # runtime dependencies; pystan compiles models at first use
    install_requires=[
        'numpy',
        'pandas',
        'joblib',
        'tqdm',
        'pystan',
        'sklearn'
    ],
    python_requires='>=3.5',
    packages=['censorfix'],
    zip_safe=False,
    include_package_data=True
)
|
ToughBishop/censor-fix | censorfix/censorfix.py | <reponame>ToughBishop/censor-fix
import numpy as np
import pandas as pd
import joblib
import os
from tqdm import tqdm
import pkg_resources
import censorfix
stan_dir = os.path.dirname(__file__) +'/../stan_compiled/'
class censorImputer():
    """
    Multivariate imputer that multiply imputes censored values.

    Works MICE-style: each censored column is regressed on the remaining
    columns with a Stan model of the chosen distribution, and its censored
    entries are redrawn from the fitted truncated model.
    """

    def __init__(self,
                 sample_posterior=True,
                 column_choice='auto',
                 initial_point='auto',
                 distribution='gaussian',
                 missing_values=np.NAN,
                 no_columns='all',
                 max_iter=5,
                 stan_iterations=4000,
                 debug=True,
                 n_jobs=8,
                 imputation_order='ascending',
                 number_imputations=1):
        """
        Multivariate imputer that multiply imputes censored values.
        This is a strategy for dealing with missing and censored values in data sets.
        It can handle both lower and upper censoring points.

        Parameters
        ----------
        sample_posterior : bool
            whether to use the best prediction at each step or a bayesian imputation
        distribution : gaussian, t-distribution, skew-normal, exponential
            the distribution to use for the experiment
        missing_values : str
            the placeholder for missing values that will be imputed
        max_iter : int
            the number of cycles
        no_columns : int
            how many columns to use for the imputation
        stan_iterations : int
            number of iterations for Stan to run
        imputation_order : str ascending
            the order of imputations
        debug: bool
            display debug information
        number_imputations : int
            the number of imputations required
        """
        # compile the bundled Stan programs on first use
        if not os.path.isfile(os.path.dirname(__file__) +'/../stan_compiled/s3.stan'):
            print('Compiling stan code')
            censorfix.compile_stan.compile_all()
        self.sample_posterior = sample_posterior
        self.column_choice = column_choice
        self.initial_point = initial_point
        self.distribution = distribution
        self.no_columns = no_columns
        self.max_iter = max_iter
        self.stan_iterations = stan_iterations
        self.data = None
        self.debug = debug
        self.n_jobs = n_jobs
        self.number_imputations=number_imputations

        # load the pre-compiled Stan model for the requested distribution
        if distribution == 'exponential':
            self.stan_model =joblib.load(stan_dir + 's2.stan')
        elif distribution == 'gaussian':
            self.stan_model = joblib.load(stan_dir + 's3.stan')
        elif distribution == 'skew-normal':
            self.stan_model = joblib.load(stan_dir + 's4.stan')
        elif distribution == 't-distribution':
            self.stan_model = joblib.load(stan_dir + 's5.stan')
        else:
            # NOTE(review): self.stan_model is left unset on this path, so a
            # later impute() call raises AttributeError -- confirm whether
            # raising ValueError here would be preferable.
            print('Illegal distribution specified')

        if not sample_posterior and number_imputations!=1:
            print('error posterior sampling needs to be enabled if doing multiple imputation ')
            return

    def impute_once(self, y, X, U, L):
        """
        impute one column of censored values using Stan with chosen options

        NOTE: y must be sorted ascending (left-censored entries first,
        right-censored last); y is also modified IN PLACE.

        Parameters
        ----------
        y : array like
            censored values
        X : array like
            independent values
        U : double
            the upper censored values ('NA' for no upper censoring)
        L : double
            the lower censored values ('NA' for no lower censoring)
        """
        # check if y is sorted; the Stan data layout below depends on it
        for i in range(len(y) - 1):
            if y.iloc[i] > y.iloc[i + 1]:
                print('values need to be sorted')
                return y

        K = X.shape[1]
        N_cens_right = sum(y >= U) if U != 'NA' else 0
        if L != 'NA':
            N_cens_left = sum(y <= L)
        else:
            N_cens_left = 0
            L = -np.inf
        if N_cens_right == 0 and N_cens_left == 0:
            return y  # nothing to impute
        N_obs = X.shape[0] - N_cens_right - N_cens_left

        # feed the data into Stan using a dictionary; observed y values sit
        # between the left- and right-censored runs of the sorted column
        data = {'N_obs': N_obs,
                'N_cens_left': N_cens_left,
                'N_cens_right': N_cens_right,
                'x_obs': X.values,
                'y_obs': y[N_cens_left:N_obs + N_cens_left].values,
                'U': U,
                'L': L,
                'K': K}
        res = self.stan_model.sampling(
            data=data,
            iter=self.stan_iterations,
            n_jobs=self.n_jobs)
        if self.debug:
            print(res.stansummary())

        # KeyError simply means that side had no censored values to draw
        if self.sample_posterior:
            # take the last posterior draw
            try:
                y[N_cens_left + N_obs:] = res.extract()['y_cens_right'][-1]
            except KeyError:
                pass
            try:
                y[:N_cens_left] = res.extract()['y_cens_left'][-1]
            except KeyError:
                pass
        else:
            # take the posterior mean
            try:
                y[N_cens_left + N_obs:] = res.extract()['y_cens_right'].mean(axis=0)
            except KeyError:
                pass
            try:
                y[:N_cens_left] = res.extract()['y_cens_left'].mean(axis=0)
            except KeyError:
                pass
        return y

    def impute(self, data, right_cen=None, left_cen=None, iter_val=1):
        """
        impute multiple columns in an iterative style
        returns the data in a sorted form
        if multiple imputations are requested a list of dataframes is returned

        Parameters
        ----------
        data : pandas dataframe
            the data as a pandas dataframe
        right_cen : list of doubles
            the right censoring points of the data NA if no censoring
        left_cen : list of doubles
            the left censoring points of the data NA if no censoring
        iter_val : int
            the number of imputation rounds to perform

        Returns
        -------
        array
            Dataset with imputed values
        """
        no_features = data.shape[1]

        # BUG FIX: the original tested 'or', which bailed out whenever
        # EITHER censor list was missing and made the 'NA' defaults below
        # unreachable; only give up when BOTH are absent.
        if not right_cen and not left_cen:
            print('no censoring values provided')
            return
        if not right_cen:
            right_cen=['NA']*no_features
        if not left_cen:
            left_cen=['NA']*no_features
        if not isinstance(data, pd.DataFrame):
            print("data needs to be in a pandas dataframe")

        def select_columns(i):  # selects which columns to use as regressors
            if self.no_columns == 'all':
                return list(range(i)) + list(range(i + 1, no_features))
            return 'error not implemented yet' #TODO

        # single imputation: cycle over columns iter_val times
        if self.number_imputations==1:
            for _ in tqdm(range(iter_val)):
                for i in range(no_features):
                    data = data.sort_values(by=data.columns[i], ascending=True)
                    data.iloc[:, i] = self.impute_once(data.iloc[:, i],
                                                       data.iloc[:, select_columns(i)],
                                                       right_cen[i], left_cen[i])
            return data

        # multiple imputations: run iter_val-1 shared warm-up cycles, then
        # fork number_imputations copies and finish each independently
        else:
            ret=[]
            for _ in tqdm(range(iter_val-1)):
                for i in range(no_features):
                    data = data.sort_values(by=data.columns[i], ascending=True)
                    data.iloc[:, i] = self.impute_once(data.iloc[:, i],
                                                       data.iloc[:, select_columns(i)],
                                                       right_cen[i], left_cen[i])
            i=0
            for j in range(self.number_imputations):
                ret.append(data.sort_values(by=data.columns[i], ascending=True).copy())
            for data in ret:
                # NOTE(review): impute_once mutates the passed column in
                # place; rebinding the loop variable discards the returned
                # reference -- confirm this is intended.
                data = self.impute_once(data.iloc[:, i],
                                        data.iloc[:, select_columns(i)],
                                        right_cen[i], left_cen[i])
            for i in range(1,no_features):
                for data in ret:
                    data = data.sort_values(by=data.columns[i], ascending=True)
                    data.iloc[:, i] = self.impute_once(data.iloc[:, i],
                                                       data.iloc[:, select_columns(i)],
                                                       right_cen[i], left_cen[i])
            return ret
|
streitho/spinmob | _dialogs.py | import wx as _wx
# Module-level preference cache (last-used directories, keyed by purpose).
# Created only on first import so reload() does not wipe saved defaults;
# narrowed the bare except to NameError so real errors are not swallowed.
try: _prefs
except NameError: _prefs = {}
#
# Dialogs
#
def Save(filters="*.*", text='save THIS!', default_directory='default_directory'):
    """
    Pop up a wx "save file" dialog and return the chosen path, or None if
    the user cancels.

    filters            wildcard string for the file-type filter
    text               dialog title text
    default_directory  _prefs key under which the last-used directory is kept
    """
    global _prefs

    # if this type of pref doesn't exist, we need to make a new one
    if _prefs.has_key(default_directory): default = _prefs[default_directory]
    else:                                 default = ""

    # define the dialog object. Doesn't open the window
    dialog = _wx.FileDialog(None,
                            message    = text,
                            defaultDir = default,
                            wildcard   = filters,
                            style      = _wx.SAVE|_wx.OVERWRITE_PROMPT)

    # This is the command that pops up the dialog for the user
    if dialog.ShowModal() == _wx.ID_OK:
        # BUG FIX: read the path and directory BEFORE destroying the dialog
        # (the original called GetPath() on the already-destroyed window)
        path = dialog.GetPath()

        # update the default path so you don't have to keep navigating
        _prefs[default_directory] = dialog.GetDirectory()

        dialog.Destroy()
        return(path)
    else:
        # also destroy on cancel (the original leaked the dialog here)
        dialog.Destroy()
        return(None)
def SingleFile(filters="*.*", text='select a file, fungus pants!', default_directory='default_directory'):
    """
    Pop up a wx "open file" dialog and return the selected path, or None
    if the user cancels.
    """
    global _prefs

    # recall the last directory used for this purpose (if any)
    if _prefs.has_key(default_directory): default = _prefs[default_directory]
    else:                                 default = ""

    # build the dialog object (the window isn't shown yet)
    dialog = _wx.FileDialog(None,
                            message    = text,
                            defaultDir = default,
                            wildcard   = filters,
                            style      = _wx.OPEN)

    # show the dialog; bail out on cancel
    if not dialog.ShowModal() == _wx.ID_OK: return(None)

    # fetch the selection and remember the directory for next time
    path = dialog.GetPath()
    _prefs[default_directory] = dialog.GetDirectory()

    dialog.Destroy()
    return(path)
def Directory(text='select a directory, hairhead!', default_directory='default_directory'):
    """
    Pop up a wx directory chooser and return the selected path, or None if
    the user cancels.
    """
    global _prefs

    # if this type of pref doesn't exist, we need to make a new one
    if _prefs.has_key(default_directory): default = _prefs[default_directory]
    else:                                 default = ""

    # define the dialog object. Doesn't open the window
    dialog = _wx.DirDialog(None,
                           message     = text,
                           defaultPath = default,
                           style       = _wx.DD_DEFAULT_STYLE)

    # This is the command that pops up the dialog for the user
    if not dialog.ShowModal() == _wx.ID_OK: return None

    # BUG FIX: read the path BEFORE destroying the dialog (the original
    # returned dialog.GetPath() after Destroy())
    path = dialog.GetPath()

    # update the default path so you don't have to keep navigating
    _prefs[default_directory] = path

    dialog.Destroy()
    return(path)
def MultipleFiles(filters="*.*", text='select some files, facehead!', default_directory='default_directory'):
    """
    Pop up a wx multi-select "open file" dialog and return the list of
    selected paths, or None if the user cancels.
    """
    global _prefs

    # if this type of pref doesn't exist, we need to make a new one
    if _prefs.has_key(default_directory): default = _prefs[default_directory]
    else:                                 default = ""

    # define the dialog object. Doesn't open the window
    dialog = _wx.FileDialog(None,
                            message    = text,
                            defaultDir = default,
                            wildcard   = filters,
                            style      = _wx.OPEN | _wx.MULTIPLE)

    # This is the command that pops up the dialog for the user
    if not dialog.ShowModal() == _wx.ID_OK: return None

    # BUG FIX: read the paths BEFORE destroying the dialog (the original
    # returned dialog.GetPaths() after Destroy())
    paths = dialog.GetPaths()

    # update the default path so you don't have to keep navigating
    _prefs[default_directory] = dialog.GetDirectory()

    dialog.Destroy()
    return(paths)
|
streitho/spinmob | _functions.py | #############################################################
# various functions that I like to use
import numpy as _n
import pylab as _pylab
import cPickle as _cPickle
import os as _os
import thread as _thread
import shutil as _shutil
import wx as _wx
import time as _time
from scipy.integrate import quad
import _dialogs ;reload(_dialogs)
import _pylab_tweaks ;reload(_pylab_tweaks)
_pt = _pylab_tweaks
# Functions from other libraries
# convenient alias so users can call functions.average()
average = _n.average

# module-level preferences object; created only on first import so that
# reload() doesn't clobber an existing instance. Narrowed the bare except
# to NameError so real errors are not swallowed.
try: _prefs
except NameError: _prefs = None
def _print_figures(figures, arguments='', file_format='pdf', target_width=8.5, target_height=11.0, target_pad=0.5):
    """
    figure printing loop designed to be launched in a separate thread.

    Saves each figure to a temp file, scaled to fit the padded target page,
    then hands the file to the user's print_command.
    """
    # usable page area, computed ONCE. BUG FIX: the original subtracted the
    # pad again inside the loop, shrinking each successive figure.
    usable_height = target_height - 2*target_pad
    usable_width  = target_width  - 2*target_pad

    for fig in figures:
        # output the figure to postscript
        path = _os.path.join(_prefs.temp_dir,"graph."+file_format)

        # get the dimensions of the figure in inches
        w=fig.get_figwidth()
        h=fig.get_figheight()

        # depending on the aspect we scale by the vertical or horizontal value
        if 1.0*h/w > usable_height/usable_width:
            # scale down according to the vertical dimension
            new_h = usable_height
            new_w = w*usable_height/h
        else:
            # scale down according to the horizontal dimension
            new_w = usable_width
            new_h = h*usable_width/w

        fig.set_figwidth(new_w)
        fig.set_figheight(new_h)

        # save it
        fig.savefig(path, bbox_inches=_pylab.matplotlib.transforms.Bbox(
            [[-target_pad, new_h-usable_height-target_pad],
             [usable_width-target_pad, usable_height-target_pad]]))

        # restore the original size
        fig.set_figheight(h)
        fig.set_figwidth(w)

        # assemble and run the print command
        if not arguments == '':
            c = _prefs['print_command'] + ' ' + arguments + ' "' + path + '"'
        else:
            c = _prefs['print_command'] + ' "' + path + '"'

        # single-argument parenthesized print: same output on python 2 and 3
        print(c)
        _os.system(c)
def append_to_file(path, string):
    """Append string to the text file at path (created if absent)."""
    # 'with' guarantees the handle is closed even if the write raises
    # (the original leaked the handle on error)
    with open(path, 'a') as file:
        file.write(string)
def array_shift(a, n, fill="average"):
    """
    Return a copy of a with all elements shifted forward in index by n.

    a               source array
    n               shift amount (positive or negative)
    fill="average"  fill the newly-empty elements with the array's average
    fill="wrap"     fill the newly-empty elements with the lopped-off elements
    fill=37.2       fill the newly-empty elements with the value 37.2
    """
    new_a = _n.array(a)
    if n==0: return new_a

    fill_array = _n.array([])
    fill_array.resize(_n.abs(n))

    # fill up the fill array before we do the shift.
    # BUG FIX: compare the strings with == instead of 'is' -- identity
    # tests against literals only work by CPython interning accident.
    if fill == "average": fill_array = 0.0*fill_array + _n.average(a)
    elif fill == "wrap" and n >= 0:
        for i in range(0,n): fill_array[i] = a[i-n]
    elif fill == "wrap" and n < 0:
        for i in range(0,-n): fill_array[i] = a[i]
    else: fill_array = 0.0*fill_array + fill

    # shift and fill
    if n > 0:
        for i in range(n, len(a)): new_a[i] = a[i-n]
        for i in range(0, n):      new_a[i] = fill_array[i]
    else:
        for i in range(0, len(a)+n): new_a[i] = a[i-n]
        for i in range(0, -n):       new_a[-i-1] = fill_array[-i-1]

    return new_a
def assemble_covariance(error, correlation):
    """
    Combine an error vector and a correlation matrix into the covariance
    matrix: cov[n][m] = correlation[n][m] * error[n] * error[m].
    """
    e = _n.asarray(error)
    return _n.asarray(correlation) * _n.outer(e, e)
def avg(array):
    """Return the arithmetic mean of array as a float."""
    total = float(sum(array))
    return total / float(len(array))
def chi_squared(p, f, xdata, ydata):
    """Return chi-squared: the sum of squared residuals of f(p, xdata) vs ydata."""
    residuals = ydata - f(p, xdata)
    return sum(residuals**2)
def coarsen_array(array, level=1, method='average'):
    """
    Returns a shorter array of binned data (every level+1 data points).

    method can be 'average', 'max', 'min', or 'all'
    ('all' -- in fact anything other than the three names above --
    returns the tuple (average, maximum, minimum))

    returns a new array.
    """
    # BUG FIX: '== 0' / 'is None' instead of the identity test 'is 0' and
    # the elementwise (and for numpy arrays ambiguous) 'array==None'
    if level == 0 or array is None: return array

    step = level + 1

    # we do all of them for speed reasons (no string comparison at each step)
    average = _n.array(array[0::step])
    maximum = _n.array(array[0::step])
    minimum = _n.array(array[0::step])

    temp = _n.array([0.0]); temp.resize(step)

    # loop over 0, step, 2*step, ...
    for n in range(0, len(array), step):
        # loop over this bin
        for m in range(n, n+step):
            # make sure we're not past the end of the array
            if m < len(array): temp[m-n] = array[m]
            # otherwise give it a useful value (the average of the others)
            else:              temp[m-n] = _n.average(temp[0:m-n])

        # store the bin statistics (integer '//' so this also runs on python 3)
        average[n//step] = _n.average(temp)
        maximum[n//step] = _n.max(temp)
        minimum[n//step] = _n.min(temp)

    if method=="average": return average
    if method=="min"    : return minimum
    if method=="max"    : return maximum
    else                : return average, maximum, minimum
def coarsen_data(xdata, ydata, yerror=None, level=1):
    """
    Bin the data, returning coarsened (numpy) [xdata, ydata, yerror].

    Errors are averaged in quadrature. When yerror is None the per-bin
    error sum defaults to 1.0 (matching the historical behavior) and None
    is returned in its place.
    """
    new_xdata = []
    new_ydata = []
    new_error = []

    # if level = 1, loop over 0, 2, 4, ...
    for n in range(0, len(xdata), level+1):
        count = 0.0
        sumx  = 0.0
        sumy  = 0.0
        sume2 = 0.0 # sum of squared errors

        # if n==2, loop 2, 3
        for m in range(n, n+level+1):
            if m < len(xdata):
                sumx += xdata[m]
                sumy += ydata[m]
                # explicit None test instead of the original bare
                # try/except, which silently swallowed EVERY error type
                if yerror is None: sume2  = 1.0
                else:              sume2 += yerror[m]**2
                count += 1.0

        new_xdata.append(sumx/count)
        new_ydata.append(sumy/count)
        new_error.append(sume2**0.5/count)

    xdata = _n.array(new_xdata)
    ydata = _n.array(new_ydata)
    # 'is not None' -- 'not yerror==None' is elementwise for numpy arrays
    if yerror is not None: yerror = _n.array(new_error)
    return [xdata,ydata,yerror]
def coarsen_matrix(Z, xlevel=0, ylevel=0, method='average'):
    """
    Return a coarsened numpy matrix: each row is binned with ylevel, then
    each column with xlevel. method can be 'average', 'max', or 'min'.

    (The original carried a second, unreachable implementation after the
    return statement; it has been removed.)
    """
    # coarsen along each row first (ylevel bins within a row)
    if not ylevel:
        Z_coarsened = Z
    else:
        temp = []
        for z in Z: temp.append(coarsen_array(z, ylevel, method))
        Z_coarsened = _n.array(temp)

    # then coarsen down the columns: transpose, bin, transpose back
    if xlevel:
        Z_coarsened = Z_coarsened.transpose()
        temp = []
        for z in Z_coarsened: temp.append(coarsen_array(z, xlevel, method))
        Z_coarsened = _n.array(temp).transpose()

    return Z_coarsened
def combine_dictionaries(a, b):
    """
    Return a new dictionary merging a and b; on key collisions the value
    from a wins.
    """
    merged = dict(b)
    merged.update(a)
    return merged
def data_from_file(path, delimiter=" "):
    """
    Read a two-column delimited text file and return [x, y] as numpy
    arrays; lines with fewer than two fields are skipped.
    """
    x = []
    y = []
    for line in read_lines(path):
        fields = line.split(delimiter)
        if len(fields) > 1:
            x.append(float(fields[0]))
            y.append(float(fields[1]))
    return([_n.array(x), _n.array(y)])
def data_to_file(path, xarray, yarray, delimiter=" ", mode="w"):
    """
    Write paired x/y data to a text file, one "x<delimiter>y" pair per
    line. mode "w" overwrites, "a" appends.
    """
    # 'with' closes the file even if a write fails (the original leaked
    # the handle on error)
    with open(path, mode) as file:
        for n in range(0, len(xarray)):
            file.write(str(xarray[n]) + delimiter + str(yarray[n]) + '\n')
def decompose_covariance(c):
    """
    Split a covariance matrix into [error_vector, correlation_matrix],
    where error[k] = sqrt(c[k][k]) and corr[n][m] = c[n][m]/(e[n]*e[m]).
    """
    # work on a copy so the caller's matrix is untouched
    c = _n.array(c)
    size = len(c[0])

    # errors are the square roots of the diagonal
    e = [_n.sqrt(c[k][k]) for k in range(size)]

    # normalize every entry by the corresponding error product
    for row in range(size):
        for col in range(size):
            c[row][col] = c[row][col] / (e[row]*e[col])

    return [_n.array(e), _n.array(c)]
def derivative(xdata, ydata):
    """
    Nearest-neighbor slope d(ydata)/d(xdata) for well-ordered data.
    Returns [x_interior, dydx] as plain lists (both endpoints dropped).
    """
    interior = range(1, len(xdata)-1)
    xs = [xdata[k] for k in interior]
    slopes = [(ydata[k+1]-ydata[k-1])/(xdata[k+1]-xdata[k-1]) for k in interior]
    return [xs, slopes]
def derivative_fit(xdata, ydata, neighbors=1):
    """
    Sliding-window least-squares slope: at each point, fit a line to that
    point plus `neighbors` points on each side (window clipped at the data
    ends). Returns (x_centers, slopes) as numpy arrays.

    xdata should probably be well-ordered.
    """
    centers = []
    slopes = []
    last = len(xdata)-1

    for k in range(last+1):
        # clip the fit window to the ends of the data
        lo = max(0, k-neighbors)
        hi = min(last, k+neighbors)

        xwin = _n.array(xdata[lo:hi+1])
        ywin = _n.array(ydata[lo:hi+1])
        slope, intercept = fit_linear(xwin, ywin)

        # report the slope at the window's mean x
        centers.append(float(sum(xwin))/len(xwin))
        slopes.append(slope)

    return _n.array(centers), _n.array(slopes)
def difference(ydata1, ydata2):
    """
    Return the constant offset to add to ydata1 to best line it up with
    ydata2 (the mean elementwise difference ydata2 - ydata1).
    """
    delta = _n.array(ydata2) - _n.array(ydata1)
    return(sum(delta)/len(ydata1))
def distort_matrix_X(Z, X, f, new_xmin, new_xmax, subsample=3):
    """
    Applies a distortion (remapping) to the matrix Z (and x-values X) using function f.
    returns new_Z, new_X

    f is an INVERSE function old_x(new_x)

    Z is a matrix. X is an array where X[n] is the x-value associated with the array Z[n].

    new_xmin, new_xmax is the possible range of the distorted x-variable for generating Z

    subsample scales how many bins the stretched Z gets (len(Z)*subsample).
    """
    Z = _n.array(Z)
    X = _n.array(X)
    points = len(Z)*subsample

    # define a function for searching; it reads target_old_x from the
    # enclosing scope (a closure), which is retargeted twice below
    def zero_me(new_x): return f(new_x)-target_old_x

    # do a simple search to find the new_x that gives old_x = min(X)
    target_old_x = min(X)
    new_xmin = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001)

    # same search for the upper end: old_x = max(X)
    target_old_x = max(X)
    new_xmax = find_zero_bisect(zero_me, new_xmin, new_xmax, _n.abs(new_xmax-new_xmin)*0.0001)

    # now loop over all the new x values
    new_X = []
    new_Z = []
    bin_width = float(new_xmax-new_xmin)/(points)
    for new_x in frange(new_xmin, new_xmax, bin_width):

        # make sure we're in the range of X
        if f(new_x) <= max(X) and f(new_x) >= min(X):

            # add this guy to the array
            new_X.append(new_x)

            # get the interpolated column
            new_Z.append( interpolate(X,Z,f(new_x)) )

    return _n.array(new_Z), _n.array(new_X)
def distort_matrix_Y(Z, Y, f, new_ymin, new_ymax, subsample=3):
    """
    Remap the y-axis of matrix Z (values Y) through f, exactly as
    distort_matrix_X does for x: f is the INVERSE map old_y(new_y) and
    new_ymin/new_ymax bound the distorted variable.

    returns new_Z, new_Y
    """
    # reuse the x-axis machinery on the transpose, then transpose back
    flipped_Z, new_Y = distort_matrix_X(Z.transpose(), Y, f, new_ymin, new_ymax, subsample)
    return flipped_Z.transpose(), new_Y
def dumbguy_minimize(f, xmin, xmax, xstep):
    """
    Brute-force 1-d minimizer: step x from xmin toward xmax and return the
    first local minimum found as (x, f(x)). If no local minimum is found,
    returns the last point evaluated.
    """
    prev = f(xmin)
    this = f(xmin+xstep)

    for x in frange(xmin+xstep,xmax,xstep):
        # 'following' instead of the original name 'next', which shadowed
        # the python builtin
        following = f(x+xstep)

        # see if we're sitting in a local minimum
        if this < prev and this < following: return x, this

        prev = this
        this = following

    return x, this
def elements_are_numbers(array, start_index=0, end_index=-1):
    """
    Classify the elements of array: returns 0 if any element is neither
    float- nor complex-parsable, 1 if all parse as floats, and 2 if at
    least one needs complex parsing (with or without surrounding parens).
    """
    # NOTE(review): start_index/end_index are accepted (and end_index is
    # normalized below) but the loop scans the WHOLE array regardless --
    # confirm whether range-limited checking was intended.
    if len(array) == 0: return 0

    output_value=1
    if end_index < 0: end_index=len(array)-1
    for n in array:
        try: float(n)
        except:
            try:
                # not a float -- maybe a plain complex like "1+2j"
                complex(n)
                output_value=2
            except:
                try:
                    # strings like "(1+2j)": strip the parens and retry
                    complex(n.replace('(','').replace(')',''))
                    output_value=2
                except:
                    return 0
    return output_value
def elements_are_strings(array, start_index=0, end_index=-1):
    """
    Return 1 if every element of array in [start_index, end_index] is a
    string, else 0. An empty array returns 0; a negative end_index means
    "through the last element".
    """
    if len(array) == 0: return 0
    if end_index < 0: end_index=len(array)-1

    for n in range(start_index, end_index+1):
        # isinstance instead of exact type comparison (also accepts str
        # subclasses)
        if not isinstance(array[n], str): return 0
    return 1
def erange(start, end, steps):
    """
    Returns a numpy array over the specified range taking geometric steps.
    Returns None (with a message) when start or end is zero.
    See also numpy.logspace()
    """
    # single-argument parenthesized print: same output on python 2 and 3
    if start == 0:
        print("Nothing you multiply zero by gives you anything but zero. Try picking something small.")
        return None
    if end == 0:
        print("It takes an infinite number of steps to get to zero. Try a small number?")
        return None

    # figure out our multiplication scale
    x = (1.0*end/start)**(1.0/(steps-1))

    # now generate the array
    ns = _n.array(range(0,steps))
    a = start*_n.power(x,ns)

    # tidy up the last element (there's often roundoff error)
    a[-1] = end
    return a
def find_N_peaks(array, N=4, max_iterations=100, rec_max_iterations=3, recursion=1):
    """
    This will run the find_peaks algorythm, adjusting the baseline until exactly N peaks are found.

    Bisects the baseline between the data's min and max, raising it when
    too many peaks are found and lowering it when too few; each
    above-baseline run is also searched recursively for a hidden double
    peak. Returns the list of peak indices, or None if N peaks were never
    isolated within max_iterations.
    """
    if recursion<0: return None

    # get an initial guess as to the baseline
    ymin = min(array)
    ymax = max(array)

    for n in range(max_iterations):
        # bisect the range to estimate the baseline
        y1 = (ymin+ymax)/2.0

        # now see how many peaks this finds. p could have 40 for all we know
        p, s, i = find_peaks(array, y1, True)

        # now loop over the subarrays and make sure there aren't two peaks in any of them
        # NOTE(review): this inner loop reuses (shadows) the outer loop
        # variable n -- harmless here but worth confirming/renaming.
        for n in range(len(i)):
            # search the subarray for two peaks, iterating 3 times (75% selectivity)
            p2 = find_N_peaks(s[n], 2, rec_max_iterations, rec_max_iterations=rec_max_iterations, recursion=recursion-1)

            # if we found a double-peak
            if not p2==None:
                # push these non-duplicate values into the master array
                for x in p2:
                    # if this point is not already in p, push it on
                    if not x in p: p.append(x+i[n]) # don't forget the offset, since subarrays start at 0

        # if we nailed it, finish up
        if len(p) == N: return p

        # if we have too many peaks, we need to increase the baseline
        if len(p) > N: ymin = y1

        # too few? decrease the baseline
        else: ymax = y1

    return None
def find_peaks(array, baseline=0.1, return_subarrays=False):
    """
    Identify the indices of the peaks in array, in ascending order.

    Scans until the data rises above `baseline`, treats the whole
    contiguous above-baseline run as one peak, and records the index of
    that run's maximum. With return_subarrays=True, also returns each
    run's values and its starting index:
    (peaks, subarray_values, subarray_indices).
    """
    peaks = []
    run_values = []
    run_starts = []

    i = 0
    total = len(array)
    while i < total:
        # skip ahead while we're at or below the baseline
        if array[i] <= baseline:
            i += 1
            continue

        # entered an above-baseline run; remember where it starts
        run_starts.append(i)
        run_values.append([])

        best_y = baseline
        best_i = i
        while i < total and array[i] > baseline:
            run_values[-1].append(array[i])
            if array[i] > best_y:
                best_y = array[i]
                best_i = i
            i += 1

        # the run's maximum is the peak
        peaks.append(best_i)

    if return_subarrays: return peaks, run_values, run_starts
    else:                return peaks
def find_two_peaks(data, remove_background=True):
    """
    Returns two indicies for the two maxima

    Finds the global maximum, walks outward to estimate its half-width,
    then finds the largest value outside that width as the second peak.
    Returns [n1, -1] if the half-width search runs off the data.
    """
    y = _n.array( data )
    x = _n.array( range(0,len(y)) )

    # if we're supposed to, remove the linear background
    if remove_background:
        [slope, offset] = fit_linear(x,y)
        y = y - slope*x
        y = y - min(y)

    # find the global maximum
    max1 = max(y)
    n1   = index(max1, y)

    # now starting at n1, work yourway left and right until you find
    # the left and right until the data drops below a 1/2 the max.
    # the first side to do this gives us the 1/2 width.
    np = n1+1
    nm = n1-1
    yp = max1
    ym = max1
    width = 0
    while 0 < np < len(y) and 0 < nm < len(y):
        yp = y[np]
        ym = y[nm]

        # either side dropping below half-max fixes the width
        if yp <= 0.5*max1 or ym <= 0.5*max1:
            width = np - n1
            break

        np += 1
        nm -= 1

    # if we didn't find it, we pooped out
    if width == 0:
        return [n1,-1]

    # this means we have a valid 1/2 width. Find the other max in the
    # remaining data (walking outward symmetrically from the first peak)
    n2 = nm
    while 1 < np < len(y)-1 and 1 < nm < len(y)-1:
        if y[np] > y[n2]:
            n2 = np
        if y[nm] > y[n2]:
            n2 = nm
        np += 1
        nm -= 1

    return([n1,n2])
def find_zero_bisect(f, xmin, xmax, xprecision):
    """
    Bisection root finder: narrow [xmin, xmax] until it is shorter than
    xprecision and return the midpoint. Returns None (with a message) when
    f has the same sign at both endpoints, i.e. no bracketed zero.
    """
    if f(xmax)*f(xmin) > 0:
        # single-argument parenthesized print so this line is valid (and
        # prints the same text) on both python 2 and 3
        print("find_zero_bisect(): no zero on the range " + str(xmin) + " to " + str(xmax))
        return None

    # make sure xmin <= xmax
    temp = min(xmin,xmax)
    xmax = max(xmin,xmax)
    xmin = temp

    xmid = (xmin+xmax)*0.5
    while xmax-xmin > xprecision:
        y = f(xmid)

        # keep the half-interval with one endpoint above and one below zero
        if y > 0:
            # move left or right?
            if f(xmin) < 0: xmax=xmid
            else:           xmin=xmid

        # f(xmid) is below zero
        elif y < 0:
            # move left or right?
            if f(xmin) > 0: xmax=xmid
            else:           xmin=xmid

        # exact hit
        else: return xmid

        # bisect again
        xmid = (xmin+xmax)*0.5

    return xmid
def fit_linear(xdata, ydata):
    """
    Least-squares line through (xdata, ydata): returns (slope, intercept).
    Expects numpy arrays (uses elementwise products).
    """
    # local mean identical to the module's avg() helper
    def mean(v): return float(sum(v))/float(len(v))

    mx  = mean(xdata)
    my  = mean(ydata)
    mxx = mean(xdata*xdata)
    myx = mean(ydata*xdata)

    slope     = (myx - my*mx) / (mxx - mx*mx)
    intercept = my - slope*mx
    return slope, intercept
def frange(start, end, inc=1.0):
    """
    Float-friendly range: numpy array from start to end (inclusive) in
    steps of inc; the increment's sign is corrected automatically. A zero
    inc returns just [start, end]. See also numpy.linspace().
    """
    start, end, inc = 1.0*start, 1.0*end, 1.0*inc

    # degenerate increment
    if not inc: return _n.array([start,end])

    # flip the increment if it points away from end
    if 1.0*(end-start)/inc < 0.0: inc = -inc

    # integer step counts scaled back to floats
    steps = _n.array(range(0, int(1.0*(end-start)/inc)+1))
    return start + steps*inc
def imax(array):
    """
    Returns the index of the first occurrence of the maximum of array
    (-1 is unreachable for non-empty input; kept for index() parity).
    """
    biggest = max(array)
    for k in range(0,len(array)):
        if array[k] == biggest: return(k)
    return(-1)
def imin(array):
    """
    Returns the index of the first occurrence of the minimum of array
    (-1 is unreachable for non-empty input; kept for index() parity).
    """
    smallest = min(array)
    for k in range(0,len(array)):
        if array[k] == smallest: return(k)
    return(-1)
def index(value, array):
    """Return the position of the first element equal to value, or -1."""
    for position, element in enumerate(array):
        if element == value: return(position)
    return(-1)
def index_nearest(value, array):
    """
    expects a numpy array; returns the index of the element nearest to
    value (the first global minimum of (value-array)^2).
    """
    return int(((array-value)**2).argmin())
def index_next_crossing(value, array, starting_index=0, direction=1):
    """
    Walk the array from starting_index and return the first index n where
    the data crosses `value` between n and n+1 (direction=-1 for a
    downward crossing). Returns -1 if no crossing is found.
    """
    for n in range(starting_index, len(array)-1):
        before = (value-array[n]  )*direction
        after  = (value-array[n+1])*direction
        if before >= 0 and after < 0: return n

    # no crossing found
    return -1
def insert_ordered(value, array):
    """
    Inserts value into the (assumed sorted, ascending) list array, keeping
    it sorted, and returns the index at which it was inserted. Values equal
    to an existing element are inserted after it.
    """
    import bisect  # stdlib binary search; O(log n) vs the old linear scan

    i = bisect.bisect_right(array, value)
    array.insert(i, value)
    return i
def integrate(f, x1, x2):
    """
    Numerically integrates f(x) from x1 to x2 using scipy's quad(),
    returning only the value (the error estimate is discarded).
    """
    value, error_estimate = quad(f, x1, x2)
    return value
def integrate2d(f, x1, x2, y1, y2):
    """
    Numerically integrates f(x,y) over the rectangle [x1,x2] x [y1,y2]
    by nesting two 1-D quad() integrations.
    """
    # inner integral over x at fixed y, then integrate that over y
    def x_integral(y):
        return quad(lambda x: f(x, y), x1, x2)[0]
    return quad(x_integral, y1, y2)[0]
def integrate3d(f, x1, x2, y1, y2, z1, z2):
    """
    Numerically integrates f(x,y,z) over the box [x1,x2] x [y1,y2] x [z1,z2]
    by nesting three 1-D quad() integrations.
    """
    # innermost over x, then y, then the outer integral over z
    def xy_integral(z):
        return quad(lambda y: quad(lambda x: f(x, y, z), x1, x2)[0], y1, y2)[0]
    return quad(xy_integral, z1, z2)[0]
def integrate_data(xdata, ydata, xmin=None, xmax=None, autozero=0):
    """
    Numerically integrates ydata over xdata using the trapezoid rule.

    xmin, xmax   integration limits (default: the full data range)
    autozero     number of initial data points used to estimate a constant
                 background, which is subtracted before integrating.

    Returns (xdata, integrated ydata) as numpy arrays.
    """
    # sort by x and make sure we're working with numpy arrays
    [xdata, ydata] = sort_matrix([xdata, ydata], 0)
    xdata = _n.array(xdata)
    ydata = _n.array(ydata)

    if xmin is None: xmin = min(xdata)
    if xmax is None: xmax = max(xdata)

    # index range to integrate over
    imin = xdata.searchsorted(xmin)
    imax = xdata.searchsorted(xmax)

    # subtract the background estimated from the first `autozero` points
    if autozero >= 1:
        zero  = _n.average(ydata[imin:imin+int(autozero)])
        ydata = ydata - zero

    # running trapezoid-rule integral, starting at zero at the first point
    # (the original had an unreachable branch for empty yint -- yint always
    # starts with one element, so it was dead code and has been removed)
    xint = [xdata[imin]]
    yint = [0]
    for n in range(imin+1, imax):
        xint.append(xdata[n])
        yint.append(yint[-1] + 0.5*(xdata[n]-xdata[n-1])*(ydata[n]+ydata[n-1]))

    return _n.array(xint), _n.array(yint)
def interpolate(xarray, yarray, x, rigid_limits=True):
    """
    Returns the y value of the linearly-interpolated function y(x).
    Assumes xarray is increasing!

    rigid_limits=True   complain and return None when x is outside xarray
    rigid_limits=False  clamp to the nearest endpoint's y-value instead
    """
    if not len(xarray) == len(yarray):
        print("lengths don't match. " + str(len(xarray)) + " " + str(len(yarray)))
        return None

    # handle x outside the data range
    if x < xarray[0] or x > xarray[-1]:
        if rigid_limits:
            print("x=" + str(x) + " is not in " + str(min(xarray)) + " to " + str(max(xarray)))
            return None
        # clamp to the nearest endpoint
        if x < xarray[0]: return yarray[0]
        return yarray[-1]

    # find the first segment [n1, n2] that brackets x
    # (guaranteed to exist because x is within range here; the original's
    # "couldn't find x anywhere" branch was unreachable and has been removed)
    for n2 in range(1, len(xarray)):
        if min(xarray[n2], xarray[n2-1]) <= x <= max(xarray[n2], xarray[n2-1]):
            break
    n1 = n2 - 1

    # linear interpolation within the bracketing segment
    return yarray[n1] + (x-xarray[n1])*(yarray[n2]-yarray[n1])/(xarray[n2]-xarray[n1])
def invert_increasing_function(f, f0, xmin, xmax, tolerance, max_iterations=100):
    """
    Bisection search for x in [xmin, xmax] such that f(x) = f0 within
    tolerance. f must be increasing on the interval. Returns the best x
    found; if the tolerance is not reached within max_iterations, complains
    and returns the midpoint of the final bracket.
    """
    for n in range(max_iterations):
        # start at the middle of the current bracket
        x  = 0.5*(xmin+xmax)
        df = f(x)-f0
        if _n.fabs(df) < tolerance: return x

        # f is increasing: if f(x) overshoots f0 the root is to the LEFT,
        # so shrink the upper bound (BUGFIX: the original moved the wrong
        # bound here, which made the bracket walk away from the root)
        if df > 0: xmax=x
        else:      xmin=x

    print("Couldn't find value!")
    return 0.5*(xmin+xmax)
def is_a_number(s):
    """
    Returns True if s can be interpreted as a float or complex number.
    """
    # catch only conversion failures; the old bare excepts also swallowed
    # KeyboardInterrupt and friends
    try:
        float(s)
        return True
    except (ValueError, TypeError):
        try:
            complex(s)
            return True
        except (ValueError, TypeError):
            return False
def is_close(x, array, fraction=0.0001):
    """
    Returns True if x is within the given fractional distance of any
    element of array. Zero elements require an exact match.
    """
    for value in array:
        # zero reference: only an exact zero counts as "close"
        if value == 0:
            if x == 0: return True
        elif abs((x-value)/value) < fraction:
            return True
    return False
def is_iterable(a):
    """
    Returns True if a exposes the iteration protocol (has an __iter__
    attribute), False otherwise.
    """
    return getattr(a, '__iter__', None) is not None
def join(array_of_strings, delimiter=' '):
    """
    Joins the string representations of the elements with delimiter
    (None means a single space). Returns "" for an empty sequence.
    """
    if delimiter is None: delimiter = ' '

    # str.join is linear-time; the old += loop was quadratic, and the
    # old `== []` emptiness check broke on empty tuples
    return delimiter.join(str(x) for x in array_of_strings)
def load_object(path="ask", text="Load a pickled object."):
    """
    Unpickles and returns an object from disk.

    path="ask" pops up a file dialog (returns None if cancelled or the
    path is empty). The loaded object gets its _path attribute set to
    the file it came from.
    """
    if path=="ask": path = _dialogs.SingleFile("*.pickle", text=text)
    if path == "": return None
    f = open(path, "r")
    object = _cPickle.load(f)
    f.close()
    # remember where this object came from
    object._path = path
    return object
def printer(figure='gcf', arguments='', threaded=False, file_format='pdf'):
    """
    Quick function that saves the specified figure as a postscript and then
    calls the command defined by spinmob.prefs['print_command'] with this
    postscript file as the argument.

    figure='gcf' can be 'all', a number, or a list of numbers
    threaded     run the print command in a background thread
    file_format  format handed to _print_figures (default 'pdf')
    """
    global _prefs
    # bail out if the user never configured a print command
    if not _prefs.has_key('print_command'):
        print "No print command setup. Set the user variable prefs['print_command']."
        return
    # normalize `figure` into a list of figure numbers
    if figure=='gcf': figure=[_pylab.gcf().number]
    elif figure=='all': figure=_pylab.get_fignums()
    if not getattr(figure,'__iter__',False): figure = [figure]
    print "figure numbers in queue:", figure
    figures=[]
    for n in figure: figures.append(_pylab.figure(n))
    # now run the ps printing command
    if threaded:
        # store the canvas type of the last figure
        canvas_type = type(figures[-1].canvas)
        # launch the aforementioned function as a separate thread
        _thread.start_new_thread(_print_figures, (figures,arguments,file_format,))
        # wait until the thread is running
        _time.sleep(0.25)
        # wait until the canvas type has returned to normal
        t0 = _time.time()
        while not canvas_type == type(figures[-1].canvas) and _time.time()-t0 < 5.0:
            _time.sleep(0.1)
        if _time.time()-t0 >= 5.0:
            print "WARNING: Timed out waiting for canvas to return to original state!"
        # bring back the figure and command line
        _pylab.draw()
        _pylab_tweaks.get_pyshell()
    else:
        # synchronous: print, then redraw
        _print_figures(figures, arguments, file_format)
        _pylab.draw()
def psd(t, y, pow2=False, window=None):
    """
    Single-sided power spectral density, assuming real-valued input.

    Goes through the numpy fourier transform process, assembling and
    returning (frequencies, psd) in units of y^2/Hz given time data t
    and signal data y. Use psdfreq() to get just the frequencies.

    pow2     True keeps only the first 2^n data points (faster FFT)
    window   name of a numpy windowing function, e.g. window="hanning"
    """
    # make sure we're working with a numpy array
    y = _n.array(y)

    # optionally truncate to a power-of-two length
    if pow2:
        keep = 2**int(_n.log2(len(y)))
        # now resize the data (in place; t is truncated too so it matches)
        y.resize(keep)
        t.resize(keep)

    # look up the requested window function by name
    # NOTE: eval of a caller-supplied string -- only safe for trusted input
    w = None
    if window:
        try:
            w = eval("_n."+window, globals())
        except Exception:
            print("ERROR: Bad window!")
            return

    # apply the window, normalized so the total power is preserved
    if w:
        a = w(len(y))
        y = len(y) * a * y / sum(a)

    # do the actual fft, and normalize the power
    fft = _n.fft.fft(y)
    P = _n.real(fft*fft.conj())
    P = P / len(y)**2

    # fold the negative-frequency half onto the positive half (single-sided),
    # then undo the doubling at DC. BUGFIX: use floor division -- the old
    # len(P)/2 slice indices are TypeErrors on Python 3.
    Fpos = psdfreq(t, pow2=False)
    Ppos = P[0:len(P)//2] + P[0:(-len(P))//2]
    Ppos[0] = Ppos[0]/2.0

    # get the normalized power in y^2/Hz
    Ppos = Ppos/(Fpos[1]-Fpos[0])

    return Fpos, Ppos
def psdfreq(t, pow2=False):
    """
    Given a uniformly-spaced time array t, returns the non-negative
    frequencies of the FFT, including zero.

    pow2=True truncates t (in place) to the first 2^n points, matching psd().
    """
    # if we're doing the power of 2, do it
    if pow2:
        keep = 2**int(_n.log2(len(t)))
        t.resize(keep)

    # the full two-sided frequency array
    F = _n.fft.fftfreq(len(t), t[1]-t[0])

    # keep only the non-negative half
    # BUGFIX: floor division -- len(F)/2 is a float (TypeError) on Python 3
    return F[0:len(F)//2]
def read_lines(path):
    """
    Returns all lines of the text file at path as a list of strings.
    """
    # 'with' guarantees the handle is closed even if readlines() raises;
    # plain 'r' already gives universal newlines ('rU' was removed in 3.11)
    with open(path, 'r') as f:
        return f.readlines()
def replace_in_files(search, replace, depth=0, paths="ask", confirm=True):
    """
    Does a line-by-line search and replace, but only up to the "depth" line.

    search    substring to look for
    replace   replacement substring
    depth     only consider the first `depth` lines of each file (0 = all)
    paths     list of file paths, or "ask" to pop up a file dialog
    confirm   True previews the changes first, then asks before re-running
              for real; the original is kept as <path>.backup when writing
    """
    # have the user select some files
    if paths=="ask":
        paths = _dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []: return
    for path in paths:
        lines = read_lines(path)
        if depth: N=min(len(lines),depth)
        else: N=len(lines)
        for n in range(0,N):
            if lines[n].find(search) >= 0:
                lines[n] = lines[n].replace(search,replace)
                # show the user what changed in this file
                print path.split(_os.path.pathsep)[-1]+ ': "'+lines[n]+'"'
        # keep the GUI responsive while churning through files
        _wx.Yield()
        # only write if we're not confirming
        if not confirm:
            _os.rename(path, path+".backup")
            write_to_file(path, join(lines, ''))
    # after the preview pass, ask once, then re-run for real (confirm=False)
    if confirm:
        if raw_input("yes? ")=="yes":
            replace_in_files(search,replace,depth,paths,False)
    return
def replace_lines_in_files(search_string, replacement_line):
    """
    Finds lines containing the search string and replaces the whole line with
    the specified replacement string. Pops up a file dialog to choose the
    files; each file is backed up to <path>.backup before being rewritten.
    """
    # have the user select some files
    paths = _dialogs.MultipleFiles('DIS AND DAT|*.*')
    if paths == []: return
    for path in paths:
        # keep a backup copy before touching anything
        _shutil.copy(path, path+".backup")
        lines = read_lines(path)
        for n in range(0,len(lines)):
            if lines[n].find(search_string) >= 0:
                # show the line being replaced
                print lines[n]
                lines[n] = replacement_line.strip() + "\n"
        write_to_file(path, join(lines, ''))
    return
def reverse(array):
    """
    Returns a reversed copy of the sequence as a numpy array.
    """
    return _n.array(list(array)[::-1])
def save_object(object, path="ask", text="Save this object where?"):
    """
    Pickles an object to disk.

    path="ask" pops up a save dialog (returns without saving if cancelled).
    A ".pickle" extension is appended when missing, and the object's
    _path attribute is set to the destination before pickling.
    """
    if path=="ask": path = _dialogs.Save("*.pickle", text=text)
    if path == "": return
    # make sure the file name ends in .pickle
    if len(path.split(".")) <= 1 or not path.split(".")[-1] == "pickle":
        path = path + ".pickle"
    object._path = path
    f = open(path, "w")
    _cPickle.dump(object, f)
    f.close()
def shift_feature_to_x0(xdata, ydata, x0=0, feature=imax):
    """
    Locates a feature in ydata and shifts xdata so the feature sits at x0.
    Returns (shifted xdata, ydata). Try me with
    plot.tweaks.manipulate_shown_data()!

    xdata, ydata   data set
    x0=0           where to place the feature
    feature=imax   function mapping an array/list to the index of the feature
    """
    n = feature(ydata)
    return xdata - xdata[n] + x0, ydata
def smooth(array, index, amount):
    """
    Returns the average of array over the window index-amount .. index+amount,
    with the window clipped at the ends of the array.
    """
    # clip the window to the valid index range
    start = max(index-amount, 0)
    stop  = min(index+amount, len(array)-1)

    total = 0.
    for n in range(start, stop+1): total += array[n]
    return total/(stop-start+1)
def smooth_array(array, amount=1):
    """
    Returns the nearest-neighbor (+/- amount) smoothed array.
    This does not modify the array or slice off the funny end points.
    amount=0 returns the input unchanged (same object, not a copy).
    """
    if amount==0: return array

    # we have to store the old values in a temp array to keep the
    # smoothing from affecting the smoothing
    # NOTE(review): the output reuses the input array's dtype, so smoothing
    # an integer numpy array truncates the averaged values -- confirm intended
    new_array = _n.array(array)

    for n in range(len(array)):
        new_array[n] = smooth(array, n, amount)

    return new_array
def smooth_data(xdata, ydata, yerror, amount=1):
    """
    Returns smoothed [xdata, ydata, yerror]. Does not destroy the input
    arrays. yerror may be None.
    """
    new_xdata = smooth_array(_n.array(xdata), amount)
    new_ydata = smooth_array(_n.array(ydata), amount)

    # BUGFIX: compare with 'is None' -- 'yerror == None' is an elementwise
    # comparison when yerror is a numpy array and raises on modern numpy
    if yerror is None: new_yerror = None
    else:              new_yerror = smooth_array(_n.array(yerror), amount)

    return [new_xdata, new_ydata, new_yerror]
def sort_matrix(a,n=0):
    """
    Returns a numpy array with the columns reordered so that row n is in
    ascending order; every other row is permuted the same way.
    """
    a = _n.array(a)
    # argsort of the key row gives the column permutation
    column_order = a[n,:].argsort()
    return a[:, column_order]
def submatrix(matrix,i1,i2,j1,j2):
    """
    Returns the submatrix with rows i1..i2 and columns j1..j2 of matrix
    as a numpy array. Endpoints included!
    """
    rows = [matrix[i][j1:j2+1] for i in range(i1, i2+1)]
    return _n.array(rows)
def trim_data(xdata, ydata, yerror, xrange):
    """
    Removes all the data except that between min(xrange) and max(xrange).
    Returns [x, y, yerror] as new numpy arrays; does not modify the inputs.
    xrange=None returns copies of the full data; yerror may be None.
    """
    # BUGFIX: use 'is None' -- '== None' is an elementwise comparison when
    # the argument is a numpy array and raises on modern numpy
    if xrange is None: return [_n.array(xdata), _n.array(ydata), _n.array(yerror)]

    xmax = max(xrange)
    xmin = min(xrange)

    x = []
    y = []
    ye= []
    for n in range(0, len(xdata)):
        if xmin <= xdata[n] <= xmax:
            x.append(xdata[n])
            y.append(ydata[n])
            if yerror is not None: ye.append(yerror[n])

    if yerror is None: ye = None
    else:              ye = _n.array(ye)

    return [_n.array(x), _n.array(y), ye]
def ubersplit(s, delimiters=['\t','\r',' ']):
    """
    Splits s on any of the given delimiters (default: tab, CR, space).
    Adjacent delimiters produce empty strings, like str.split(sep).
    """
    # map every delimiter onto the first one, then split on that
    primary = delimiters[0]
    for d in delimiters[1:]:
        s = s.replace(d, primary)
    return s.split(primary)
def write_to_file(path, string):
    """
    Overwrites the file at path with the given string.
    """
    # 'with' guarantees the handle is closed even if write() raises
    with open(path, 'w') as f:
        f.write(string)
|
streitho/spinmob | setup.py | <filename>setup.py
from distutils.core import setup
import time
# the version string encodes the build date, e.g. "stable-2012-03-05"
t = time.localtime()
setup(name = 'spinmob',
      version = "stable-%(y)d-%(m)02d-%(d)02d" % dict(y=t[0], m=t[1], d=t[2]),
      description = 'Spinmob Analysis Kit',
      author = '<NAME>',
      author_email = '<EMAIL>',
      url = 'http://code.google.com/p/spinmob',
      license = 'GPLv3',
      # ship batch files and text data alongside the package modules
      packages = ['spinmob'],
      package_data = {'spinmob':['*.bat', '*.txt']}
) |
streitho/spinmob | _plot_complex.py | import _plotting_mess; reload(_plotting_mess)
data = _plotting_mess.complex_data
databoxes = _plotting_mess.complex_databoxes
files = _plotting_mess.complex_files
function = _plotting_mess.complex_function |
streitho/spinmob | _prefs.py | <reponame>streitho/spinmob
import os as _os
def read_lines(path):
    """
    Returns all lines of the text file at path as a list of strings.
    """
    # 'with' guarantees the handle is closed even if readlines() raises;
    # plain 'r' already gives universal newlines ('rU' was removed in 3.11)
    with open(path, 'r') as f:
        return f.readlines()
class Prefs():
    """
    Holds the user preferences: filesystem locations plus a key=value
    dictionary persisted to ~/.spinmob/preferences.txt.
    """
    prefs_path = ''       # full path to preferences.txt
    home_dir = ''         # ~/.spinmob
    temp_dir = ''         # ~/.spinmob/temp
    colormaps_dir = ''    # ~/.spinmob/colormaps
    path_delimiter = ''   # "/" on posix, "\\" on windows
    prefs = {}            # the key -> value preference dictionary
    def __init__(self):
        """
        This class holds all the user-variables, paths etc...

        Creates ~/.spinmob (plus temp/, colormaps/ and preferences.txt)
        on first run, then loads preferences.txt into self.prefs.
        """
        # figure out what path delimiter we need to use
        if _os.name == "posix":
            self.path_delimiter = "/"
        else:
            # assume windows; give it a HOME so the paths below work
            _os.environ['HOME'] = _os.environ['USERPROFILE']
            self.path_delimiter = "\\"
        # assemble the home and temp directory path for this environment
        self.home_dir = _os.environ['HOME'] + self.path_delimiter + '.spinmob'
        self.temp_dir = self.home_dir + self.path_delimiter + 'temp'
        self.prefs_path = self.home_dir + self.path_delimiter + 'preferences.txt'
        self.colormaps_dir = self.home_dir + self.path_delimiter + 'colormaps'
        # see if this is the first time running (no home directory)
        if not _os.path.exists(self.home_dir):
            print "Creating "+self.home_dir
            _os.mkdir(self.home_dir)
        if not _os.path.exists(self.temp_dir):
            print "Creating "+self.temp_dir
            _os.mkdir(self.temp_dir)
        if not _os.path.exists(self.prefs_path):
            print "Creating "+self.prefs_path
            open(self.prefs_path, 'w').close()
        if not _os.path.exists(self.colormaps_dir):
            print "Creating "+self.colormaps_dir
            _os.mkdir(self.colormaps_dir)
        # now read in the prefs file: one "key = value" pair per line
        lines = read_lines(self.prefs_path)
        self.prefs = {}
        for n in range(0,len(lines)):
            s = lines[n].split('=')
            if len(s) > 1:
                self.prefs[s[0].strip()] = s[1].strip()
    # dictionary-style conveniences, all routed through Get/Set
    def __call__ (self, key): return self.Get(key)
    def __getitem__(self,key): return self.Get(key)
    def __setitem__(self,key,value): self.Set(key, value)
    def __str__(self):
        # one "key = value" line per preference
        s = ''
        for key in self.prefs.keys():
            s = s + key + " = " + self.prefs[key] + '\n'
        return s
    def keys(self): return self.prefs.keys()
    def has_key(self, key): return self.prefs.has_key(key)
    def List(self):
        """
        Lists the keys and values.
        """
        print
        for key in self.keys():
            print key,'=',self[key]
        print
    def Get(self, key):
        """
        Checks if the key exists and returns it. Returns None if it doesn't
        """
        if self.prefs.has_key(key):
            return self.prefs[key]
        else:
            return None
    def Set(self, key, value):
        """
        Sets the key-value pair and dumps to the preferences file.
        A value of None removes the key instead.
        """
        if not value == None: self.prefs[key] = value
        else: self.prefs.pop(key)
        # persist every change immediately
        self.Dump()
    def Remove(self, key):
        """
        Removes a key/value pair
        """
        self.Set(key, None)
    def Dump(self):
        """
        Dumps the current prefs to the preferences.txt file
        """
        prefs_file = open(self.prefs_path, 'w')
        for n in range(0,len(self.prefs)):
            if len(self.prefs.items()[n]) > 1:
                prefs_file.write(str(self.prefs.items()[n][0]) + ' = ' +
                    str(self.prefs.items()[n][1]) + '\n')
        prefs_file.close()
|
streitho/spinmob | _pylab_colorslider.py | <reponame>streitho/spinmob<filename>_pylab_colorslider.py
#!/usr/bin/env python
#Boa:App:BoaApp
import wx
import matplotlib as _matplotlib
import pylab as _pylab
import _pylab_colorslider_frame as _pcf; reload(_pcf)
try: _prefs
except: _prefs = None
modules ={u'pylab_colorslider_frame': [1,
'Main frame of Application',
u'pylab_colorslider_frame.py']}
class BoaApp(wx.App):
    # minimal wx application wrapper: creates and shows the main
    # colorslider frame on startup
    def OnInit(self):
        self.main = _pcf.create(None)
        self.main.Show()
        self.SetTopWindow(self.main)
        return True
def main():
    # run the colorslider as a standalone wx application (blocks until quit)
    application = BoaApp(0)
    application.MainLoop()
if __name__ == '__main__':
main()
#
# This class contains one color point and generates new slider gui's when it's time to modify
#
class ColorPoint:
    # One anchor point of a colormap: a position in [0,1] plus the colors
    # just below (color) and just above (color2) that position. Generates
    # new slider GUIs when it's time to modify.
    color = None
    color2 = None
    position = 0.0
    min = 0.0 # in case the user modifies this
    max = 1.0 # in case the user modifies this
    parent = None     # the owning GuiColorMap
    slider = None     # the currently-shown ColorSliderFrame, if any
    def __init__(self, parent, position, red=0, green=0, blue=255, red2=0, green2=0, blue2=255):
        # just store the local variables
        self.parent = parent
        self.color = wx.Colour(red, green, blue)
        self.color2 = wx.Colour(red2,green2,blue2)
        self.position = position
        return
    def ShowSlider(self, position=[0,0]):
        """
        Creates a color slider GUI object, and pops it up. When the colorslider
        moves, this object's color data is updated.
        """
        # close/delete any old ones
        self.HideSlider()
        # find out if this is the "main" slider (that appears in the taskbar)
        n = None
        for i in range(len(self.parent.colorpoints)):
            if self == self.parent.colorpoints[i]: n=i
        # modify the style accordingly: only the last colorpoint's slider is
        # a real top-level window; the rest float on top of it, borderless
        style = 0
        if not n==len(self.parent.colorpoints)-1:
            style = wx.FRAME_NO_TASKBAR|wx.CLIP_CHILDREN|wx.FRAME_FLOAT_ON_PARENT|wx.NO_BORDER
            size = wx.Size(351, 38)
            parent = self.parent.colorpoints[-1].slider # better make the last one first!
        else:
            style = wx.CLIP_CHILDREN|wx.CAPTION|wx.MINIMIZE_BOX|wx.CLOSE_BOX|wx.SYSTEM_MENU
            size = wx.Size(351, 40+35*(len(self.parent.colorpoints)-1))
            parent = wx.GetApp().GetTopWindow()
        # convert the coords to a real position
        position = wx.Point(position[0], position[1])
        # create the GUI object
        self.slider = _pcf.ColorSliderFrame(parent, self, style=style, size=size, position=position)
        # the endpoint sliders can't move, so disable their controls
        if n in [0, len(self.parent.colorpoints)-1]: self.slider.EnableStuff(False)
        self.slider.Show()
    def HideSlider(self):
        # destroy the slider frame, if any, and forget about it
        if self.slider:
            self.slider.Hide()
            self.slider.Destroy()
            self.slider = None
#
# This class contains a list of color points and a link to a parent image.
# Its job is to update the parent image colormap
#
class GuiColorMap:
    # Holds a list of ColorPoints and a link to a parent matplotlib image;
    # its job is to keep the parent image's colormap in sync with them.
    colorpoints = []
    image = None
    def __init__(self, image="top", colormap="_last"):
        """
        This class contains a list of color points defining a colormap. It is
        capable of providing GUI sliders to modify the colors and locations of
        the color points in the color map and updating the supplied image on
        the fly.

        To get the initial color from the supplied image, it assumes that
        the red, green, and blue channels have the same set of positions!

        To find the image, try gca().images[0]

        set colormap=None to try and import the current colormap
        """
        if image == "top":
            image = _pylab.gca().images[0]
        # store the reference to the image
        self.image = image
        # get the data for easier coding
        if colormap == None:
            # use the color map from the image if possible
            c = image.cmap._segmentdata
            cr = c['red']
            cg = c['green']
            cb = c['blue']
            # get the number of steps in this cmap
            N = len(cb)
            # loop over the number of entries and generate the list
            self.colorpoints = []
            # try to import the colormap from the image; this only works
            # when all three channels share the same anchor positions
            for n in range(N):
                if cr[n][0] == cb[n][0] and cr[n][0] == cg[n][0]:
                    self.colorpoints.append(ColorPoint(
                        self, cr[n][0],
                        cr[n][1]*255, cg[n][1]*255, cb[n][1]*255,
                        cr[n][2]*255, cg[n][2]*255, cb[n][2]*255))
                else:
                    print "This colormap is too complicated. Switching to default."
                    colormap = "default"
                    break;
        # if we need to, use the default map
        if not colormap == None:
            self.LoadColorMap(colormap)
        # may as well show these guys to the user too
        self.ShowSliders()
    def LoadColorMap(self, name="default"):
        # Load colorpoints from "[spinmob prefs]/colormaps/<name>.txt";
        # each line is "position r g b r2 g2 b2". Falls back to a
        # hard-coded white-blue-red map on any failure.
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "r")
            lines = f.readlines()
            f.close()
            # now loop over the colors (lines) and generate a list
            self.colorpoints = []
            for line in lines:
                # split the line by white space
                s = line.split()
                # now create a new color point
                if len(s) == 7:
                    self.colorpoints.append(ColorPoint(self, float(s[0]),
                            float(s[1]), float(s[2]), float(s[3]),
                            float(s[4]), float(s[5]), float(s[6])))
        # use the hard-coded default
        except:
            print "Could not load "+_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt"
            self.colorpoints = [ColorPoint(self, 0.0, 255, 255, 255, 255, 255, 255),
                                ColorPoint(self, 0.5, 0, 0, 255, 0, 0, 255),
                                ColorPoint(self, 1.0, 255, 0, 0, 255, 0, 0)]
        # now update
        self.UpdateImage()
    def SaveColorMap(self, name="_last"):
        # Write the current colorpoints to the colormaps directory, one
        # "position r g b r2 g2 b2" line per point. Best-effort: failures
        # only print a warning.
        try:
            f = open(_prefs.colormaps_dir + _prefs.path_delimiter + name + ".txt", "w")
            # loop over the color points
            for c in self.colorpoints:
                f.write(str(c.position) + " " +
                        str(c.color.Red()) + " " + str(c.color.Green()) + " " + str(c.color.Blue()) + " " +
                        str(c.color2.Red())+ " " + str(c.color2.Green())+ " " + str(c.color2.Blue()) + "\n")
            f.close()
        except:
            print "Couldn't save last colormap!"
    def UpdateImage(self):
        """
        This takes the current values of the various color points, orders them,
        and updates the colormap of the parent image.
        """
        # first order the list according to the element positions
        # (selection sort that also nudges duplicate/out-of-range positions)
        new_list = []
        while len(self.colorpoints):
            # find the minimum position
            x0 = 2.0
            n0 = 0
            for n in range(len(self.colorpoints)):
                # if this item is smaller than the previous record, store it
                if self.colorpoints[n].position < x0:
                    x0 = self.colorpoints[n].position
                    n0 = n
                # if it's equal to the previous record, make it a little bigger
                # next time around, this can be the new minimum
                elif self.colorpoints[n].position == x0:
                    self.colorpoints[n].position = x0 + 0.0001
                # if it's larger than 1, set it to 1 and knock off the best a little
                if self.colorpoints[n].position > 1.0:
                    self.colorpoints[n].position = 1.0
                    self.colorpoints[n0].position = 1.0-0.0001
            # now we have the minimum index
            new_list.append(self.colorpoints.pop(n0))
        # now set the new list
        self.colorpoints = new_list
        # now generate the colormap from the ordered list: one
        # (position, below-value, above-value) triple per channel per point
        r = []
        g = []
        b = []
        for point in self.colorpoints:
            r.append((point.position, point.color.Red()/255.0, point.color2.Red()/255.0))
            g.append((point.position, point.color.Green()/255.0, point.color2.Green()/255.0))
            b.append((point.position, point.color.Blue()/255.0, point.color2.Blue()/255.0))
        # store the formatted dictionary
        c = {'red':r, 'green':g, 'blue':b}
        # now set the dang thing
        self.image.set_cmap(_matplotlib.colors.LinearSegmentedColormap('custom', c))
        _pylab.draw()
        # remember this map as "_last" for next time
        self.SaveColorMap()
    def ShowSliders(self):
        """
        This will show all the sliders, tiling them to the right of the figure
        """
        # loop over the points in the list, last first (it owns the frame
        # the others float on -- see ColorPoint.ShowSlider)
        for n in range(len(self.colorpoints)-1,-1,-1): self.ShowSlider(n, "auto")
    def HideSliders(self):
        for p in self.colorpoints: p.HideSlider()
    def ShowSlider(self, n, position="auto"):
        """
        This will show the n'th slider at the specified screen position
        """
        try:
            if position == "auto":
                # get the figure position and size
                p = self.image.figure.canvas.Parent.GetPosition()
                w = self.image.figure.canvas.Parent.GetSize()[0]
                if n==len(self.colorpoints)-1:
                    position = [p[0]+w, p[1]+40*(len(self.colorpoints)-n-1)]
                else:
                    position = [p[0]+w+3, p[1]+65+35*(len(self.colorpoints)-n-2)]
        except:
            print "Can't position slider relative to anything but a wxAgg plot."
        if not hasattr(position, '__iter__'): position = [0,0]
        self.colorpoints[n].ShowSlider(position)
    def HideSlider(self, n):
        self.colorpoints[n].HideSlider()
|
streitho/spinmob | _fitting_mess.py | <reponame>streitho/spinmob<filename>_fitting_mess.py
import pylab as _pylab
import matplotlib as _mpl
import spinmob as _s
import _functions as _fun ; reload(_fun)
import _pylab_tweaks as _tweaks ; reload(_tweaks)
import _models ; reload(_models)
import _dialogs ; reload(_dialogs)
import _data_types ; reload(_data_types)
from numpy import *
#
# Fit function based on the model class
#
def fit_databoxes(ds, xscript=0, yscript=1, eyscript=None, f='a*sin(x)+b', p='a=1.5, b', bg=None, a=None, command="", settings={}, g={}, **kwargs):
    """
    Create a model based on the supplied string and fit the supplied list of databoxes.

    f  is a string (or list of strings) of the curve to fit, or a
       function / list of functions f(x,a,b,..) that you have defined.

    p  is a comma-delimited string of parameters (with default values if
       you're into that)

    bg is the background function should you want to use it (leaving it as None
       sets it equal to f).

    g  is a list of additional globals, for example if you have defined your
       own functions and you want these to be visible to the function string when evaluated.

    This routine just generates a model based on this input. For more information
    about what the arguments should be, see spinmob.models.curve().
    """
    # expose the user-supplied globals so the eval'd model string can see them
    globals().update(g)
    # generate the model
    model = _s.models.curve(f=f, p=p, bg=bg, a=a, globs=globals())
    result = fit_databoxes_model(ds=ds, model=model, xscript=xscript, yscript=yscript, eyscript=eyscript, command=command, settings=settings, **kwargs)
    # reset the shared mutable defaults so state doesn't leak between calls
    settings = g = dict()
    return result
def fit_files(xscript=0, yscript=1, eyscript=None, f='a*sin(x)+b', p='a=1.5, b', bg=None, a=None, command="", settings={}, g={}, **kwargs):
    """
    Load a bunch of data files and fit them. kwargs are sent to "data.load_multiple()" which
    are then sent to "data.standard()". Useful ones to keep in mind:

    for loading: paths, default_directory

    See the above mentioned functions for more information.

    f  is a string of the curve to fit or a function f(x,a,b,..) that you have defined.

    p  is a comma-delimited string of parameters (with default values if
       you're into that)

    bg is the background function should you want to use it (leaving it as None
       sets it equal to f).

    g  is a list of additional globals, for example if you have defined your
       own functions and you want these to be visible to the function string when evaluated.

    This routine just generates a model based on this input. For more information
    about what the arguments should be, see spinmob.models.curve().
    """
    # expose the user-supplied globals so the eval'd model string can see them
    globals().update(g)
    # generate the model
    model = _s.models.curve(f=f, p=p, bg=bg, a=a, globs=globals())
    result = fit_files_model(model=model, xscript=xscript, yscript=yscript, eyscript=eyscript, command=command, settings=settings, **kwargs)
    # reset the shared mutable defaults so state doesn't leak between calls
    settings = g = dict()
    return result
def fit_files_model(model, xscript=0, yscript=1, eyscript=None, command="", settings={}, **kwargs):
    """
    Load a bunch of data files and fit them using fit_databoxes_model().

    kwargs are sent to "data.load_multiple()" which
    are then sent to "data.databox()". Useful ones to keep in mind:

    for loading:              paths, default_directory
    for generating data to fit: xscript, yscript, eyscript

    See the above mentioned functions for more information.
    """
    # Have the user select a bunch of files.
    ds = _s.data.load_multiple(**kwargs)
    result = fit_databoxes_model(ds=ds, model=model, xscript=xscript, yscript=yscript, eyscript=eyscript, command=command, settings=settings, **kwargs)
    # reset the shared mutable default so state doesn't leak between calls
    settings = dict()
    return result
def fit_databoxes_model(ds, model, xscript=0, yscript=1, eyscript=None, command="", settings={}, **kwargs):
    """
    Loops over the supplied databox(es) and fits them using the supplied model.

    ds                          a databox or a list of databoxes
    model                       spinmob model instance used for the fits
    xscript, yscript, eyscript  scripts selecting the data to fit
    command, settings           passed through to model.fit()

    Returns the list of fit-result dictionaries (each with its 'databox'
    added), or None if ds is empty/None.
    """
    if not ds: return

    # BUGFIX: the original tested the function object itself
    # ("if not _s.fun.is_iterable:") instead of calling it, so a single
    # databox was never wrapped in a list
    if not _s.fun.is_iterable(ds): ds = [ds]

    results = []
    for d in ds:
        print('\n\n\nFILE: %s / %s' % (ds.index(d)+1, len(ds)))
        print(str(d.path))

        # reset the fit and point the model at this databox's data
        model.fit_parameters = None
        settings["xscript"]  = xscript
        settings["yscript"]  = yscript
        settings["eyscript"] = eyscript

        # do the interactive fit.
        results.append(model.fit(d, command, settings))
        results[-1]['databox'] = d

        # make sure we didn't quit.
        if results[-1]['command'] == 'q': break

        # prepare for the next file, carrying forward any modified settings
        command=''
        if 'settings' in results[-1]: settings = results[-1]['settings']

    # clean up the shared mutable default
    settings = dict()
    return results
def fit_shown_data(f='a*sin(x)+b', p='a=1.5, b', bg=None, a=None, command="", settings={}, axes="gca", **kwargs):
    """
    Loops over the data currently shown on the axes, performing a fit on
    each Line2D (restricted to the visible x-range) in a separate figure.
    """
    # NOTE(review): **kwargs is accepted but never forwarded to model.fit()
    # -- confirm whether it should be passed along.
    # get the axes
    if axes == "gca": axes = _pylab.gca()
    xlabel=axes.get_xlabel()
    ylabel=axes.get_ylabel()
    # fall back to generic labels when the axes have none
    if xlabel == '' :
        xlabel='x'
    if ylabel == '' :
        ylabel='y'
    # get the xlimits
    xmin, xmax = axes.get_xlim()
    # get the output axes
    fn0 = axes.figure.number
    # create the data object for fitting
    d = _s.data.standard(xlabel,ylabel,None)
    # generate the model
    model = _s.models.curve(f=f, p=p, bg=bg, a=a, globs=globals())
    # loop over the data
    lines = axes.get_lines()
    results = []
    for n in range(len(lines)):
        line = lines[n]
        if isinstance(line, _mpl.lines.Line2D):
            # get the trimmed data from the line (visible x-range only)
            x, y = line.get_data()
            x, y, e = _fun.trim_data(x,y,None,[xmin,xmax])
            # put together a data object with the right parameters
            d.path = "Line: "+line.get_label()
            d[xlabel] = x
            d[ylabel] = y
            settings['xscript'] = xlabel
            settings['yscript'] = ylabel
            # do the fit (in the figure after this one)
            print '\n\n\nLINE:', n+1, '/', len(lines)
            model.fit_parameters = None
            settings['autopath'] = False
            settings['figure'] = axes.figure.number+1
            results.append(model.fit(d, command, settings))
            results[-1]['databox'] = d
            # make sure we didn't quit.
            if results[-1]['command'] == 'q': break
            # prepare for the next file.
            command=''
            if results[-1].has_key('settings'): settings = results[-1]['settings']
    # clean up and return focus to the original figure
    settings = dict()
    _pylab.figure(fn0)
    return results
|
streitho/spinmob | _data_types.py | import pylab as _pylab
import time
import wx as _wx
import os as _os
from mpl_toolkits.mplot3d import Axes3D
import _functions as _fun ;reload(_fun)
import _pylab_tweaks as _pt ;reload(_pt)
import _dialogs ;reload(_dialogs)
# do this so all the scripts will work with all the numpy functions
from numpy import *
fun = _fun
t0 = 0
def TimerStart():
    """
    Starts a timer (records the current time in the module-level t0).
    """
    global t0
    t0 = time.time()
def Time():
    """
    Returns the number of seconds elapsed since the last TimerStart().
    """
    return time.time() - t0
#
# This is the base class, which currently rocks.
#
class standard:
# this is used by the load_file to rename some of the annoying
# column names that aren't consistent between different types of data files (and older data files)
# or to just rename columns with difficult-to-remember ckeys.
obnoxious_ckeys = {}
#obnoxious_ckeys = {"example_annoying1" : "unified_name1",
# "example_annoying2" : "unified_name2"}
# this is just a data class with some inherited features
# These are the current working data sets used by plotting functions.
# The raw data from the file (after load_file()) are stored in columns and header
ydata = None
xdata = None
eydata = None
X = None
Y = None
Z = None
xscript = None
yscript = None
eyscript = None
directory = "default_directory"
xlabel = "xlabel"
ylabel = "ylabel"
legend_string = "(no legend_string set)"
title = "title"
path = "(no path)"
debug = False # Use this to print debug info in various places
delimiter = None # delimiter of the ascii file. If "None" this will just use any whitespace
file_extension = "*" # when asking the user for a file, use this as the filter
headers = {} # this dictionary will hold the header information
columns = {} # this dictionary will hold the data columns
ckeys = [] # we need a special list of column keys to keep track of their order during data assembly
hkeys = [] # ordered list of header keys
extra_globals = {}
    def __setitem__(self, n, x):
        """
        set's the n'th column to x (n can be a column name too)
        """
        if type(n) == str:
            # named column: insert (or overwrite) by column key
            self.insert_column(data_array=x, ckey=str(n), index='end')
        elif type(n) in [int, long] and n > len(self.ckeys)-1:
            # numeric index past the end: append as an auto-named column
            self.insert_column(data_array=x, ckey='_column'+str(len(self.ckeys)), index='end')
        else:
            # numeric index of an existing column: overwrite its data
            self.columns[self.ckeys[n]] = array(x)
    def __len__(self):
        # number of data columns
        return len(self.ckeys)
#
# functions that are often overwritten in modified data classes
#
    def __init__(self, delimiter=None, file_extension="*", debug=False, **kwargs):
        """
        delimiter        The delimiter the file uses. None means "white space"
        file_extension   Default file extension when navigating files
        debug            Displays some partial debug information while running
        """
        # update with the user-supplied/default values with kwargs
        # NOTE(review): eval() cannot execute an assignment statement, so this
        # always raises and every kwarg ends up in plot_kwargs -- confirm
        # whether that is the intended behavior.
        plot_kwargs = {}
        for key in kwargs:
            try: eval(key + "=" + kwargs[key])
            except: plot_kwargs[key] = kwargs[key]
        # this keeps the dictionaries from getting all jumbled with each other
        self.clear_columns()
        self.clear_headers()
        self.obnoxious_ckeys = {}
        self.debug = debug
        self.delimiter = delimiter
        self.file_extension = file_extension
    # create a simple initializer command for the user.
    initialize = __init__
#
# really useful functions
#
def load_file(self, path="ask", first_data_line="auto", filters="*.*", text="Select a file, FACEPANTS.", default_directory=None, header_only=False):
    """
    This will load a file, storing the header info in self.headers, and the data in
    self.columns.

    If first_data_line="auto", then the first data line is assumed to be the first line
    where all the elements are numbers.

    If you specify a first_data_line (index, starting at 0), the columns need not be
    numbers. Everything above will be considered header information and below will be
    data columns.

    In both cases, the line used to label the columns will always be the last
    header line with the same (or more) number of elements as the first data line.

    path                file to load, or "ask" to open a file dialog
    filters             unused here (self.file_extension is used as the dialog filter)
    text                prompt text for the file dialog
    default_directory   starting directory for the dialog (None = self.directory)
    header_only         if True, stop after parsing the header (no column data)

    Returns False if aborted, None otherwise.
    """
    if default_directory==None: default_directory = self.directory

    # this loads the file, getting the header and the column values,
    if self.debug: print "resetting all the file-specific stuff, path =", path

    self.clear_columns()
    self.clear_headers()

    self.xdata = None
    self.ydata = None
    self.eydata = None

    if path=="ask":
        path = _dialogs.SingleFile(filters=self.file_extension,
                                   default_directory=self.directory,
                                   text=text)
    self.path = path

    if path==None:
        print "Aborted."
        return False

    # open said file for reading, read in all the lines and close
    t0 = time.time()
    if self.debug: print time.time()-t0, "seconds: starting read_lines()"
    self.lines = _fun.read_lines(path)
    if self.debug: print time.time()-t0, "seconds: done."

    # break up the path into parts and take the last bit (and take a stab at the legend string)
    self.legend_string = path.split(_os.path.sep)[-1]
    # a leading underscore would hide the legend entry in matplotlib, so mask it
    if self.legend_string[0] == '_': self.legend_string = '|' + self.legend_string

    # read in the header information
    if self.debug: print time.time()-t0, "seconds: start reading headers"
    ckeys_line = -2   # line index where the column-label row was seen (-2 = not yet)
    for n in range(len(self.lines)):
        # split the line by the delimiter
        s = self.lines[n].strip().split(self.delimiter)

        # remove a trailing whitespace entry.
        if len(s) and s[-1].strip() == '': s.pop(-1)

        # first check and see if this is a data line (all elements are numbers)
        if first_data_line=="auto" and _fun.elements_are_numbers(s):
            # we've reached the first data line
            first_data_line = n
            if self.debug: print "first data line =", n

            # quit the header loop
            break;

        # first thing to try is simply evaluating the remaining string
        try:
            remainder = list(s)
            hkey = remainder.pop(0)
            remainder = _fun.join(remainder).strip()
            self.insert_header(hkey, eval(remainder))

        # if that didn't work, try all the other complicated/flexible stuff
        except:
            # if this isn't an empty line and has strings for elements, assume it's a column key line for now
            # (we keep overwriting this until we get to the first data line)
            if len(s) > 0:
                # overwrite the ckeys, and note the line number
                self.ckeys = list(s) # this makes a new instance of the list so it doesn't lose the first element!
                ckeys_line = n

                # if it's length 1, it's just some word. Store a dummy string in there.
                if len(s) == 1: s.append('')

                # Also assume it is a header line. Here should be at least two elements in a header element
                if len(s) == 2:
                    # If there are exactly two elements, just store the header constant
                    try: self.headers[s[0]] = float(s[1]) # this one is a number
                    except:
                        try: self.headers[s[0]] = complex(s[1].replace('(','').replace(')','')) # it's a complex number
                        except: self.headers[s[0]] = s[1] # this one is a string

                    # store the key in a variable like the other cases
                    l = s[0]

                else:
                    # if there are more than 2 elements, then this is an array or a phrase

                    # if all the elements after the first are numbers, this is an array row
                    if _fun.elements_are_numbers(s, 1):
                        # just add this to the headers as an array
                        for n in range(1,len(s)): s[n] = float(s[n])

                    # pop off the first element, this is the string used to access the array
                    l = s.pop(0)
                    self.headers[l] = s

                # in either case, we now have a header key in the variable l.
                # now add it to the ordered list, but only if it doesn't exist
                if _fun.index(l, self.hkeys) < 0:
                    self.hkeys.append(l)
                else:
                    print "Duplicate header:", l

                if self.debug: print "header '"+l+"' = "+str(self.headers[l])[0:20]+" ..."

    # Make sure first_data_line isn't None (which happens if there's no data)
    if first_data_line == "auto":
        print "Could not find a line of pure data!"
        return

    # at this point we've found the first_data_line, and ckeys_line is correct or -2
    # count the number of data columns
    column_count = len(self.lines[first_data_line].strip().split(self.delimiter))

    # check to see if ckeys line is first_data_line-1, and that it is equal in length to the
    # number of data columns. If it isn't, it's a false ckeys line
    if ckeys_line == first_data_line-1 and len(self.ckeys) >= column_count:
        # it is valid.
        # if we have too many column keys, mention it
        if len(self.ckeys) > column_count:
            print "Note: more ckeys than columns (stripping extras)"

        # remove this line from the header
        try: self.pop_header(self.ckeys[0])
        except: print "Couldn't pop column labels from header. Weird."

    else:
        # it is an invalid ckeys line. Generate our own!
        self.ckeys = []
        for m in range(0, column_count): self.ckeys.append("column_"+str(m))

    # for good measure, make sure to trim down the ckeys array to the size of the data columns
    for n in range(column_count, len(self.ckeys)): self.ckeys.pop(-1)

    # now we have a valid set of column ckeys one way or another, and we know first_data_line.
    if header_only: return

    # initialize the columns arrays
    # I did benchmarks and there's not much improvement by using numpy-arrays here.
    for label in self.ckeys: self.columns[label] = []

    # start grabbing the data
    if self.debug: print time.time()-t0, "seconds: starting to read data"

    # TimerStart() appears to be a module-level profiling helper defined elsewhere
    TimerStart()
    for n in range(first_data_line, len(self.lines)):
        # split the line up
        s = self.lines[n].strip().split(self.delimiter)

        # now start filling the column, ignoring the empty or bad data lines
        for m in range(len(s)):
            try: self.columns[self.ckeys[m]].append(float(s[m]))
            except:
                # fall back to complex values of the form (a+bj)
                try: self.columns[self.ckeys[m]].append(complex(s[m][1:len(s[m])-1]))
                except: pass

    if self.debug: print time.time()-t0, "seconds: yeah."

    # now loop over the columns and make them all hard-core numpy columns!
    TimerStart()
    for k in self.ckeys: self.columns[k] = array(self.columns[k])
    if self.debug: print time.time()-t0, "seconds: totally."

    # now, as an added bonus, rename some of the obnoxious headers
    for k in self.obnoxious_ckeys:
        if self.columns.has_key(k):
            if self.debug: print "renaming column",k,self.obnoxious_ckeys[k]
            self.columns[self.obnoxious_ckeys[k]] = self.columns[k]
def save_file(self, path="ask"):
    """
    This will save all the header info and columns to an ascii file.

    path    where to save; "ask" pops up a save dialog.
    Returns False if aborted.
    """
    if path=="ask": path = _dialogs.Save(self.file_extension, default_directory=self.directory)
    if path in ["", None]:
        print "Aborted."
        return False

    self.path=path

    # if the path exists, make a backup
    if _os.path.exists(path):
        _os.rename(path,path+".backup")

    # get the delimiter (tab by default)
    if self.delimiter==None: delimiter = "\t"
    else:                    delimiter = self.delimiter

    # open the file and write the header
    f = open(path, 'w')
    for k in self.hkeys:
        # if this is a numpy array, turn it into a list
        if type(self.headers[k]) == type(array([])):
            self.headers[k] = self.headers[k].tolist()

        f.write(k + delimiter)
        # just write it
        f.write(str(self.headers[k]) + "\n")

    # now write the ckeys line (preceded by a blank separator line)
    f.write("\n")
    elements = []
    for ckey in self.ckeys: elements.append(str(ckey))
    f.write(_fun.join(elements,delimiter)+"\n")

    # now loop over the data
    for n in range(0, len(self[0])):
        # loop over each column
        elements = []
        for m in range(0, len(self)):
            # write the data if there is any, otherwise a placeholder ("_")
            if n < len(self[m]):
                elements.append(str(self[m][n]))
            else:
                elements.append('_')
        f.write(_fun.join(elements, delimiter)+"\n")

    f.close()
def pop_data_point(self, n, ckeys=[]):
"""
This will remove and return the n'th data point (starting at 0)
in the supplied list of columns.
n index of data point to pop
ckeys which columns to do this to, specified by index or key
empty list means "every column"
"""
# if it's empty, it's everything
if ckeys == []: ckeys = self.ckeys
# loop over the columns of interest and pop the data
popped = []
for k in ckeys:
if not k == None:
# first convert to a list
data = list(self.c(k))
# pop the data
popped.append(data.pop(n))
# now set this column again
self.insert_column(data, k)
return popped
def plot_and_pop_data_points(self, xkey=0, ykey=1, ekey=None, ckeys=[], **kwargs):
    """
    This will plot the columns specified by the scripts and then wait for clicks
    from the user, popping data points nearest the clicks. Right-click quits.

    xkey,ykey,ekey   column keys to plot
    ckeys            list of columns to pop, using pop_data_point()

    Set ckeys=[] to pop from all columns, ckeys="these" to pop only from the
    plotted columns, or a list of ckeys from which to pop.

    Returns the list of popped values (one entry per click).
    """
    if ckeys == "these": ckeys = [xkey, ykey, ekey]

    # plot the data. This should generate self.xdata and self.ydata
    self.plot(xkey, ykey, ekey, **kwargs)
    a = _pylab.gca()

    # start the loop to remove data points
    raw_input("Zoom in on the region of interest. <enter>")
    print "Now click near the data points you want to pop. Right-click to finish."
    poppies = []
    while True:
        # get a click; an empty list means right-click (finish)
        clicks = _pt.ginput()
        if len(clicks)==0: return poppies
        [cx,cy] = clicks[0]

        # search through x and y for the closest point to this click
        # NOTE(review): squared distance in data units -- assumes comparable
        # x/y scales; confirm this is acceptable for typical plots.
        diff = (self.xdata-cx)**2 + (self.ydata-cy)**2
        i = _fun.index(min(diff), diff)

        # now pop!
        poppies.append(self.pop_data_point(i, ckeys))

        # now get the current zoom so we can replot
        xlim = a.get_xlim()
        ylim = a.get_ylim()

        # replot and rezoom
        _pylab.hold(True)
        self.plot(xkey, ykey, ekey, **kwargs)
        a.set_xlim(xlim)
        a.set_ylim(ylim)
        _pylab.hold(False)
        _pylab.draw()
def execute_script(self, script, g={}):
"""
Runs a script, returning the result.
Scripts are of the form:
"3.0 + x/y - self[0] where x=3.0*c('my_column')+h('setting'); y=c(1)"
"self" refers to the data object, giving access to everything, enabling
complete control over the universe. c() and h() give quick reference
to self.c() and self.h() to get columns and header lines
Additionally, these scripts can see all of the numpy functions like sin,
cos, sqrt, etc.
Finally, if you would like access to additional globals, set
self.extra_globals to the appropriate globals dictionary or add globals
using insert_global(). Setting g=globals() will automatically insert
your globals into this databox instance.
There are a few shorthand scripts available as well. You can simply type
a column name such as "my_column" or a column number like 2. However, I
only added this functionality as a shortcut, and something like
"2.0*a where a=F" will not work unless F is defined somehow. I figure
since you're already writing a complicated script, you don't want to
accidentally shortcut your way into using a column instead of a constant!
Use "2.0*a where a=c('F')" instead.
NOTE: You shouldn't try to use variables like 'c=...' or 'h=...' because
they are already column and header functions!
"""
if self.debug: print "Generating column '"+str(name)+"' = "+str(script)+"..."
# add any extra user-supplied global variables for the eventual eval() call.
self.extra_globals.update(g)
g = {} # clear out the existing dictionary
# If the script is not a list of scripts, return the script value.
# This is the termination of a recursive call.
if not _fun.is_iterable(script):
if script == None: return None
# get the expression and variables
[expression, v] = self._parse_script(script)
# if there was a problem parsing the script
if v == None:
print "ERROR: Could not parse '"+script+"'"
return None
# otherwise, evaluate the script using python's eval command
return eval(expression, v)
# Otherwise, this is a list of scripts. Make the recursive call.
output = []
for s in script: output.append(self.execute_script(s))
return output
# Define this so you can quickly call a script
__call__ = execute_script
def _parse_script(self, script, n=0):
    """
    This takes a script such as "a/b where a=c('current'), b=3.3" and returns
    ["a/b", {"a":self.columns["current"], "b":3.3}]

    You can also just use an integer for script to reference columns by number
    or use the column label as the script.

    n is for internal use. Don't use it. In fact, don't use this function, user.
    """
    # recursion guard: after 1000 levels, ask the user whether to bail out
    if n > 1000:
        print "This script ran recursively 1000 times!"
        a = raw_input("<enter> or (q)uit: ")
        if a.strip().lower() in ['q', 'quit']:
            script = None

    if script==None: return [None, None]

    # check if the script is simply an integer (Python 2 'long' included);
    # negatives count from the end, like normal list indexing
    if type(script) in [int,long]:
        if script<0: script = script+len(self.ckeys)
        return ["___"+str(script), {"___"+str(script):self[script]}]

    # the scripts would like to use calls like "h('this')/3.0*c('that')",
    # so to make eval() work we should add these functions to a local list

    # start with the globals list
    # NOTE(review): globals() returns the live module dictionary, so the
    # update() calls below permanently inject names into this module's
    # namespace -- presumably unintended; confirm before relying on it.
    globbies = globals()

    # update the globals with supplied extras
    globbies.update(self.extra_globals)

    # override the important ones!
    globbies.update({'h':self.h, 'c':self.c, 'self':self})

    # first split up by "where"
    split_script = script.split(" where ")

    # #######################################
    # Scripts without a "where" statement:
    # #######################################

    # if it's a simple script, like "column0" or "c(3)/2.0"
    if len(split_script) == 1:
        if self.debug: print "script of length 1"

        # try to evaluate the script

        # first try to evaluate it as a simple column label
        if n==0 and script in self.ckeys:
            # only try this on the zero'th attempt
            # if this is a recursive call, there can be ambiguities if the
            # column names are number strings
            return ['___', {'___':self[script]}]

        # Otherwise, evaluate it.
        try:
            b = eval(script, globbies)
            return ['___', {'___':b}]
        except:
            print
            print "ERROR: Could not evaluate '"+str(script)+"'"
            _wx.Yield()
            return [None, None]

    # ######################################
    # Full-on fancy scripts
    # ######################################

    # otherwise it's a complicated script like "c(1)-a/2 where a=h('this')"

    # tidy up the expression
    expression = split_script[0].strip()

    # now split the variables list up by ';'
    varsplit = split_script[1].split(';')

    # loop over the entries in the list of variables, storing the results
    # of evaluation in the "stuff" dictionary
    stuff = {}
    for var in varsplit:
        # split each entry by the "=" sign
        s = var.split("=")
        if len(s) == 1:
            print s, "has no '=' in it"
            return [None, None]

        # tidy up into "variable" and "column label"
        v = s[0].strip()
        c = s[1].strip()

        # now try to evaluate c, given our current globbies

        # recursively call this sub-script. At the end of all this mess
        # we want the final return value to be the first expression
        # and a full dictionary of variables to fill it
        [x,y] = self._parse_script(c, n+1)

        # if it's not working, just quit out.
        if y==None: return [None, None]
        stuff[v] = y[x]

    # incorporate the globbies so other functions can eval() with things
    # like c('this')
    stuff.update(globbies)

    # at this point we've found or generated the list
    return [expression, stuff]
def insert_column(self, data_array, ckey='temp', index='end'):
    """
    Insert or overwrite a column, filling it with the supplied data
    (stored as a numpy array).

    data_array   the column data
    ckey         column name; an integer means self.ckeys[ckey]
    index        where to insert a brand-new key ('end' appends)
    """
    # integer column keys refer to existing columns by position.
    # (guarded so the check also works on Python 3, where 'long' is gone;
    # Python 2 behavior is unchanged)
    try:              integer_types = (int, long)
    except NameError: integer_types = (int,)
    if type(ckey) in integer_types: ckey = self.ckeys[ckey]

    # append/overwrite the column value
    self.columns[ckey] = array(data_array)

    # register the key if it's new
    if not ckey in self.ckeys:
        if index=='end': self.ckeys.append(ckey)
        else:            self.ckeys.insert(index, ckey)
def insert_header(self, hkey, value, index='end'):
    """
    Insert or overwrite a header value and register its key.

    hkey    header name; an integer means self.hkeys[hkey]
    value   the value to store
    index   where to insert a brand-new key ('end' appends)
    """
    # integer header keys refer to existing headers by position.
    # (guarded so the check also works on Python 3, where 'long' is gone;
    # Python 2 behavior is unchanged)
    try:              integer_types = (int, long)
    except NameError: integer_types = (int,)
    if type(hkey) in integer_types: hkey = self.hkeys[hkey]

    # set the data
    self.headers[str(hkey)] = value

    # register the key if it's new
    if not hkey in self.hkeys:
        if index=='end':
            # BUGFIX: this used hkeys.insert(-1, ...), which inserts the new
            # key BEFORE the last element rather than at the end (and was
            # inconsistent with insert_column, which appends).
            self.hkeys.append(str(hkey))
        else:
            self.hkeys.insert(index, str(hkey))
def insert_global(self, thing, name=None):
    """
    Add or replace an object in self.extra_globals so that scripts run by
    execute_script() / _parse_script() can see it.

    name   key under which to store the object; None means thing.__name__
    """
    key = thing.__name__ if name == None else name
    self.extra_globals[key] = thing
def pop_header(self, hkey):
"""
This will remove and return the specified header value.
You can specify either a key string or an index.
"""
# try the integer approach first to allow negative values
if not type(hkey) == str:
return self.headers.pop(self.hkeys.pop(hkey))
else:
# find the key integer and pop it
hkey = self.hkeys.index(hkey)
# if we didn't find the column, quit
if hkey < 0:
print "Column does not exist (yes, we looked)."
return
# pop it!
return self.headers.pop(self.hkeys.pop(hkey))
def pop_column(self, ckey):
"""
This will remove and return the data in the specified column.
You can specify either a key string or an index.
"""
# try the integer approach first to allow negative values
if not type(ckey) == str:
return self.columns.pop(self.ckeys.pop(ckey))
else:
# find the key integer and pop it
ckey = self.ckeys.index(ckey)
# if we didn't find the column, quit
if ckey < 0:
print "Column does not exist (yes, we looked)."
return
# pop it!
return self.columns.pop(self.ckeys.pop(ckey))
def clear_columns(self):
    """Discard all column data and the ordered list of column keys."""
    self.columns = {}
    self.ckeys = []
def clear_headers(self):
    """Discard all header values and the ordered list of header keys."""
    self.headers = {}
    self.hkeys = []
def rename_header(self, old_name, new_name):
    """Rename a header entry in place (both names must be strings)."""
    position = self.hkeys.index(old_name)
    self.hkeys[position] = new_name
    self.headers[new_name] = self.headers.pop(old_name)
def rename_column(self, old_name, new_name):
    """Rename a column in place (both names must be strings); data is kept."""
    position = self.ckeys.index(old_name)
    self.ckeys[position] = new_name
    self.columns[new_name] = self.columns.pop(old_name)
def get_XYZ(self, xaxis=None, yaxis=None, xlabel=None, ylabel=None, xcoarsen=0, ycoarsen=0):
    """
    This will assemble the X, Y, Z data for a 2d colorplot or surface.

    yaxis=None          What values to use for the y-axis data. "first" means
                        take the first column; None means just use bin number
    xaxis=None          What values to use for the x-axis data, can be a header
                        array; "first" means pop off the first row of the data
    xlabel, ylabel      if set, these override the auto-generated axis labels
    xcoarsen, ycoarsen  How much to coarsen the columns or rows

    Results are stored in self.X, self.Y and self.Z.
    """
    # next we assemble the 2-d array for the colorplot
    Z=[]
    for c in self.ckeys:
        # transform so the image plotting has the same orientation as the data
        # in the file itself.
        col = list(self.columns[c])
        col.reverse()
        Z.append(col)

    # initialize the axis labels (defaults: bin numbers)
    X=[]
    for n in range(len(Z)): X.append(n)
    self.xlabel = "x-step number"

    Y=[]
    for n in range(len(Z[0])): Y.append(n)
    self.ylabel = "y-step number"
    Y.reverse()

    # now if we're supposed to, pop off the first column as Y labels
    if yaxis=="first":
        # just pop off the first column (Z columns are already reversed)
        Y = Z.pop(0)
        self.ylabel = "y-values"

        # pop the first element of the X-data
        X.pop(0)

    # otherwise, it's a column value
    elif not yaxis==None:
        Y = list(self.c(yaxis))
        Y.reverse()
        self.ylabel = yaxis

    # if we're supposed to, pop off the top row for the x-axis values
    if xaxis == "first":
        X = []
        for n in range(len(Z)):
            X.append(Z[n].pop(-1))
        self.xlabel = "x-values"

        # pop the first element of the Y-data
        Y.pop(-1)

    # otherwise, if we specified a row from the header, use that
    elif not xaxis==None:
        X = array(self.h(xaxis))
        # trim X down to the length of the Z row
        # NOTE(review): this trims to (number of Z columns - 1); confirm the
        # off-by-one is intentional.
        X.resize(len(Z[:])-1)
        self.xlabel = xaxis

    # now if we're supposed to coarsen, do so (produces a numpy array)
    self.X = _fun.coarsen_array(X, xcoarsen)
    self.Y = _fun.coarsen_array(Y, ycoarsen)

    # Z has to be transposed to make the data file look like the plot
    self.Z = _fun.coarsen_matrix(Z, xcoarsen, ycoarsen).transpose()

    # if we specified labels, they trump everything
    if xlabel: self.xlabel = xlabel
    if ylabel: self.ylabel = ylabel
    return
def get_columns_from_XYZ(self, corner="x"):
    """
    Rebuild the column data from self.X, self.Y and self.Z (as produced by
    get_XYZ): the reversed Y values become the first column (labelled by
    'corner'), and each column of Z follows, labelled by its X value.
    """
    self.clear_columns()

    # the file stores rows bottom-up relative to the plot, so reverse Y
    yvals = list(self.Y)
    yvals.reverse()
    self.insert_column(yvals, corner)

    # same reversal for each column of Z
    for i in range(len(self.X)):
        zcol = list(self.Z[:, i])
        zcol.reverse()
        self.insert_column(zcol, str(self.X[i]))
def plot_XYZ(self, cmap="Blues", plot="image", **kwargs):
    """
    This is 8 million times faster than pseudocolor I guess, but it won't handle unevenly spaced stuff.

    You need to generate X, Y and Z first, probably using get_XYZ.

    cmap     Name of the matplotlib cmap to use
    plot     Type of plot, "image" for fast colorplot, "mountains" for slow 3d plot
    kwargs   passed on to imshow()/plot_surface()
    """
    # if we don't have the data, tell the user
    # NOTE(review): once X/Y/Z are numpy arrays, "== None" becomes an
    # elementwise comparison; "is None" would be the safe test.
    if self.X == None or self.Y == None or self.Z == None:
        print "You haven't assembled the surface data yet. Use get_XYZ first!"
        return

    # try the user's colormap
    try:
        colormap = eval("_pylab.cm."+cmap)
    except:
        print "ERROR: Invalid colormap, using default."
        colormap = _pylab.cm.Blues

    # at this point we have X, Y, Z and a colormap, so plot the mf.
    f=_pylab.gcf()
    f.clear()

    if plot.lower() == "mountains":
        # slow 3-d surface plot (meshgrid/Axes3D come from module-level imports)
        X, Y = meshgrid(self.X, self.Y)
        a = Axes3D(f)
        a.plot_surface(X, Y, self.Z, rstride=2, cstride=2, cmap=colormap, **kwargs)

    else:
        # assume X and Y are the bin centers and figure out the bin widths
        x_width = abs(float(self.X[-1] - self.X[0])/(len(self.X)-1))
        y_width = abs(float(self.Y[-1] - self.Y[0])/(len(self.Y)-1))

        # do whatever transformation is required
        X = self.X
        Y = self.Y
        Z = self.Z

        # reverse the Z's (imshow draws the first row at the top)
        Z = list(Z); Z.reverse(); Z = array(Z)

        _pylab.imshow(Z, cmap=colormap,
                      extent=[X[0]-x_width/2.0, X[-1]+x_width/2.0,
                              Y[0]+y_width/2.0, Y[-1]-y_width/2.0], **kwargs)
        _pylab.colorbar()
        _pt.image_set_aspect(1.0)

    # set the title and labels
    self.title = self.path
    a = _pylab.gca()
    a.set_title(self.title)
    a.set_xlabel(self.xlabel)
    a.set_ylabel(self.ylabel)
def c(self, n):
    """
    Return a column's data: by name when n is a string, otherwise by the
    position of its key in self.ckeys.
    """
    if type(n) == str:
        return self.columns[n]
    return self.columns[self.ckeys[n]]

__getitem__ = c
def h(self, hkey):
    """
    This function searches through hkeys for one *containing* the supplied key string,
    and returns that header value. It's mostly for shortening coding.

    Also can take integers, returning the key'th header value.

    Returns None (after printing the available keys) if nothing matches.
    """
    # if this is an index (Python 2 'long' included)
    if type(hkey) in [int, long]: return self.headers[self.hkeys[hkey]]

    # if this is an exact match
    if hkey in self.hkeys: return self.headers[hkey]

    # Look for a fragment; the first key containing hkey wins.
    for k in self.hkeys:
        if k.find(hkey) >= 0:
            return self.headers[k]

    print
    print "ERROR: Couldn't find '"+str(hkey) + "' in header."
    print "Possible values:"
    print self.hkeys
    print
    return None
|
streitho/spinmob | _fitting.py | import _fitting_mess
databoxes = _fitting_mess.fit_databoxes
databoxes2 = _fitting_mess.fit_databoxes_model
files = _fitting_mess.fit_files
files2 = _fitting_mess.fit_files_model
shown_data = _fitting_mess.fit_shown_data |
streitho/spinmob | _plot_xy.py | <gh_stars>0
import _plotting_mess; reload(_plotting_mess)
data = _plotting_mess.xy_data
databoxes = _plotting_mess.xy_databoxes
files = _plotting_mess.xy_files
function = _plotting_mess.xy_function |
streitho/spinmob | _models.py | import os
import scipy as _scipy
import scipy.optimize # don't listen to spyder.
import numpy as _n
import pylab as _pylab
from matplotlib.font_manager import FontProperties as _FontProperties
import spinmob as _s
import _dialogs
_st = _s.plot.tweaks
import _functions as _fun
import wx as _wx
# This is completely a hack, but it works very well. I'd like to rewrite this as
#
# 1. a class-based object to interact with on the command line (so no input loop)
# 2. something that can handle multiple functions or a single function with
# multiple outputs (i.e. returning an array)
# 3. something more general? No need for "x"? Something that can handle many
# parameters, only some of which are fit (specified by you)?
#
# Classes
#
class model_base:
    # Derived classes override these to define their fit variables:
    # pnames           ordered list of fit-parameter name strings
    pnames = []
    # function_string  human-readable formula (optional, for display)
    function_string = None
    # D                analytic jacobian function D(p, x); None means let
    #                  scipy.optimize.leastsq estimate derivatives numerically
    D = None
# this function just creates a p0 array based on the size of the pnames array
def __init__(self):
    """Initialize p0 as a float array of zeros, one entry per parameter name."""
    # equivalent to resizing an empty array: resize pads with zeros
    self.p0 = _n.zeros(len(self.pnames))
def __call__(self, p, x):
    """Evaluate the model at x with parameters p (same as evaluate())."""
    # BUGFIX: the result was computed but never returned, so calling a
    # model instance always yielded None.
    return self.evaluate(p, x)
# This function is to be overridden.
# this is where you define the shape of your function
# in terms of parameters p at value x
def evaluate(self, p, x):
    """Example placeholder model: identically zero (override in subclasses)."""
    return p[0] * 0.0 * x
# this is another overridden function
# used to get just the background, given p and value x
def background(self, p, x):
    """Example placeholder background: identically zero (override in subclasses)."""
    return p[0] * 0.0 * x
# this is another overridden function
# use this to guess the initial values p0 based on data
# xbi1 and 2 are the indices used to estimate the background
def guess(self, xdatas, ydatas, xbi1=0, xbi2=-1):
    """Example guess: store the xbi2'th x-value in p[0] (override in subclasses)."""
    # start from the model's current parameter array
    p = self.p0
    # example: take the background x-index of the first data set
    p[0] = xdatas[0][xbi2]
    self.write_to_p0(p)
    return
#
#
# These functions are generally not overwritten
#
#
def optimize(self, xdatas, ydatas, eydatas, p0="internal"):
    """
    This actually performs the optimization on xdata and ydata.

    p0="internal"   the initial parameter guess, such as [1,2,44.3];
                    "internal" means use the model's internal guess result
                    (but you better have guessed!)

    returns the full output of scipy.optimize.leastsq() (full_output=1)
    """
    # use the stored guess when requested. Checking isinstance first avoids
    # the elementwise comparison a numpy-array p0 would otherwise trigger.
    if isinstance(p0, str) and p0 == "internal": p0 = self.p0

    # supply the analytic jacobian only if the model defines one
    # ("is None" instead of "== None" for identity comparison)
    # NOTE(review): Dfun receives (p, *args) from leastsq, but jacobian()
    # takes (p, xdata, ydata) -- confirm the D-path argument count.
    if self.D is None:
        return _scipy.optimize.leastsq(self.residuals_concatenated, p0, args=(xdatas,ydatas,eydatas,), full_output=1)
    else:
        return _scipy.optimize.leastsq(self.residuals_concatenated, p0, args=(xdatas,ydatas,eydatas,), full_output=1, Dfun=self.jacobian, col_deriv=1)
def residuals(self, p, xdatas, ydatas, eydatas):
    """
    Return a list of arrays, one per data set: (ydata - model) / |eydata|.
    """
    # evaluate the model once for every data set (returns a list)
    model_values = self.evaluate(p, xdatas)

    # error-scaled difference for each data set
    result = []
    for i in range(len(xdatas)):
        result.append((ydatas[i] - model_values[i]) / _n.absolute(eydatas[i]))
    return result
def residuals_concatenated(self, p, xdatas, ydatas, eydatas):
    """Flatten the per-data-set residuals into one long array for leastsq()."""
    return _n.concatenate(self.residuals(p, xdatas, ydatas, eydatas))
def residuals_variance(self, p, xdatas, ydatas, eydatas):
    """
    This returns a list of the variance of the residuals, or chi^2/DOF,
    one value per data set.
    """
    # get the chi^2 list
    c2s = self.chi_squared(p, xdatas, ydatas, eydatas)

    # get the total number of data points (across all data sets)
    N = 1.0 * len(_n.concatenate(xdatas))

    # get the degrees of freedom (number of fit parameters)
    dof = 1.0*len(p)

    # get the reduced chi squareds assuming the parameters are
    # weighted evenly among the individual data points
    rc2s = []
    for n in range(len(c2s)):
        weight = 1.0*len(xdatas[n]) / N # fraction of the total data points
        rc2s.append(c2s[n] / (len(xdatas[n])-weight*dof)) # chi^2 / dof
    return rc2s
def chi_squared(self, p, xdatas, ydatas, eydatas):
    """
    Return a list of chi^2 values (sum of squared residuals), one per data
    set, for the given parameters p.

    (Not used directly by the optimizer, which works on residuals.)
    """
    return [sum(r * r) for r in self.residuals(p, xdatas, ydatas, eydatas)]
# this returns the jacobian given the xdata. Derivatives across rows, data down columns.
# (so jacobian[len(xdata)-1] is len(p) wide)
def jacobian(self, p, xdata, ydata):
    """Evaluate the analytic jacobian self.D at (p, xdata); requires self.D."""
    # coerce both inputs to plain numpy arrays before handing off
    if not type(p) == _n.ndarray: p = _n.array(p)
    if not type(xdata) == _n.ndarray: xdata = _n.array(xdata)
    return self.D(p, xdata)
def set_parameter(self, name, value):
    """
    This functions sets a parameter named "name" to a value.

    value may also be one of the strings 'x', 'y', 'dx', 'dy' or 'slope',
    in which case the value is taken from mouse click(s) on the current
    figure. Returns True on success, False on any error, None if the
    click(s) were aborted.
    """
    try:
        # if we enter something like "min=x", get a click from the user
        if value in ['x','y']:
            # get the click
            print "Please click somewhere to get the "+value+" value."
            _st.raise_figure_window()
            click = _pylab.ginput()

            # use the click value.
            if len(click)>0 and value=='x': value = click[0][0]
            elif len(click)>0 and value=='y': value = click[0][1]
            else:
                print "\nCLICK ABORTED.\n"
                return

        elif value in ['dx', 'dy', 'slope']:
            # get two clicks
            print "Please click twice to use the "+value+" value."
            _st.raise_figure_window()
            clicks = _pylab.ginput(2)

            # make sure we got two clicks
            if len(clicks) == 2:
                dx = clicks[1][0]-clicks[0][0]
                dy = clicks[1][1]-clicks[0][1]
                if value=='dx': value = dx
                if value=='dy': value = dy
                if value=='slope': value = dy/dx
            else:
                print "\nCLICKS ABORTED.\n"
                return

        # store the (possibly click-derived) value in p0
        i = self.pnames.index(name)
        self.p0[i] = float(value)
        return True

    except:
        print "ERROR:", name, "is not a valid variable or", value, "is not a valid value."
        return False
def write_to_p0(self, p):
    """
    This function checks p's against a possible
    variable self.guessed_list and stores only those
    not already guessed; with no guessed_list, p replaces p0 wholesale.
    """
    try:
        # loop over all the p0's, storing p's if necessary
        for n in range(0, len(self.p0)):
            # if the pname of this p0 is not on the guessed list
            # NOTE(review): leftover debug print ("POOP"); also note that
            # list.index() raises ValueError on a miss, which lands in the
            # except branch below rather than taking the "<0" path.
            print self.pnames[n]+"POOP"+str(self.guessed_list.index(self.pnames[n]))
            if self.guessed_list.index(self.pnames[n])<0:
                self.p0[n] = p[n]

    except: # an error occurred, likely due to no guessed_list
        self.p0 = p
######################################
## Interactive fitting routine
######################################
def fit(self, data, command="", settings={}):
    """
    Interactive fitting routine.

    Generates xdata, ydata, and eydata from the three scripts (or
    auto-sets the error and updates it depending on the fit), fits the
    data, stores the results (and scripts) in the data file's header
    and saves the data in a new file.

    data        instance of a data class (or list of instances, for
                simultaneous parallel fits)
    command     initial interactive fit command
    settings    dictionary of fit options; any key omitted falls back
                to the defaults defined below

    Returns a dictionary describing the outcome for the 'y', 'u', 'n'
    and 'q' commands (including fit parameters/errors where applicable).

    NOTE(review): the mutable default settings={} is shared between calls
    and is modified in place below -- callers should pass a fresh dict.
    """
    # settings that must be expanded into one value per parallel fit
    iterable_settings = ["min", "max", "xb1", "xb2", "auto_error", "subtract",
                         "smooth", "coarsen", "show_guess", "show_error",
                         "show_background", "plot_all", "xscript", "yscript",
                         "eyscript"]

    # dictionary of settings like "min" and "skip"
    default_settings = {"min"              : None,
                        "max"              : None,
                        "xb1"              : 0,
                        "xb2"              : -1,
                        "auto_error"       : False,
                        "subtract"         : False,
                        "smooth"           : 0,
                        "coarsen"          : 0,
                        "show_guess"       : False,
                        "show_error"       : True,
                        "show_background"  : True,
                        "plot_all"         : False,
                        "eyscript"         : None,
                        "output_path"      : None,
                        "output_columns"   : None,
                        "skip"             : True,
                        "guess"            : None,
                        "save_file"        : True,
                        "file_tag"         : 'fit_',
                        "figure"           : 0,
                        "autofit"          : False,
                        "fullsave"         : False,
                        }

    # with no user-supplied error script, scale errors to reduced chi^2 = 1
    if not settings.has_key('eyscript'): default_settings["auto_error"] = True

    # fill in the non-supplied settings with defaults
    for k in default_settings.keys():
        if not k in settings.keys():
            settings[k] = default_settings[k]

    # determine the number of parallel fits from the yscript
    if _s.fun.is_iterable(settings['yscript']): number_of_fits = len(settings['yscript'])
    else:                                       number_of_fits = 1

    # In general we're going to have a list of datas and scripts etc, so make
    # sure we're in a position to do this.
    if not _s.fun.is_iterable(data): data = [data]

    # fill out the arrays so they match the number of fits
    while len(data) < number_of_fits: data.append(data[-1])

    # make sure the various settings are lists too
    for k in iterable_settings:
        # make them all iterable
        if not _s.fun.is_iterable(settings[k]):
            settings[k] = [settings[k]]
        # make sure they're all the right length
        while len(settings[k]) < number_of_fits: settings[k].append(settings[k][-1])

    # Initialize the fit_parameters (we haven't any yet!)
    fit_parameters = None
    fit_errors     = None
    format_figures = True

    # set up the figures: one figure per data set, residuals on top (211),
    # data + fit below (212), sharing the x axis
    axes2s = []
    axes1s = []
    figs   = []
    for n in range(len(data)):
        figs.append(_pylab.figure(settings["figure"]+n))
        figs[n].clear()
        axes2s.append(_pylab.subplot(211))
        axes1s.append(_pylab.subplot(212, sharex=axes2s[n]))
        axes2s[n].set_position([0.15, 0.78, 0.70, 0.13])
        axes1s[n].set_position([0.15, 0.08, 0.70, 0.64])

    # Now keep trying to fit until the user says its okay or gives up.
    hold_plot=False
    while True:

        # Plot everything. hold_plot skips re-assembling/re-plotting once.
        if hold_plot:
            hold_plot=False
        else:
            if settings["skip"]: print "Plotting but not optimizing... (<enter> to fit)"
            else:                print "Beginning fit routine..."

            # assemble all the data
            xdatas  = []
            ydatas  = []
            eydatas = []
            xs      = []
            ys      = []
            eys     = []
            for n in range(len(data)):
                # get the data based on the scripts
                xdatas.append(data[n](settings["xscript"][n]))
                ydatas.append(data[n](settings["yscript"][n]))
                # with no error script, fake an error bar of (range)/20
                if settings["eyscript"][n] == None: eydatas.append(xdatas[n]*0.0 + (max(ydatas[n])-min(ydatas[n]))/20.0)
                else:                               eydatas.append(data[n](settings["eyscript"][n]))

                # now sort the data in case it's jaggy!
                # (presumably _fun.sort_matrix sorts all columns by row 0 -- confirm in _fun)
                matrix_to_sort = _n.array([xdatas[n], ydatas[n], eydatas[n]])
                sorted_matrix  = _fun.sort_matrix(matrix_to_sort, 0)
                xdatas[n]  = sorted_matrix[0]
                ydatas[n]  = sorted_matrix[1]
                eydatas[n] = sorted_matrix[2]

                # now trim all the data based on xmin and xmax
                xmin = settings["min"][n]
                xmax = settings["max"][n]
                if xmin==None: xmin = min(xdatas[n])-1
                if xmax==None: xmax = max(xdatas[n])+1
                [x, y, ey] = _fun.trim_data(xdatas[n], ydatas[n], eydatas[n], [xmin, xmax])

                # smooth and coarsen
                [x,y,ey] = _fun.smooth_data( x,y,ey,settings["smooth"][n])
                [x,y,ey] = _fun.coarsen_data(x,y,ey,settings["coarsen"][n])

                # append to the temporary trimmed data sets.
                xs.append(x)
                ys.append(y)
                eys.append(ey)

            # now do the first optimization. Start by guessing parameters from
            # the data's shape. This writes self.p0
            if settings["guess"]==None:
                self.guess(xs, ys, settings["xb1"], settings["xb2"])
            else:
                self.write_to_p0(settings['guess'])

            print "\n  FUNCTION:"
            for s in self.function_string:
                print "    "+s

            print "\n  GUESS:"
            for n in range(len(self.pnames)):
                print "    "+self.pnames[n]+" = "+str(self.p0[n])
            print

            # now do the first optimization
            if not settings["skip"]:

                # actually do the least-squares optimization
                fit_output = self.optimize(xs, ys, eys, self.p0)

                # optimize puts out a float if there's only one parameter. Annoying.
                if not _s.fun.is_iterable(fit_output[0]):
                      fit_parameters = _n.array([fit_output[0]])
                else: fit_parameters = fit_output[0]

                # If we're doing auto error, now we should scale the error so that
                # the reduced xi^2 is 1
                if settings["auto_error"]:

                    # guess the correction to the y-error we're fitting (sets the reduced chi^2 to 1)
                    rms = _n.sqrt(self.residuals_variance(fit_parameters,xs,ys,eys))
                    print "    initial reduced chi^2 =", list(rms**2)
                    print "    scaling errors by", list(rms), "and re-optimizing..."
                    for n in range(len(eys)):
                        eys[n]     = rms[n] * eys[n]
                        eydatas[n] = rms[n] * eydatas[n]

                    # optimize with new improved errors, using the old fit to start
                    fit_output = self.optimize(xs,ys,eys,p0=fit_parameters)

                    # optimize puts out a float if there's only one parameter. Annoying.
                    if not _s.fun.is_iterable(fit_output[0]):
                          fit_parameters = _n.array([fit_output[0]])
                    else: fit_parameters = fit_output[0]

                # Now that the fitting is done, show the output

                # grab all the information from fit_output
                fit_covariance = fit_output[1]
                fit_reduced_chi_squared = list(self.residuals_variance(fit_parameters,xs,ys,eys))
                if not fit_covariance == None:
                    # get the error vector and correlation matrix from (scaled) covariance
                    [fit_errors, fit_correlation] = _fun.decompose_covariance(fit_covariance)
                else:
                    print "  WARNING: No covariance matrix popped out of model.optimize()"
                    fit_errors      = fit_parameters
                    fit_correlation = None

                print "  reduced chi^2 is now", fit_reduced_chi_squared

                # print the parameters
                print "\n  FUNCTION:"
                for s in self.function_string:
                    print "    "+s
                print "\n  FIT:"
                for n in range(0,len(self.pnames)): print "    "+self.pnames[n]+" =", fit_parameters[n], "+/-", fit_errors[n]
                print

            # get the data to plot and plot it.
            for n in range(len(axes1s)):

                if settings["plot_all"][n]:
                    # plot the full (untrimmed) data, re-smoothed/coarsened
                    x_plot  = xdatas[n]
                    y_plot  = ydatas[n]
                    ey_plot = eydatas[n]
                    [x_plot, y_plot, ey_plot] = _fun.smooth_data (x_plot, y_plot, ey_plot, settings["smooth"][n])
                    [x_plot, y_plot, ey_plot] = _fun.coarsen_data(x_plot, y_plot, ey_plot, settings["coarsen"][n])
                else:
                    # this data is already smoothed and coarsened before the fit.
                    x_plot  = xs[n]
                    y_plot  = ys[n]
                    ey_plot = eys[n]

                # now plot everything

                # set up the axes
                axes1 = axes1s[n]
                axes2 = axes2s[n]
                _pylab.hold(True)
                axes1.clear()
                axes2.clear()

                # by default, the thing to subtract is 0.
                thing_to_subtract = y_plot*0.0

                # get the fit data if we're supposed to so we can know the thing to subtract
                if not fit_parameters==None:
                    # get the fit and fit background for plotting (so we can subtract it!)
                    y_fit            = self.evaluate  (fit_parameters, x_plot, n)
                    y_fit_background = self.background(fit_parameters, x_plot, n)
                    if settings["subtract"][n]: thing_to_subtract = y_fit_background

                # plot the guess
                if settings["show_guess"][n]:
                    y_guess = self.evaluate(self.p0, x_plot, n)
                    axes1.plot(x_plot, y_guess-thing_to_subtract, color='gray', label='guess')
                    # NOTE(review): unlike the other per-fit flags, this one is
                    # not indexed with [n] -- confirm whether that is intended
                    if settings["show_background"]:
                        y_guess_background = self.background(self.p0, x_plot, n)
                        axes1.plot(x_plot, y_guess_background-thing_to_subtract, color='gray', linestyle='--', label='guess background')

                # Plot the data
                if settings["show_error"][n]:
                    axes1.errorbar(x_plot, y_plot-thing_to_subtract, ey_plot, linestyle='', marker='D', mfc='blue', mec='w', ecolor='b', label='data')
                else:
                    axes1.plot(    x_plot, y_plot-thing_to_subtract,          linestyle='', marker='D', mfc='blue', mec='w', label='data')

                # plot the fit
                if not fit_parameters == None and not settings["skip"]:
                    axes1.plot(    x_plot, y_fit-thing_to_subtract,            color='red', label='fit')
                    if settings["show_background"][n]:
                        axes1.plot(x_plot, y_fit_background-thing_to_subtract, color='red', linestyle='--', label='fit background')

                    # plot the residuals in the upper graph
                    axes2.errorbar(x_plot, (y_plot-y_fit)/ey_plot, ey_plot*0.0+1.0, linestyle='', marker='o', mfc='blue', mec='w', ecolor='b')
                    axes2.plot    (x_plot, 0*x_plot, linestyle='-', color='k')

                # come up with a title
                title1 = data[n].path

                # second line of the title is the model
                title2 = "eyscript="+str(settings["eyscript"][n])+", model: " + str(self.function_string[n])

                # third line is the fit parameters
                title3 = ""
                if not settings["skip"] and not fit_parameters==None:
                    t = []
                    for i in range(0,len(self.pnames)):
                        t.append(self.pnames[i]+"=%.4g+/-%.2g" % (fit_parameters[i], fit_errors[i]))
                    title3 = title3+_fun.join(t[0:4],", ")
                    if len(t)>3: title3 = title3+'\n'+_fun.join(t[4:],", ")
                else:
                    title3 = title3+"(no fit performed)"

                # Start by formatting the previous plot
                axes2.set_title(title1+"\n"+title2+"\nFit: "+title3)
                axes1.set_xlabel(settings["xscript"][n])
                axes1.set_ylabel(settings["yscript"][n])

                # set the position of the legend
                axes1.legend(loc=[1.01,0], borderpad=0.02, prop=_FontProperties(size=7))

                # set the label spacing in the legend
                axes1.get_legend().labelsep = 0.01

                # set up the title label
                axes2.title.set_horizontalalignment('right')
                axes2.title.set_size(8)
                axes2.title.set_position([1.0,1.010])

                fig = _pylab.figure(axes1.get_figure().number)
                if format_figures: _st.format_figure(fig)
                _st.auto_zoom(axes1)
                _pylab.draw()
                _wx.Yield()

            # only auto-format the figures on the first pass
            format_figures = False

            _st.raise_figure_window()
            _wx.Yield()
            _st.raise_pyshell()

        # the only way we optimize is if we hit enter.
        if settings["autofit"]: settings["skip"] = False
        else:                   settings["skip"] = True

        # If last command is None, this is the first time. Parse the initial
        # command but don't ask for one.
        if command == "":
            if len(settings['min'])==1: print "min=" + str(settings['min'][0]) + "\nmax="+str(settings['max'][0])
            else:                       print "min=" + str(settings['min'])    + "\nmax="+str(settings['max'])
            if settings["autofit"]:
                if fit_parameters==None: command = ""
                else:                    command = "y"
            else:
                command = raw_input("-------> ").strip()

        clower = command.lower().strip()

        # first check and make sure the command isn't one of the simple ones
        if clower in ['']:
            # plain <enter>: actually optimize on the next pass
            settings["skip"] = False

        elif clower in ['h', 'help']:
            print
            print "COMMANDS"
            print "  <enter>    Run the fit or do more iterations."
            print "  g          Guess and show the guess."
            print "  o          Choose and output summary file."
            print "  n          No, this is not a good fit. Move on."
            print "  p          Call the printer() command."
            print "  q          Quit."
            print "  u          Same as 'y' but use fit as the next guess."
            print "  t          Just transfer these fit results to the guess."
            print "  y          Yes, this is a good fit. Move on."
            print "  z          Use current zoom to set xmin and xmax."
            print "  zN         Use current zoom from N'th figure (e.g. z3)."
            print "  zo         Zoom out xrange by a factor of 2."
            print "  zoN        Zoom out the N'th figure."
            print
            print "SETTINGS"
            keys = settings.keys()
            keys.sort()
            for key in keys: print "  "+key+" =", settings[key]
            print
            print "SETTING PARAMETER GUESS VALUES"
            print "  <parameter>=<value>"
            print "    sets the parameter guess value."
            print
            print "  <parameter>=x|y|dx|dy|slope"
            print "    sets the parameter guess value to the"
            print "    clicked x, y, dx, dy, or slope value."
            print
            print "  <parameter> lists the curent parameter value."
            print
            command=""
            hold_plot=True

        elif clower in ['q', 'quit', 'exit']:
            return {'command':'q','settings':settings}

        elif clower in ['g', 'guess']:
            # discard the stored guess and show the auto-guess next pass
            settings['guess']      = None
            settings['show_guess'] = True

        elif clower in ['o', 'output']:
            # print all the header elements of the current databox
            # and have the user choose as many as they want.
            print "\n\nChoose which header elements to include as columns in the summary file:"
            for n in range(len(data[0].hkeys)):
                print "  "+str(n)+": "+str(data[0].hkeys[n])

            # get a list of numbers from the user
            key_list = raw_input("pick headers by number: ").split(',')
            if not settings['output_columns']==None:
                  old_output_columns = list(settings['output_columns'])
            else: old_output_columns = None
            try:
                # get the list of keys.
                settings['output_columns'] = []
                for n in key_list: settings['output_columns'].append(data[0].hkeys[int(n.strip())])

                # now have the user select a file
                settings['output_path'] = _dialogs.Save()
                if not settings['output_path']==None:
                    # write the column names
                    f = open(settings['output_path'], 'w')
                    f.write('function_string\t'+str(self.function_string)+
                            '\nmodel\t'+str(self.__class__)+
                            '\nxscript\t'+str(settings["xscript"])+
                            '\nyscript\t'+str(settings["yscript"])+
                            '\neyscript\t'+str(settings["eyscript"])+'\n\n')
                    for k in settings['output_columns']: f.write(k+'\t')
                    for n in self.pnames: f.write(n+'\t'+n+'_error\t')
                    f.write('reduced_chi_squared\n')
                    f.close()
                # all set. It will now start appending to this file.
            except:
                # bad index or cancelled dialog: restore the old column list
                print "\nOops! Aborting."
                settings['output_columns'] = old_output_columns
            hold_plot=True

        elif clower in ['y', 'yes','u','use']:
            if fit_parameters==None or fit_errors==None:
                print "\nERROR: Cannot say a fit is good with no fit!"
            else:
                if settings['save_file']:
                    # write the fit results to the header
                    for d in data:
                        # If this is a good fit. Add relevant information to the header then save
                        d.insert_header("fit_model", str(self.__class__).split()[0][0:])
                        d.insert_header("fit_function", str(self.function_string))
                        for n in range(len(self.pnames)):
                            d.insert_header("fit_"+self.pnames[n], [fit_parameters[n], fit_errors[n]])
                        # NOTE(review): fit_correlation/fit_covariance are only
                        # (re)bound inside the "not skip" branch above -- if 'y'
                        # arrives before any optimization this raises NameError.
                        d.insert_header("fit_reduced_chi_squared",fit_reduced_chi_squared)
                        # build the correlations array (not a 2-d array)
                        d.insert_header("fit_correlations", fit_correlation)
                        d.insert_header("fit_min",     settings['min'])
                        d.insert_header("fit_max",     settings['max'])
                        d.insert_header("fit_smooth",  settings['smooth'])
                        d.insert_header("fit_coarsen", settings['coarsen'])

                        # auto-generate the new file name
                        if settings['fullsave'] in [1, True, 'auto']:
                            directory, filename = os.path.split(d.path)
                            new_path = directory + os.sep + settings['file_tag'] + filename
                            if new_path: d.save_file(new_path)
                        elif settings['fullsave'] in [2, 'ask']:
                            new_path = _dialogs.SingleFile()
                            if new_path: d.save_file(new_path)

                        # append to the summary file
                        if settings['output_path']:
                            f = open(settings['output_path'],'a')
                            for k in settings['output_columns']:
                                f.write(str(d.h(k))+'\t')
                            for n in range(len(fit_parameters)):
                                f.write(str(fit_parameters[n])+'\t'+str(fit_errors[n])+'\t')
                            f.write(str(sum(fit_reduced_chi_squared)/len(fit_reduced_chi_squared))+'\n')
                            f.close()

                # Return the information
                return_value = {"command"                 :'y',
                                "fit_parameters"          :fit_parameters,
                                "fit_errors"              :fit_errors,
                                "fit_reduced_chi_squared" :fit_reduced_chi_squared,
                                "fit_covariance"          :fit_covariance,
                                "settings"                :settings,
                                "function_string"         :self.function_string,
                                "pnames"                  :self.pnames}
                # 'u' is the same as 'y' but seeds the next fit with this one
                if clower in ['u', 'use']:
                    return_value['command'] = 'u'
                    return_value['settings']['guess'] = fit_parameters
                return return_value

        elif clower in ['t', 'transfer']:
            if fit_parameters==None or fit_errors==None:
                print "\nERROR: Nothing to transfer!"
            else:
                # copy the fit result into the guess array
                for n in range(len(fit_parameters)):
                    self.p0[n] = fit_parameters[n]

        elif clower in ['n', 'no', 'next']:
            return {'command':'n','settings':settings}

        elif clower in ['p', 'print']:
            _s.printer()
            hold_plot = True

        elif clower in ['zo', 'zoomout']:
            # if we haven't set the min and max yet, use the axes bounds.
            if not settings['min']:
                settings['min'] = []
                for a in axes1s: settings['min'].append(a.get_xlim()[0])
            if not settings['max']:
                settings['max'] = []
                for a in axes1s: settings['max'].append(a.get_xlim()[1])

            # double the x-range about its center
            x0 = _n.array(settings['min'])
            x1 = _n.array(settings['max'])
            xc = 0.5*(x0+x1)
            xs = x1-x0
            settings['min'] = list(xc-xs)
            settings['max'] = list(xc+xs)

        elif clower in ['z', 'zoom']:
            # take the fit range from the current zoom of every figure
            settings['min'] = []
            settings['max'] = []
            for a in axes1s:
                settings['min'].append(a.get_xlim()[0])
                settings['max'].append(a.get_xlim()[1])

        elif clower[0] == "z" and len(clower.split('=')) == 1:
            # "zN" / "zoN": zoom (out) only the N'th figure
            try:
                if clower[1] == 'o':
                    n = int(clower[2:].strip()) - settings['figure']
                    print "Zooming out figure", n+settings['figure']
                    x0, x1 = axes1s[n].get_xlim()
                    xc = 0.5*(x0+x1)
                    xs = x1-x0
                    settings['min'][n] = xc-xs
                    settings['max'][n] = xc+xs
                else:
                    n = int(clower[1:].strip()) - settings['figure']
                    print "Zooming figure", n+settings['figure']
                    settings['min'][n] = axes1s[n].get_xlim()[0]
                    settings['max'][n] = axes1s[n].get_xlim()[1]
            except:
                print "ERROR: could not zoom according to the specified figure"

        # just display the value
        elif clower in settings.keys():
            print
            print clower,'=',settings[clower]
            hold_plot = True

        else:
            # now parse it (it has the form "min=2; max=4; plot_all=True")
            s = command.split(';')
            for c in s:
                try:
                    # get the key and value
                    [key, value] = c.split('=')
                    key   = key.strip()
                    value = value.strip()

                    # if this is a setting
                    if settings.has_key(key):
                        # execute the string to get the value
                        # NOTE(review): eval() of raw user input -- fine for an
                        # interactive tool, but never feed untrusted strings here.
                        evalue = eval(value)

                        # if this is an iterable setting, make sure it's a list
                        if key in iterable_settings:
                            # if it's a single value, set all the values to this
                            if not _s.fun.is_iterable(evalue):
                                for i in range(len(settings[key])):
                                    settings[key][i] = evalue
                            # otherwise, the lengths must match
                            elif len(evalue)==len(settings[key]):
                                settings[key] = evalue
                            # lengths don't match
                            else:
                                print "\nERROR: length of "+key+" does not match."
                        # if it's not an iterable setting
                        else: settings[key] = eval(value)

                    # otherwise we've specified a parameter guess
                    else:
                        self.set_parameter(key, value)
                        settings['guess'] = self.p0
                except:
                    print "ERROR: '"+str(c)+"' is an invalid command."

        # make sure we don't keep doing the same command over and over!
        command = ""
        print
class curve(model_base):

    # globals such as sin and cos, visible to eval()'d function strings
    globs={}

    def __init__(self, f='a+b*x+c*x**2', p='a=1.5, b, c=1.5', bg=None, a=None, globs={}):
        """
        This class takes the function string you specify and generates
        a model based on it.

        f       either a string expression in x and the parameters, or a
                function f(x,a,b,..) that you have defined (or a list of
                either, for simultaneous fits)
        p       a comma-delimited string of parameter names, each with an
                optional "=default" value (defaults to 1.0 if omitted)
        bg      a background function (or list), same form as f
        a       a comma-delimited string of additional args to send to the
                function (or a list, one entry per function)
        globs   a dict of globals should you wish to have these visible to f

        If the function is a string it will be evaluated knowing about all
        the globals specified by the globs argument.

        If it is a function, it can have as many arguments as you like, so
        long as the x data is the first argument, and each of the
        subsequent argument slots has a corresponding element in p.

        If you want to do something a little more fancy with a guessing
        algorithm, write a model class similar to those in spinmob.models.

        NOTE(review): the mutable default globs={} is shared between
        instances; it is copied via dict(globs) below, so this is benign.
        """
        # make sure we have lists
        if not _s.fun.is_iterable(f) : f  = [f]
        if not _s.fun.is_iterable(bg): bg = [bg]

        # make sure the background has as many elements as the function list
        # (broadcast the single supplied background to every function)
        if not len(f)==len(bg):
            x  = bg[0]
            bg = list(f)
            for n in range(len(bg)): bg[n]=x

        # start by parsing the p string. This is the same for both f's
        p_split = p.split(',')

        # Loop over the parameters, get their names and possible default values
        self.pnames   = []
        self.defaults = []
        for parameter in p_split:
            parameter_split = parameter.split('=')
            self.pnames.append(parameter_split[0].strip())
            if len(parameter_split)==2: self.defaults.append(float(parameter_split[1]))
            else:                       self.defaults.append(1.0)

        # set up the guess
        self.p0 = _n.array(self.defaults)

        # store the globals
        self.globs = dict(globs)

        self.f                 = []
        self.bg                = []
        self.function_string   = []
        self.background_string = []
        self.additional_args   = []

        # loop over the supplied list of functions
        for n in range(len(f)):

            # now do different things depending on the type of function
            if type(f[n])==str:
                # get the function strings
                self.function_string.append(f[n])
                if bg[n]==None: self.background_string.append(f[n])
                else:           self.background_string.append(bg[n])

                # override the function and background: build lambdas of the
                # form "lambda x,a,b,...: <expression>" via eval
                args = 'x,'+_fun.join(self.pnames,',')
                if a==None or a[n]==None:
                    self.additional_args.append(None)
                else:
                    args = args + "," + str(a[n])
                    self.additional_args.append(eval('['+str(a[n])+']', self.globs))
                self.f.append( eval('lambda ' + args + ': ' + self.function_string[n],   self.globs))
                self.bg.append(eval('lambda ' + args + ': ' + self.background_string[n], self.globs))

            else:
                # a real callable: use it directly and synthesize the strings
                if bg[n]==None: bg[n] = f[n]
                self.function_string.append(  f[n].__name__ +"(x, "+p+")")
                self.background_string.append(bg[n].__name__ +"(x, "+p+")")

                # override the function and background
                self.f.append(f[n])
                self.bg.append(bg[n])

    # override the evaluate and background functions used by the base class.
    def evaluate(self, p, x, n=None):
        # n selects one of the parallel functions; n==None evaluates them all,
        # in which case x is expected to be a list of x-arrays (one per function)
        if n==None:
            results = []
            for n in range(len(self.f)):
                if self.additional_args[n]==None: results.append(self.f[n](x[n],*p))
                else:                             results.append(self.f[n](x[n],*(list(p)+self.additional_args[n])))
        else:
            if self.additional_args[n]==None: results = self.f[n](x,*p)
            else:                             results = self.f[n](x,*(list(p)+self.additional_args[n]))
        return results

    def background(self, p, x, n=None):
        # same dispatch convention as evaluate(), using the background functions
        if n==None:
            results = []
            for n in range(len(self.bg)):
                if self.additional_args[n]==None: results.append(self.bg[n](x[n],*p))
                else:                             results.append(self.bg[n](x[n],*(list(p)+self.additional_args[n])))
        else:
            if self.additional_args[n]==None: results = self.bg[n](x,*p)
            else:                             results = self.bg[n](x,*(list(p)+self.additional_args[n]))
        return results

    # You can override this if you want the guess to be something fancier.
    def guess(self, xdata, ydata, xbi1=0, xbi2=-1):
        """
        This function takes the supplied data (and two indices from which to
        estimate the background should you want them) and returns a best guess
        of the parameters, then stores this guess in p0.

        This base implementation simply reuses the defaults from the p string.
        """
        self.write_to_p0(self.defaults)
        return
class quartic(model_base):
    """
    Fourth-order polynomial model:
    y = a0 + a1*x + a2*x^2 + a3*x^3 + a4*x^4
    """

    # human-readable form of the model, shown during interactive fitting
    function_string = "p[0] + p[1]*x + p[2]*x*x + p[3]*x*x*x + p[4]*x*x*x*x"
    pnames = ["a0", "a1", "a2", "a3", "a4"]

    # this must return an array!
    def background(self, p, x):
        # the whole polynomial is considered background
        return self.evaluate(p,x)

    def evaluate(self, p, x):
        return p[0] + p[1]*x + p[2]*x*x + p[3]*x*x*x + p[4]*x*x*x*x

    def guess(self, xdata, ydata, xbi1=0, xbi2=-1):
        # first get the appropriate size array
        p=self.p0

        # guess the slope and intercept; higher orders start at zero
        # NOTE(review): len(...)/2 relies on Python-2 integer division
        p[0] = ydata[0][len(xdata[0])/2]
        p[1] = (ydata[0][xbi2]-ydata[0][xbi1])/(xdata[0][xbi2]-xdata[0][xbi1])
        p[2] = 0.0
        p[3] = 0.0
        p[4] = 0.0

        # write these values to self.p0, but avoid the guessed_list
        self.write_to_p0(p)
|
streitho/spinmob | _constants.py | c = 299792458.0
pi = 3.1415926535897931
u0 = 1.25663706e-6
uB = 9.27400949e-24
e = 1.60217e-19
h = 6.626068e-34
hbar = h/(2*pi)
kB = 1.3806503e-23 |
streitho/spinmob | _plot_magphase.py | import _plotting_mess; reload(_plotting_mess)
# convenience aliases exposing the magnitude/phase plotting routines
# from _plotting_mess under short names
data      = _plotting_mess.magphase_data
databoxes = _plotting_mess.magphase_databoxes
files     = _plotting_mess.magphase_files
function  = _plotting_mess.magphase_function
streitho/spinmob | _pylab_helper.py | <reponame>streitho/spinmob
#!/usr/bin/env python
#Boa:App:BoaApp
import wx
import pylab
import numpy
import pylab_helper_frame
# Boa Constructor module registry: maps module name to
# [index, description, source filename]
modules ={'pylab_helper_frame': [1,
          'Main frame of Application',
          'pylab_helper_frame.py']}
class BoaApp(wx.App):
    """wx application wrapper (generated by Boa Constructor)."""

    def OnInit(self):
        # register image handlers before any windows are created
        wx.InitAllImageHandlers()
        self.main = pylab_helper_frame.create(None)
        self.main.Show()
        self.SetTopWindow(self.main)
        return True
def main():
    """Make a couple of demo plots, then run the helper application."""
    pylab.plot([1,2,1,2,1,2])
    pylab.figure()
    pylab.plot([1,2,1,3,1,4])
    pylab.plot([2,1,2,1,2,1])
    # start the wx event loop (blocks until the app exits)
    application = BoaApp(0)
    application.MainLoop()
def gui():
    """
    Attach the helper frame to an already-running wx app and return it.

    Unlike main(), this does not create a new application or enter a
    new event loop -- it assumes wx.GetApp() finds an existing one.
    """
    wx.InitAllImageHandlers()
    a = wx.GetApp()
    a.main = pylab_helper_frame.create(None)
    a.main.Show()
    a.SetTopWindow(a.main)
    return a
    #return(BoaApp(0))
# launch the demo application when run as a script
if __name__ == '__main__':
    main()
|
streitho/spinmob | ___retired.py | def plot_columns(self, start=1, end=-1, yshift=0.0, yshift_every=1, xcolumn=0, legend=None, clear=1, axes="gca", legend_max=30, autoformat=True, tall="auto", **kwargs):
"""
This does a line plot of a range of columns.
start=1 Index of the starting column.
end=-1 Index of the end column, with -1 meaning "all the way"
yshift=0.0 How much vertical artificial offset to apply
yshift_every=1 How many traces should sit at the same offset
xcolumn=0 Index of the x-data column
legend=None What header row to use as the legend values. If set to None,
use the column ckeys
legend_max=40 Maximum number of legend entries
tall=True When formatting the figure, make it tall.
**kwargs Arguments to be sent to "plot". See "plot" for more details!
"""
# get the axes
if axes=="gca": axes=_pylab.gca()
if clear: axes.clear()
# set the xdata and ckeys
self.xdata = self.c(xcolumn)
self.xlabel = self.ckeys[xcolumn]
self.ylabel = self.ckeys[start]
# get the last index if necessary
if end < start: end = len(self.columns)-1
# now loop over the columns
for n in range(start, end+1):
# store this trace
self.ydata = self.c(n)
self.eydata = None
if legend == None: self.legend_string = self.ckeys[n].replace("_","")
else: self.legend_string = str(self.h(legend)[n-1]).replace("_","")
# now plot it
self.plot(yshift=((n-start)/yshift_every)*yshift, axes=axes, clear=0, autoformat=False, **kwargs)
# now fix the legend up real nice like
if n-start > legend_max-2 and n != end: axes.get_lines()[-1].set_label('_nolegend_')
elif n-start == legend_max-2: axes.get_lines()[-1].set_label('...')
# fix up the title if there's an offset
if yshift: self.title = self.path + '\nprogressive y-shift='+str(yshift)+" every "+str(yshift_every)
axes.set_title(self.title)
# make it look nice
if tall=="auto": tall = yshift
if autoformat: _pt.format_figure(axes.figure, tall=tall)
# bring it to the front, but keep the command line up too
_pt.get_figure_window()
_pt.get_pyshell()
def get_data(self):
    """
    Generate self.xdata, self.ydata and self.eydata by executing
    self.xscript, self.yscript and (optionally) self.eyscript, and label
    the axes after the scripts that produced them.

    Used by the fitting routine, whose only restriction on a data class
    is that it can load_file() and get_data(); hence no parameters.
    """
    # run the scripts and store their results
    self.xdata = self.execute_script(self.xscript)
    self.ydata = self.execute_script(self.yscript)

    # the error column is optional
    self.eydata = self.execute_script(self.eyscript) if self.eyscript else None

    # axis labels mirror the generating scripts
    self.xlabel = self.xscript
    self.ylabel = self.yscript
def plot(self, xscript=0, yscript=1, eyscript=None, clear=True, autoformat=True, axes="gca", coarsen=0, yshift=0, linestyle='auto', marker='auto', lscript=None, title=None, **kwargs):
    """
    Generate (via scripts) and plot x vs y data with optional error bars.

    KEYWORDS (can set as arguments or kwargs):

    xscript, yscript, eyscript  Scripts generating the three data columns.
                                Passing xscript=None or yscript=None plots
                                the previously generated self.*data instead.
    axes="gca"         Which set of axes to use. "gca" means current axes.
    clear=True         Clear the axes first?
    autoformat=True    Format the axes/labels/legend/title when done?
    coarsen=0          Should we coarsen the data?
    yshift=0           How much vertical artificial offset should we add?
    linestyle="auto"   What type of line should we plot?
                       "auto" means lines for data with no error and symbols
                       for data with error (using spinmob style object).
                       "style" means always use lines from the style cycle.
    marker="auto"      What type of markers should we use?
                       "auto" means markers only for data with error;
                       "style" means definitely use the style-cycle marker;
                       otherwise just specify a marker.
    lscript=None       None means use self.legend_string (usually the file
                       name); otherwise runs the script and str()'s it.
    title=None         None means automatically come up with a title.
    kwargs             Remaining entries are forwarded to the plot call.

    Returns the axes on success, False/None on failure.
    """
    # update with the user-supplied/default values with kwargs
    # NOTE(review): the eval() branch can never bind the keyword locals it
    # appears to target; in practice everything lands in plot_kwargs.
    plot_kwargs = {}
    for key in kwargs:
        try:    eval(key + "=" + kwargs[key])
        except: plot_kwargs[key] = kwargs[key]

    # if we're doing a no-script plot
    if xscript==None or yscript==None:
        if self.ydata == None or len(self.ydata) <= 0:
            print "No data to plot! Generate data with xscript and yscript."
            return False
        xdata = self.xdata
        ydata = self.ydata
        eydata= self.eydata

    # if we're doing a scripted plot
    else:
        # use the expected error column if we're supposed to
        if eyscript == "auto": eyscript = yscript+"_error"

        # if eydata doesn't exist and we haven't specified no error
        # try to generate the column
        if  not eyscript in self.columns.keys() \
        and not eyscript==None \
        and not type(eyscript) in [int,long]:
            if self.debug: print eyscript, "is not a column"
            eydata = self.execute_script(eyscript)

        [xpression, xvars] = self._parse_script(xscript)
        if xvars == None: return
        [ypression, yvars] = self._parse_script(yscript)
        if yvars == None: return
        if not eyscript == None:
            [spression, svars] = self._parse_script(eyscript)
            if svars == None: eydata = None

        # try to evaluate the data
        self.xdata = eval(xpression, xvars)
        self.ydata = eval(ypression, yvars)
        if eyscript == None:  self.eydata = None
        else:                 self.eydata = eval(spression, svars)

        xdata  = self.xdata
        ydata  = self.ydata
        eydata = self.eydata

        self.xlabel = xscript
        self.ylabel = yscript
        if title==None:
            self.title = self.assemble_title()
        else:
            self.title = title

    # trump the x and y labels
    if plot_kwargs.has_key('xlabel'): self.xlabel = plot_kwargs.pop('xlabel')
    if plot_kwargs.has_key('ylabel'): self.ylabel = plot_kwargs.pop('ylabel')

    # coarsen the data if we're supposed to
    if coarsen: [xdata, ydata, eydata]=_fun.coarsen_data(xdata, ydata, eydata, coarsen)

    # assumes we've gotten data already
    if axes=="gca": axes = _pylab.gca()
    if clear: axes.clear()

    # modify the legend string
    if lscript == None: label = self.legend_string
    else:               label = str(self.execute_script(lscript))
    if yshift: label = label + " ("+str(yshift)+")"

    # Now figure out the list of arguments and plotting function

    # default line and marker values
    mec        = None
    mfc        = None
    line_color = None

    # no eydata.
    if eydata == None:
        # if we're to use the style object to get the line attributes
        if linestyle in ['auto', 'style']:
            # get the linestyle from the style cycle
            linestyle  = _pt.style.get_linestyle(1)
            line_color = _pt.style.get_line_color(1)

        # only make markers without eydata if we're not in auto mode
        if marker in ['auto']:
            marker = ''
        # if we're forcing the use of style
        elif marker in ['style']:
            # get the marker attributes from the style cycle
            marker = _pt.style.get_marker(1)
            mfc    = _pt.style.get_face_color(1)
            mec    = _pt.style.get_edge_color(1)
        # otherwise, marker is already defined. Hopefully **plot_kwargs will override these values

        # handle to plotting function
        plotter = axes.plot

    # we have error bars
    else:
        # if we're in auto mode, NO LINES!
        if linestyle in ['auto']:
            linestyle  = ''
            line_color = 'k'
        # if we're forcing the style object
        elif linestyle in ['style']:
            linestyle  = _pt.style.get_linestyle(1)
            line_color = _pt.style.get_line_color(1)
        # otherwise it's specified. Default to blue and let **plot_kwargs override

        # similarly for markers
        if marker in ['auto', 'style']:
            # get the marker attributes from the style cycle
            marker = _pt.style.get_marker(1)
            mfc    = _pt.style.get_face_color(1)
            mec    = _pt.style.get_edge_color(1)
        # otherwise it's specified

        # handle to plotter and error argument
        plotter = axes.errorbar
        plot_kwargs['yerr']   = eydata
        plot_kwargs['ecolor'] = mec

    # only add these new arguments to plot_kwargs if they don't already exist
    # we want to be able to supercede the style cycle
    if  not plot_kwargs.has_key('color') \
    and not line_color == None:                     plot_kwargs['color'] = line_color

    if  not plot_kwargs.has_key('linestyle') \
    and not plot_kwargs.has_key('ls'):              plot_kwargs['linestyle'] = linestyle

    if  not plot_kwargs.has_key('marker'):          plot_kwargs['marker'] = marker

    if  not plot_kwargs.has_key('mec') \
    and not plot_kwargs.has_key('markeredgecolor') \
    and not mec==None:                              plot_kwargs['mec'] = mec

    if  not plot_kwargs.has_key('mfc') \
    and not plot_kwargs.has_key('markerfacecolor') \
    and not mfc==None:                              plot_kwargs['mfc'] = mfc

    if  not plot_kwargs.has_key('markeredgewidth') \
    and not plot_kwargs.has_key('mew'):             plot_kwargs['mew'] = 1.0

    # actually do the plotting
    plotter(xdata, ydata + yshift, label=label, **plot_kwargs)
    axes.set_xlabel(self.xlabel)
    axes.set_ylabel(self.ylabel)
    axes.set_title(self.title)
    if autoformat: _pt.format_figure()

    return axes
streitho/spinmob | _plotting.py | import _pylab_tweaks as tweaks; reload(tweaks)
import _plotting_mess; reload(_plotting_mess)
# Short user-facing aliases for the x-y plotting routines in _plotting_mess.
databoxes = _plotting_mess.databoxes_xy
files = _plotting_mess.files_xy
data = _plotting_mess.xy
data_magphase = _plotting_mess.mag_phase
data_realimag = _plotting_mess.real_imag
surface_data = _plotting_mess.xyz
function = _plotting_mess.function_1D
function_parametric = _plotting_mess.function_parametric
surface_function = _plotting_mess.function_2D
style = _plotting_mess.plot_style_cycle
streitho/spinmob | _spline.py | import scipy as _scipy
import pylab as _pylab
from scipy import interpolate as _interpolate
import spinmob as _s
import pylab_helper_standalones as _pylab_help
import time as _time
import spinmob_functions as _fun
import numpy as _numpy
class spline_single:
    """
    Container for a single 1-D fit of ydata vs xdata.

    With simple=0 the data are spline-fit on construction via
    scipy.interpolate.splrep; with simple=1 no fit is done and evaluate()
    falls back to piecewise-linear interpolation of the raw data.
    Calling the instance is the same as calling evaluate().
    """
    # this class is a container for a single spline fit curve
    # initialize it with the results from a spline fit (that array thingy)
    # simply store the fit result on creation
    def __init__(self, xdata, ydata, smoothing=5000, degree=5, presmoothing=0, plot=True, xlabel="x", ylabel="y", show_derivative=0, xmin="same", xmax="same", simple=0):
        """
        Create a spline object and fit xdata vs ydata with spline
        Set type="interpolate" to simply interpolate the raw data

        smoothing        splrep smoothing parameter s
        degree           splrep spline degree k
        presmoothing     nearest-neighbor smoothing applied to ydata before fitting
        plot             if True, plot data (red x's), smoothed data, and fit now
        show_derivative  if nonzero, also plot this derivative on the right axis
        xmin, xmax       plot x-range ("same" keeps the current range)
        simple           if nonzero, store the data only (no spline fit)
        """
        # store this stuff for later use
        self.xlabel = xlabel
        self.ylabel = ylabel
        self.xdata  = xdata
        self.ydata  = ydata
        self.simple = simple
        self.xmin   = min(xdata)
        self.xmax   = max(xdata)
        self._path  = "(spline not saved)"
        self.smoothing    = smoothing
        self.degree       = degree
        self.presmoothing = presmoothing
        self.plot_flag    = plot
        # message is inspected by splinteractive()/generate_spline_array()
        self.message      = ""
        # if we're not in simple mode, we have to do a spline fit, not just store the data
        if not simple:
            # self.ydata might be smoothed
            self.ydata_smoothed = list(ydata)
            # if we're supposed to, presmooth the data
            # (smooth_array is from _functions; presumably smooths in place -- TODO confirm)
            if presmoothing: _fun.smooth_array(self.ydata_smoothed, presmoothing)
            print "presmoothing = ", str(presmoothing)
            print "smoothing    = ", str(smoothing)
            print "degree       = ", str(degree)
            # do the fit
            self.pfit = _interpolate.splrep(xdata, self.ydata_smoothed, s=smoothing, k=degree)
        # now plot if we're supposed to
        if plot:
            fig = _pylab.gcf()
            fig.clear()
            axes = fig.gca()
            axes.plot(xdata,ydata, "xr")
            axes.set_xlabel(xlabel)
            axes.set_ylabel(ylabel)
            if not simple:
                # show the (possibly presmoothed) input and the spline curve
                axes.plot(xdata,self.ydata_smoothed,"+g")
                self.plot(steps=len(xdata)*5, clear=False)
                _pylab_help.set_all_line_attributes("lw", 2)
                # optionally overlay a derivative on a right-hand axis, in red
                if show_derivative: self.plot(steps=len(xdata)*5, clear=False, derivative=show_derivative, yaxis='right')
                _pylab_help.set_all_line_attributes("lw", 1)
                _pylab_help.set_all_line_attributes("color", "r")
            _pylab_help.set_xrange(xmin, xmax)
            fig.canvas.Refresh()
    # this takes a single value or an array!
    def __call__(self, x, derivative=0, smooth=0, simple='auto'):
        return self.evaluate(x, derivative, smooth, simple)
    def evaluate(self, x, derivative=0, smooth=0, simple='auto'):
        """
        Evaluate the curve (or its derivative) at x (scalar or array).

        smooth=0 is how much to smooth the spline data
        simple='auto' is whether we should just use straight interpolation
        you may want smooth > 0 for this, when derivative=1
        """
        if simple=='auto': simple = self.simple
        # make it into an array if it isn't one, and remember that we did
        is_array = True
        if not type(x) == type(_pylab.array([])):
            x = _pylab.array([x])
            is_array = False
        if simple:
            # simple mode: piecewise-linear interpolation of the raw data
            # loop over all supplied x data, and come up with a y for each
            y = []
            for n in range(0, len(x)):
                # get a window of data around x
                if smooth:
                    # trim_data is from _functions; presumably returns the data
                    # restricted to [x-smooth, x+smooth] -- TODO confirm
                    [xtemp, ytemp, etemp] = _fun.trim_data(self.xdata, self.ydata, None, [x[n]-smooth, x[n]+smooth])
                else:
                    i1 = _fun.index_nearest(x[n], self.xdata)
                    # if the nearest data point is lower than x, use the next point to interpolate
                    if self.xdata[i1] <= x[n] or i1 <= 0: i2 = i1+1
                    else:                                 i2 = i1-1
                    # if we're at the max, extrapolate
                    if i2 >= len(self.xdata):
                        print x[n], "is out of range. extrapolating"
                        i2 = i1-1
                    # linear interpolation/extrapolation through the two points
                    x1 = self.xdata[i1]
                    y1 = self.ydata[i1]
                    x2 = self.xdata[i2]
                    y2 = self.ydata[i2]
                    slope = (y2-y1)/(x2-x1)
                    xtemp = _numpy.array([x[n]])
                    ytemp = _numpy.array([y1 + (x[n]-x1)*slope])
                # calculate the slope based on xtemp and ytemp (if smoothing)
                # or just use the raw slope if smoothing=0
                if derivative == 1:
                    if smooth:
                        # least-squares slope of the windowed data
                        y.append((_numpy.average(xtemp*ytemp)-_numpy.average(xtemp)*_numpy.average(ytemp)) /
                                 (_numpy.average(xtemp*xtemp)-_numpy.average(xtemp)**2))
                    else:
                        y.append(slope)
                # otherwise just average (even with one element)
                elif derivative==0:
                    y.append(_numpy.average(ytemp))
            # return the same shape we were given
            if is_array: return _numpy.array(y)
            else:        return y[0]
        if smooth:
            # spline mode with smoothing: average splev over a 20-point window
            y = []
            for n in range(0, len(x)):
                # take 20 data points from x+/-smooth
                xlow = max(self.xmin,x[n]-smooth)
                xhi  = min(self.xmax,x[n]+smooth)
                xdata = _pylab.linspace(xlow, xhi, 20)
                ydata = _interpolate.splev(xdata, self.pfit, derivative)
                y.append(_numpy.average(ydata))
            if is_array: return _numpy.array(y)
            else:        return y[0]
        else:
            # plain spline evaluation
            return _interpolate.splev(x, self.pfit, derivative)
    def plot(self, derivative=0, xmin="auto", xmax="auto", steps=500, smooth=0, simple='auto', clear=True, yaxis='left'):
        """
        Plot the curve (or the specified derivative) over [xmin, xmax].
        yaxis='right' plots on a twinx() axis instead of the current one.
        NOTE: when derivative is nonzero, self.ylabel is permanently modified.
        """
        if simple=='auto': simple = self.simple
        # get the min and max
        if xmin=="auto": xmin = self.xmin
        if xmax=="auto": xmax = self.xmax
        # get and clear the figure and axes
        f = _pylab.gcf()
        if clear and yaxis=='left': f.clf()
        # setup the right-hand axis
        if yaxis=='right':  a = _pylab.twinx()
        else:               a = _pylab.gca()
        # define a new simple function to plot, then plot it
        # (note: this rebinds the local name f from the figure to the function)
        def f(x): return self.evaluate(x, derivative, smooth, simple)
        _pylab_help.plot_function(f, xmin, xmax, steps, clear, axes=a)
        # label it: pick the ordinal suffix for the derivative
        th = "th"
        if derivative == 1: th = "st"
        if derivative == 2: th = "nd"
        if derivative == 3: th = "rd"
        if derivative: self.ylabel = str(derivative)+th+" derivative of "+self.ylabel+" spline"
        a.set_xlabel(self.xlabel)
        a.set_ylabel(self.ylabel)
        a.figure.canvas.Refresh()
class spline_array:
# this class holds an array of spline curves spline(x) at different values of some
# "y" parameter, and will provide linear interpolation between them with evaluate()
# evaluate_uber() is used when you generate y_splines. I wouldn't use this yet.
#
def __init__(self, max_y_splines=100, simple=0):
# create this class, then add spline curves to it
# the splines are stored in a dictionary with the key
# as the parameter and the value is the spline_single
self.x_splines = {} # supplied by you
self.y_splines = [{},{},{},{},{}] # generated by this class, index is x-derivative
self.max_y_splines = max_y_splines # this sets the minimum x_parameter spacing of the y-splines
self.xmin = None # set the minimum and maximum values over which this is valid
self.xmax = None
self.ymin = None
self.ymax = None
self.xlabel = None
self.ylabel = None
self.zlabel = None
self._path = "(spline array not saved)"
self.simple=simple
def add_x_spline(self, y_parameter, x_spline):
# this function adds a spline to the dictionary
self.x_splines[y_parameter] = x_spline
# define or update range of validity
if self.xmin == None: self.xmin = x_spline.xmin
if self.xmax == None: self.xmax = x_spline.xmax
if self.ymax == None: self.ymax = y_parameter
if self.ymin == None: self.ymin = y_parameter
if x_spline.xmax > self.xmax: self.xmax = x_spline.xmax
if x_spline.xmin < self.xmin: self.xmin = x_spline.xmin
if y_parameter > self.ymax: self.ymax = y_parameter
if y_parameter < self.ymin: self.ymin = y_parameter
# add in order to the master list of y_values
self.y_values = self.x_splines.keys()
self.y_values.sort()
def remove_x_spline(self, y_parameter):
# this function removes a spline from the dictionary
try: self.x_splines.pop(y_parameter)
except: print "Spline with parameter "+str(y_parameter)+" doesn't exist! Stop being a damn FOOL."
def __call__(self, x, y, x_derivative=0, smooth=0, simple='auto'):
return self.evaluate(x, y, x_derivative, smooth, simple)
def evaluate(self, x, y, x_derivative=0, smooth=0, simple='auto'):
"""
this evaluates the 2-d spline by doing linear interpolation of the curves
"""
if simple=='auto': simple = self.simple
# find which values y is in between
for n in range(0, len(self.y_values)-1):
# if it's in between, interpolate!
if self.y_values[n] <= y and self.y_values[n+1] >= y:
y1 = self.y_values[n]
y2 = self.y_values[n+1]
z1 = self.x_splines[y1].evaluate(x, x_derivative, smooth, simple)
z2 = self.x_splines[y2].evaluate(x, x_derivative, smooth, simple)
return z1 + (y-y1)*(z2-z1)/(y2-y1)
print "YARG! The y value "+str(y)+" is out of interpolation range!"
if y >= self.y_values[-1]: return self.x_splines[self.y_values[-1]].evaluate(x, x_derivative, smooth, simple)
else : return self.x_splines[self.y_values[0]].evaluate(x, x_derivative, smooth, simple)
def plot_fixed_x(self, x_values, x_derivative=0, steps=1000, smooth=0, simple='auto', ymin="auto", ymax="auto", format=True, clear=1):
"""
plots the data at fixed x-value, so z vs x
"""
if simple=='auto': simple=self.simple
# get the min and max
if ymin=="auto": ymin = self.ymin
if ymax=="auto": ymax = self.ymax
if clear: _pylab.gca().clear()
if not type(x_values) in [type([]), type(_pylab.array([]))]: x_values = [x_values]
for x in x_values:
# define a new simple function to plot, then plot it
def f(y): return self.evaluate(x, y, x_derivative, smooth, simple)
_pylab_help.plot_function(f, ymin, ymax, steps, 0, False)
# label it
a = _pylab.gca()
a.set_xlabel(self.ylabel)
if x_derivative: a.set_ylabel(str(x_derivative)+" "+str(self.xlabel)+" derivative of "+self.zlabel)
else: a.set_ylabel(self.zlabel)
a.set_title(self._path+"\nSpline array plot at fixed x = "+self.xlabel)
a.get_lines()[-1].set_label("x ("+self.xlabel+") = "+str(x))
if format: _s.format_figure()
return a
def plot_range_fixed_x(self, xmin="auto", xmax="auto", xsteps=21, ymin="auto", ymax="auto", ysteps=200, clear=True, x_derivative=0):
if xmin=="auto": xmin=self.xmin
if xmax=="auto": xmax=self.xmax
self.plot_fixed_x(_pylab.linspace(xmin, xmax, xsteps), x_derivative, ysteps, ymin, ymax, False, clear)
_s.format_figure()
def plot_fixed_y(self, y_values, x_derivative=0, steps=1000, smooth=0, simple='auto', xmin="auto", xmax="auto", format=True, clear=1):
"""
plots the data at a fixed y-value, so z vs y
"""
if simple=='auto': simple=self.simple
# get the min and max
if xmin=="auto": xmin = self.xmin
if xmax=="auto": xmax = self.xmax
if clear: _pylab.gca().clear()
if not type(y_values) in [type([]), type(_pylab.array([]))]: y_values = [y_values]
for y in y_values:
# define a new simple function to plot, then plot it
def f(x): return self.evaluate(x, y, x_derivative, smooth, simple)
_pylab_help.plot_function(f, xmin, xmax, steps, 0, True)
# label it
a = _pylab.gca()
th = "th"
if x_derivative == 1: th = "st"
if x_derivative == 2: th = "nd"
if x_derivative == 3: th = "rd"
if x_derivative: a.set_ylabel(str(x_derivative)+th+" "+self.xlabel+" derivative of "+self.zlabel+" spline")
else: a.set_ylabel(self.zlabel)
a.set_xlabel(self.xlabel)
a.set_title(self._path+"\nSpline array plot at fixed y "+self.ylabel)
a.get_lines()[-1].set_label("y ("+self.ylabel+") = "+str(y))
if format: _s.format_figure()
return a
def plot_range_fixed_y(self, ymin="auto", ymax="auto", ysteps=21, xmin="auto", xmax="auto", xsteps=200, clear=True, x_derivative=0):
if ymin=="auto": ymin=self.ymin
if ymax=="auto": ymax=self.ymax
self.plot_fixed_y(_pylab.linspace(ymin, ymax, ysteps), x_derivative, xsteps, xmin, xmax, False, clear)
_s.format_figure()
def generate_y_values(self):
self.y_values = self.x_splines.keys()
self.y_values.sort()
def copy_spline_array(a):
    """
    Return a fresh spline_array instance carrying all the data and settings
    of a (the spline dictionaries are shared, not deep-copied).
    """
    copied = ["x_splines", "y_splines", "max_y_splines",
              "xmin", "xmax", "ymin", "ymax",
              "xlabel", "ylabel", "zlabel", "simple"]
    b = spline_array()
    # transfer each attribute by reference, exactly as a field-by-field copy
    for key in copied: setattr(b, key, getattr(a, key))
    b.generate_y_values()
    return b
def load_spline_array(path="ask", text="Give me a spline array to load, jerkface! No, YOU'RE the jerkface."):
    """
    Load a pickled spline_array from disk (file dialog when path=="ask"),
    rebuild it as a fresh instance via copy_spline_array, and re-save the
    rebuilt object to the original path.  Returns the new instance.
    """
    a = _s.load_object(path, text)
    b = copy_spline_array(a)
    # remember where it came from, then overwrite the file with the rebuilt object
    b._path = a._path
    _s.save_object(b, b._path)
    return b
def generate_spline_array(data, y_parameter="field", smoothing=5000, degree=5, presmoothing=0, coarsen=0, autosave=True, show_derivative=1, text="Give me some files to make a spline with.", simple=0):
"""
Asks for a bunch of data files, plots and spline-fits the data (verifying at each step),
and generates a spline_2d object instance filled with the results.
"""
# create an instance of the spline_2d class
s = spline_array()
s.ylabel = y_parameter
# have the user select a file
paths = _s.DialogMultipleFiles('DIS AND DAT|*.dat', text=text, default_directory=data.directory)
if paths == []: return
# loop over each data file, fit it, plot it, ask if it's okay, and move on
for n in range(0,len(paths)):
print "spline "+str(n+1)+"/"+str(len(paths)+1)
# fill up the xdata, ydata, and key
data.get_data(paths[n])
print "y_parameter = "+str(data.constants[y_parameter])
# if this is just a simple interpolator, make a simple one
if simple:
x_spline = spline_single(data.xdata, data.ydata, plot=True, xlabel=data.xlabel, ylabel=data.ylabel, xmin="same", xmax="same", simple=1)
a = raw_input("ya: ")
if a in ["quit", "q"]: return s
if a in ["y", "yes", "\n"]: s.add_x_spline(data.constants[y_parameter], x_spline)
# otherwise, we have to do the whole spline plot/fitting thingy
else:
data.plot(coarsen=coarsen)
x_spline = splot("gca", smoothing, degree, presmoothing, interactive=True, show_derivative=show_derivative, simple=simple)
if x_spline.message == "quit": return s
if x_spline.message == "good": s.add_x_spline(data.constants[y_parameter], x_spline)
s.xlabel = x_spline.xlabel
s.zlabel = x_spline.ylabel
# update the parameters as specified by the user
presmoothing = x_spline.presmoothing
smoothing = x_spline.smoothing
degree = x_spline.degree
print "Complete! "+str(len(paths))+" file and "+str(len(s.x_splines))+" successful splines."
s.simple = simple
if autosave: _s.save_object(s)
return s
def splinteractive(xdata, ydata, smoothing=5000, degree=5, presmoothing=0, spline_class=spline_single, xlabel="x", ylabel="y", show_derivative=1, boost_factor=1.1, xmin="same", xmax="same"):
    """
    Give this function x and y data to spline fit and it will fit it, returning an instance of
    spline_single. It will also ask you if it's okay and you can play with parameters.

    Loops: fit with the current parameters, plot, then read a console command
    (y/n/q accept/reject/quit; l/d/dd replot; p print; [ ] boost smoothing;
    s=/d=/p=/b= set parameters; a bare number sets smoothing).  The returned
    spline's .message records how the loop ended ("good", "no", or "quit").
    """
    while True:
        # fit it
        x_spline = spline_class(xdata, ydata, smoothing, degree, presmoothing, True, xlabel, ylabel, show_derivative, xmin=xmin, xmax=xmax)
        # ask if it's okay
        # (a freshly-constructed spline has message == "", so this prompts)
        if x_spline.message == "": command = raw_input("What now? ")
        else:                      command = x_spline.message
        # a bare number is shorthand for setting the smoothing
        try:
            float(command)
            command = "s="+command
        except:
            print "parsing..."
        # deal with simple commands
        if command in ["y", "Y", "yes"]:
            x_spline.message = "good"
            smoothing = x_spline.smoothing
            return x_spline
        elif command in ["n", "N", "no"]:
            x_spline.message = "no"
            smoothing = x_spline.smoothing
            return x_spline
        elif command in ["q", "Q", "quit", "exit"]:
            x_spline.message = "quit"
            smoothing = x_spline.smoothing
            return x_spline
        elif command in ["l", "L", "line"]:
            # replot just the spline curve (no raw data)
            x_spline.message = "line"
            smoothing = x_spline.smoothing
            x_spline.plot()
            raw_input("(press <enter> when done looking)")
        # NOTE(review): raw_input() strips the newline, so the "\n" entry here
        # can never match; a bare <enter> arrives as "" and falls through to
        # the help text instead -- confirm intended behavior
        elif command in ["d", "D", "derivative", "\n"]:
            x_spline.message = "derivative"
            smoothing = x_spline.smoothing
            x_spline.plot(derivative=1)
            raw_input("(press <enter> when done looking)")
        elif command in ["dd", "DD"]:
            x_spline.message = "derivative 2"
            x_spline.plot(derivative=2)
            smoothing = x_spline.smoothing
            raw_input("(press <enter> when done looking)")
        elif command in ["p", "P", "print", "printer"]:
            # send the current figure to the printer
            x_spline.message = "print"
            smoothing = x_spline.smoothing
            _s.printer()
        elif command == "[":
            x_spline.message = "unboost smoothing"
            smoothing = smoothing / boost_factor
        elif command == "]":
            x_spline.message = "boost smoothing"
            smoothing = smoothing * boost_factor
        # deal with parameter changes of the form name=value
        elif command.split("=")[0].strip() in ["s", "smoothing"]:
            try:    smoothing = eval(command.split("=")[1].strip())
            except: print "Surely you can give me a better number than THAT piece of shit."
        elif command.split("=")[0].strip() in ["d", "degree"]:
            try:    degree = int(eval(command.split("=")[1].strip()))
            except: print "Nice try, ass. Learn how to enter data."
        elif command.split("=")[0].strip() in ["p", "pre", "presmoothing"]:
            try:    presmoothing = int(eval(command.split("=")[1].strip()))
            except: print "Nice work. Yeah, not really."
        elif command.split("=")[0].strip() in ["b", "boost"]:
            try:    boost_factor = float(eval(command.split("=")[1].strip()))
            except: print "Nice try smelly fart."
        # idiot at the controls
        else:
            print "You need help.\n"
            print "------- COMMAND EXAMPLES -------\n"
            print "y        Yes, it looks good, move on."
            print "n        No, and there's no hope.  Ignore and move on."
            print "q        Quit."
            print "s=1000   Set the smoothing to 10"
            print "d=3      Set the degree to 3"
            print "p=5      Set the presmoothing to 5"
            print "l        Show me the spline without data"
            print "d        Show me the derivative"
            print "]        Boost the smoothing"
            print "[        Reduce the smoothing"
            print "b=1.2    Set the boost factor to 1.2"
            smoothing = x_spline.smoothing
def splot(axes="gca", smoothing=5000, degree=5, presmoothing=0, plot=True, spline_class=spline_single, interactive=True, show_derivative=1, simple=0):
    """
    gets the data from the plot and feeds it into splinteractive (or spline_class)
    returns an instance of spline_single

    axes="gca"                  which axes to get the data from.
    smoothing=5000              spline_single smoothing parameter
    degree=5                    spline degree
    presmoothing=0              spline_single data presmoothing factor (nearest neighbor)
    plot=True                   should we plot the result? (non-interactive mode only)
    spline_class=spline_single  which data class to use?
    interactive=True            should we spline fit interactively or just make a spline_single?
    show_derivative=1           derivative to display alongside the fit
    simple=0                    build a plain interpolator (non-interactive mode only);
                                accepted here so generate_spline_array's
                                splot(..., simple=simple) call no longer raises TypeError
    """
    # pull labels and the first line's data off the chosen axes
    if axes=="gca": axes = _pylab.gca()
    xlabel = axes.xaxis.label.get_text()
    ylabel = axes.yaxis.label.get_text()
    xdata = axes.get_lines()[0].get_xdata()
    ydata = axes.get_lines()[0].get_ydata()
    if interactive:
        # forward show_derivative (it used to be accepted but silently dropped)
        return splinteractive(xdata, ydata, smoothing, degree, presmoothing, spline_class, xlabel, ylabel, show_derivative)
    else:
        return spline_class(xdata, ydata, smoothing, degree, presmoothing, plot, xlabel, ylabel, show_derivative, simple=simple)
|
streitho/spinmob | _plot_realimag.py | import _plotting_mess; reload(_plotting_mess)
# Short user-facing aliases for the real/imaginary plotting routines.
data = _plotting_mess.realimag_data
databoxes = _plotting_mess.realimag_databoxes
files = _plotting_mess.realimag_files
function = _plotting_mess.realimag_function
|
streitho/spinmob | _plot_image.py | import _plotting_mess
# Short user-facing aliases for the image plotting routines.
data = _plotting_mess.image_data
function = _plotting_mess.image_function
|
streitho/spinmob | _pylab_helper_frame.py | <filename>_pylab_helper_frame.py<gh_stars>0
#Boa:Frame:FrameMain
import time
import wx
from wx.lib.anchors import LayoutAnchors
import matplotlib
from matplotlib import pylab
from pylab_helper_standalones import *
def create(parent):
    # Boa-style factory: build and return the main plot-tweaks frame.
    return FrameMain(parent)
# Auto-generated (Boa) wx widget IDs, one per control built in FrameMain._init_ctrls.
[wxID_FRAMEMAIN, wxID_FRAMEMAINBUTTONAPPLY, wxID_FRAMEMAINBUTTONAPPLYALL,
wxID_FRAMEMAINBUTTONAUTOSCALE, wxID_FRAMEMAINBUTTONTIDY,
wxID_FRAMEMAINTEXTCTRLVALUE, wxID_FRAMEMAINTREECTRLMAIN,
] = [wx.NewId() for _init_ctrls in range(7)]
class FrameMain(wx.Frame):
    """
    Boa-generated frame that lists every open matplotlib (wxagg) figure in a
    tree -- figure -> axes -> (title, y-scaling, lines) -> line attributes --
    and lets the user edit the selected item's value, apply a line attribute
    to all lines at once, "uebertidy" a figure for printing, and autoscale.

    Each tree item carries a data dictionary (see FillTree) linking it back to
    its window/figure/axes/line.
    """
    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Frame.__init__(self, id=wxID_FRAMEMAIN, name='FrameMain',
              parent=prnt, pos=wx.Point(639, 0), size=wx.Size(385, 424),
              style=wx.DEFAULT_FRAME_STYLE, title='Fancy Pantsy')
        self.SetClientSize(wx.Size(377, 390))
        self.SetBackgroundStyle(wx.BG_STYLE_COLOUR)
        self.SetBackgroundColour(wx.Colour(222, 222, 240))
        self.TreeCtrlMain = wx.TreeCtrl(id=wxID_FRAMEMAINTREECTRLMAIN,
              name='TreeCtrlMain', parent=self, pos=wx.Point(8, 8),
              size=wx.Size(360, 296), style=wx.TR_SINGLE | wx.TR_HAS_BUTTONS)
        self.TreeCtrlMain.SetSpacing(8)
        self.TreeCtrlMain.Bind(wx.EVT_TREE_SEL_CHANGED,
              self.OnTreeCtrlMainSelChanged, id=wxID_FRAMEMAINTREECTRLMAIN)
        self.TextCtrlValue = wx.TextCtrl(id=wxID_FRAMEMAINTEXTCTRLVALUE,
              name='TextCtrlValue', parent=self, pos=wx.Point(8, 308),
              size=wx.Size(260, 48), style=wx.TE_LINEWRAP | wx.TE_MULTILINE,
              value='')
        self.TextCtrlValue.Bind(wx.EVT_TEXT_ENTER, self.OnButtonApplyButton,
              id=wxID_FRAMEMAINTEXTCTRLVALUE)
        self.ButtonApply = wx.Button(id=wxID_FRAMEMAINBUTTONAPPLY,
              label='Apply', name='ButtonApply', parent=self, pos=wx.Point(272,
              308), size=wx.Size(96, 23), style=0)
        self.ButtonApply.Bind(wx.EVT_BUTTON, self.OnButtonApplyButton,
              id=wxID_FRAMEMAINBUTTONAPPLY)
        self.ButtonTidy = wx.Button(id=wxID_FRAMEMAINBUTTONTIDY,
              label='\xdcbertidy', name='ButtonTidy', parent=self,
              pos=wx.Point(104, 360), size=wx.Size(96, 23), style=0)
        self.ButtonTidy.Bind(wx.EVT_BUTTON, self.OnButtonTidyButton,
              id=wxID_FRAMEMAINBUTTONTIDY)
        self.ButtonApplyAll = wx.Button(id=wxID_FRAMEMAINBUTTONAPPLYALL,
              label='Apply All', name='ButtonApplyAll', parent=self,
              pos=wx.Point(272, 332), size=wx.Size(96, 23), style=0)
        self.ButtonApplyAll.Enable(False)
        self.ButtonApplyAll.Bind(wx.EVT_BUTTON, self.OnButtonApplyAllButton,
              id=wxID_FRAMEMAINBUTTONAPPLYALL)
        self.ButtonAutoscale = wx.Button(id=wxID_FRAMEMAINBUTTONAUTOSCALE,
              label='Autoscale', name='ButtonAutoscale', parent=self,
              pos=wx.Point(8, 360), size=wx.Size(96, 23), style=0)
        self.ButtonAutoscale.SetToolTipString('Autoscale')
        self.ButtonAutoscale.Bind(wx.EVT_BUTTON, self.OnButtonAutoscaleButton,
              id=wxID_FRAMEMAINBUTTONAUTOSCALE)
    def __init__(self, parent):
        """Build the controls, define the editable line attributes, and fill the tree."""
        self._init_ctrls(parent)
        # T is used a lot
        T = self.TreeCtrlMain
        # here is where we make the root of the tree
        self.tree_root = T.AddRoot("Current Session")
        # here is where we define the line attributes
        self.line_attributes = ["linestyle", "linewidth", "color", "marker", \
                                "markersize", "markerfacecolor", \
                                "markeredgewidth", "markeredgecolor"]
        # now fill the tree
        self.FillTree()
        T.SelectItem(self.tree_root)
    ########################
    # Standalone functions #
    ########################
    # This is the MEAT function.
    # given the current wx session, this function
    # finds all the axes/lines/attributes and fills up
    # the main tree.
    def FillTree(self):
        """
        Scan all top-level wx windows for wxagg plot frames and rebuild the
        whole tree: one branch per figure, per axes (with title and y-scale
        items), per line, per line attribute.  Every item stores a dictionary
        with keys string/type/self/parent/children/window/figure/axes/line.
        """
        # For ease of coding
        T = self.TreeCtrlMain
        # starting from the top, grab ALL the wx windows available
        w = wx.GetTopLevelWindows()
        # find all the windows that are plot windows for wxagg
        self.plot_windows = []
        for x in w:
            if type(x) == matplotlib.backends.backend_wxagg.FigureFrameWxAgg:
                self.plot_windows.append(x)
        # get all the figures associated with these windows
        self.figures = []
        for x in self.plot_windows:
            self.figures.append(x.canvas.figure)
        # all items in the tree must have data associated with it
        # we use a dictionary of important values for ease of coding
        # children, parents and selfs are all tree ID's
        T.SetItemData(self.tree_root,
                      wx.TreeItemData({"string":"Current Session",
                                       "type":"tree_root_title",
                                       "self":self.tree_root,
                                       "parent":None,
                                       "children":[],
                                       "window":None,
                                       "figure":None,
                                       "axes":None,
                                       "line":None}))
        # fill the tree items!
        # kill all the root's children
        T.DeleteChildren(self.tree_root)
        # now loop over the figures and add a branch for each
        for nf in range(0,len(self.figures)):
            f = self.figures[nf]
            w = self.plot_windows[nf]
            # add the tree item corresponding to the figure
            cf_title = self.plot_windows[nf].GetTitle()
            cf = T.AppendItem(self.tree_root, cf_title)
            # we need to append this to the children array in the root
            self.AppendTreeItemData(self.tree_root, "children", cf)
            # we also need to set the tree item data for this item
            T.SetItemData(cf,
                          wx.TreeItemData({"string":cf_title,
                                           "type":"figure_title",
                                           "self":cf,
                                           "parent":self.tree_root,
                                           "children":[],
                                           "window":w,
                                           "figure":f,
                                           "axes":None,
                                           "line":None}))
            # now loop over the axes
            for na in range(0,len(f.axes)):
                a = f.axes[na]
                # add the axes tree item to the figure item
                ca_title = "axes "+str(na)
                ca = T.AppendItem(cf, ca_title)
                # now append this id to the children array of the figure
                self.AppendTreeItemData(cf, "children", ca)
                # we also need to set the tree item data for this item
                T.SetItemData(ca,
                              wx.TreeItemData({"string":ca_title,
                                               "type":"axes_tree_label",
                                               "self":ca,
                                               "parent":cf,
                                               "children":[],
                                               "window":w,
                                               "figure":f,
                                               "axes":a,
                                               "line":None}))
                # add the "axes title" item to the tree and axes children list
                p = T.AppendItem(ca, "Axes Title")
                self.AppendTreeItemData(ca, "children", p)
                T.SetItemData(p,
                              wx.TreeItemData({"string":a.title.get_text(),
                                               "type":"axes_title",
                                               "self":p,
                                               "parent":ca,
                                               "children":[],
                                               "window":w,
                                               "figure":f,
                                               "axes":a,
                                               "line":None}))
                # add the "y-scale" item to the tree and axes children list
                # (last_value remembers the previous scale so SetYScale can undo it)
                p = T.AppendItem(ca, "y-scaling = 1.0")
                self.AppendTreeItemData(ca, "children", p)
                T.SetItemData(p,
                              wx.TreeItemData({"type":"y_scale",
                                               "string":"1.0",
                                               "last_value":1.0,
                                               "self":p,
                                               "parent":ca,
                                               "children":[],
                                               "window":w,
                                               "figure":f,
                                               "axes":a,
                                               "line":None}))
                # now loop over the lines
                lines = a.get_lines()
                for nl in range(0,len(lines)):
                    l = lines[nl]
                    # add the axes tree item to the figure item
                    cl_title = "line "+str(nl)
                    cl = T.AppendItem(ca, cl_title)
                    # now append this id to the children array of the figure
                    self.AppendTreeItemData(ca, "children", cl)
                    # we also need to set the tree item data for this item
                    T.SetItemData(cl,
                                  wx.TreeItemData({"string":cl_title,
                                                   "type":"line_title",
                                                   "self":cl,
                                                   "parent":ca,
                                                   "children":[],
                                                   "window":w,
                                                   "figure":f,
                                                   "axes":a,
                                                   "line":l}))
                    # now we set the individual line attributes
                    for x in self.line_attributes:
                        # make the tree branch
                        y = T.AppendItem(cl, x+": '"+str(pylab.getp(l,x))+"'")
                        self.AppendTreeItemData(cl, "children", y)
                        T.SetItemData(y,
                                      wx.TreeItemData({"string": str(pylab.getp(l,x)),
                                                       "type":x,
                                                       "self":y,
                                                       "parent":cl,
                                                       "children":[],
                                                       "window":w,
                                                       "figure":f,
                                                       "axes":a,
                                                       "line":l}))
        # now expand the bitch
        T.Expand(self.tree_root)
    def GetTreeItemData(self, id, key):
        """Return data[key] from the dictionary stored on tree item id."""
        return(self.TreeCtrlMain.GetItemData(id).GetData()[key])
    def SetTreeItemData(self, id, key, new_value):
        """Set data[key] = new_value in the dictionary stored on tree item id."""
        T = self.TreeCtrlMain
        data = T.GetItemData(id).GetData()
        data[key] = new_value
        T.SetItemData(id, wx.TreeItemData(data))
    def AppendTreeItemData(self, id, key, new_value):
        """Append new_value to the list data[key] stored on tree item id."""
        # store in a temp variable
        T = self.TreeCtrlMain
        # get the data from the tree item id
        data = T.GetItemData(id).GetData()
        # set the key/value pair
        data[key].append(new_value)
        # update the tree item data
        T.SetItemData(id, wx.TreeItemData(data))
        return
    def SetYScale(self, id, new_string):
        """
        Rescale the ydata of every line on this item's axes by
        float(new_string)/last_value, update the tree label, and autoscale.
        Silently ignores a new_string that doesn't parse as a float.
        """
        T = self.TreeCtrlMain
        # get the axes associated with this, and the previous value
        a = self.GetTreeItemData(id, "axes")
        old_scale = self.GetTreeItemData(id, "last_value")
        try:    scale = float(new_string)
        except: return
        # get all the lines associated with the axes
        lines = a.get_lines()
        # for each line, scale the ydata
        for l in lines:
            y = l.get_ydata()
            for n in range(0,len(y)):
                y[n] = y[n]*scale/old_scale
            l.set_ydata(y)
        # update the tree
        T.SetItemText(id, "y-scaling = "+new_string)
        # update the old value
        self.SetTreeItemData(id, "last_value", scale)
        # now autoscale to the data + 3% margins
        self.OnButtonAutoscaleButton(None)
    def SetAttribute(self, id, new_string):
        """
        Apply new_string to tree item id according to its "type": tree/figure/
        axes/line labels, the axes title, the y-scale, or a matplotlib line
        attribute (numeric strings are converted to float before setp).
        """
        T = self.TreeCtrlMain
        # get the attribute type from the tree id data
        type   = self.GetTreeItemData(id, "type")
        line   = self.GetTreeItemData(id, "line")
        axes   = self.GetTreeItemData(id, "axes")
        figure = self.GetTreeItemData(id, "figure")
        window = self.GetTreeItemData(id, "window")
        # if it's just a title, all we update is the tree (not the plot)
        if   type == "tree_root_title": T.SetItemText(id, new_string)
        elif type == "figure_title":
            T.SetItemText(id, new_string)
            window.SetTitle(new_string)
        elif type == "axes_tree_label":
            T.SetItemText(id, new_string)
        elif type == "axes_title":
            axes.title.set_text(new_string)
        elif type == "y_scale":
            self.SetYScale(id, new_string)
        elif type == "line_title":
            T.SetItemText(id, new_string)
        # if it's a line attribute
        elif type in self.line_attributes and not line == None:
            # update the title string
            formatted = type+": '"+new_string+"'"
            T.SetItemText(id, formatted)
            # also edit the attribute
            # (is_a_number comes from pylab_helper_standalones -- TODO confirm)
            if is_a_number(new_string):
                pylab.setp(line, type, float(new_string))
            else:
                pylab.setp(line, type, new_string)
        figure.canvas.Refresh()
    def FindChildType(self, selection, type):
        """Return the first child of selection whose "type" matches, else None."""
        # this looks for a child with of specified type
        # in the specified selection
        children = self.GetTreeItemData(selection, "children")
        # loop over the children
        for c in children:
            if self.GetTreeItemData(c,"type") == type:
                return c
        return None
    ##########
    # Events #
    ##########
    def OnButtonTidyButton(self, event):
        """
        "Uebertidy" the selected figure for printing: fixed 500x500 window,
        thick frame, outward bold ticks, no top/right ticks, bold Arial tick
        labels, no titles/labels, legend moved off-axes, then autoscale.
        """
        # for easy coding
        T = self.TreeCtrlMain
        s = T.GetSelection()
        f = self.GetTreeItemData(s, "figure")
        w = self.GetTreeItemData(s, "window")
        # set the current figure
        pylab.figure(f.number)
        # first set the size of the window
        w.SetSize([500,500])
        # now loop over all the data and get the range
        lines = f.axes[0].get_lines()
        # we want thick lines
        # NOTE(review): Axes.get_frame() is a very old matplotlib API -- confirm
        # against the matplotlib version this targets
        f.axes[0].get_frame().set_linewidth(3.0)
        # get the tick lines in one big list
        xticklines = f.axes[0].get_xticklines()
        yticklines = f.axes[0].get_yticklines()
        # set their marker edge width
        pylab.setp(xticklines+yticklines, mew=2.0)
        # set what kind of tickline they are (outside axes)
        for l in xticklines: l.set_marker(matplotlib.lines.TICKDOWN)
        for l in yticklines: l.set_marker(matplotlib.lines.TICKLEFT)
        # get rid of the top and right ticks
        f.axes[0].xaxis.tick_bottom()
        f.axes[0].yaxis.tick_left()
        # we want bold fonts
        pylab.xticks(fontsize=20, fontweight='bold', fontname='Arial')
        pylab.yticks(fontsize=20, fontweight='bold', fontname='Arial')
        # we want to give the labels some breathing room (1% of the data range)
        for label in pylab.xticks()[1]:
            label.set_y(-0.02)
        for label in pylab.yticks()[1]:
            label.set_x(-0.01)
        # set the position/size of the axis in the window
        f.axes[0].set_position([0.1,0.1,0.8,0.8])
        # set the axis labels
        f.axes[0].set_title('')
        f.axes[0].set_xlabel('')
        f.axes[0].set_ylabel('')
        # set the position of the legend far away
        f.axes[0].legend(loc=[1.2,0])
        f.canvas.Refresh()
        # autoscale
        self.OnButtonAutoscaleButton(None)
    def OnTreeCtrlMainSelChanged(self, event):
        """Mirror the newly-selected item's value into the text box and enable/disable buttons."""
        T = self.TreeCtrlMain
        # get the current selection
        # all branches should have a my_value variable (string)
        s = T.GetSelection()
        # set the value in the value box to that of this tree branch
        self.TextCtrlValue.SetValue(self.GetTreeItemData(s,"string"))
        # "Apply All" only makes sense for a line attribute
        if self.GetTreeItemData(s,"type") in self.line_attributes:
            self.ButtonApplyAll.Enable()
        else:
            self.ButtonApplyAll.Disable()
        # "Autoscale" only makes sense when the item belongs to some axes
        if self.GetTreeItemData(s,"axes") == None:
            self.ButtonAutoscale.Disable()
        else:
            self.ButtonAutoscale.Enable()
        # highlight the text in the box
        self.TextCtrlValue.SetFocus()
    def OnButtonApplyButton(self, event):
        """Apply the text box's value to the currently-selected tree item."""
        T = self.TreeCtrlMain
        s = T.GetSelection()
        # get the value from the text box
        new_string = self.TextCtrlValue.GetValue()
        # set the attribute (updates the plot too)
        self.SetAttribute(s, new_string)
    def OnButtonApplyAllButton(self, event):
        """Apply the selected line attribute's new value to every line on the same axes."""
        # get the current selection (an attribute item under a line item)
        s = self.TreeCtrlMain.GetSelection()
        f = self.GetTreeItemData(s, "figure")
        # get the parent axes (attribute -> line -> axes)
        p = self.GetTreeItemData(s, "parent")
        p = self.GetTreeItemData(p, "parent")
        # get all the children
        children = self.GetTreeItemData(p, "children")
        # get all the children that have an associated line
        line_children = []
        for child in children:
            if self.GetTreeItemData(child, "type") == "line_title":
                line_children.append(child)
        # get the value from the text box
        new_string = self.TextCtrlValue.GetValue()
        type = self.GetTreeItemData(s, "type")
        # loop over the line children
        for child in line_children:
            # find the child that has the same type
            s = self.FindChildType(child, type)
            # set the attribute
            self.SetAttribute(s, new_string)
        f.canvas.Refresh()
    def OnButtonAutoscaleButton(self, event):
        """Autoscale the selected axes to its data plus a 3% margin on every side."""
        # Autoscales the data on the currently selected axes
        # for ease of coding:
        T = self.TreeCtrlMain
        s = T.GetSelection()
        f = self.GetTreeItemData(s, "figure")
        a = self.GetTreeItemData(s, "axes")
        # get all the lines
        lines = a.get_lines()
        xdata = []
        ydata = []
        # get all the data into one giant array
        for n in range(0,len(lines)):
            x = lines[n].get_xdata()
            y = lines[n].get_ydata()
            for m in range(0,len(x)):
                xdata.append(x[m])
                ydata.append(y[m])
        xmin = min(xdata)
        xmax = max(xdata)
        ymin = min(ydata)
        ymax = max(ydata)
        # we want a 3% white space boundary surrounding the data in our plot
        # so set the range accordingly
        a.set_xlim(xmin-0.03*(xmax-xmin), xmax+0.03*(xmax-xmin))
        a.set_ylim(ymin-0.03*(ymax-ymin), ymax+0.03*(ymax-ymin))
        f.canvas.Refresh()
streitho/spinmob | _plotting_mess.py | <gh_stars>0
import os as _os
import pylab as _pylab
import numpy as _numpy
import itertools as _itertools
import time as _time
import _functions as _fun
import _pylab_tweaks as _pt
import spinmob as _s
# for the user to get at
tweaks = _pt
_n = _numpy
# expose all the eval statements to all the functions in numpy
from numpy import *
#
# General plotting routines
#
def complex_data(data, edata=None, **kwargs):
    """
    Plots the X and Y of complex data.

    data     complex data (array, or list of arrays / values)
    edata    complex error, or None for no error bars

    kwargs are sent to spinmob.plot.xy.data()
    """
    # fast path: let numpy split everything at once
    try:
        rdata = _n.real(data)
        idata = _n.imag(data)

        # BUGFIX: use "is None" — "edata == None" elementwise-compares
        # numpy arrays and yields an ambiguous truth value
        if edata is None:
            erdata = None
            eidata = None
        else:
            erdata = _n.real(edata)
            eidata = _n.imag(edata)

    # ragged / mixed input: generate the data the hard way, element by element
    except Exception:
        rdata = []
        idata = []
        if edata is None:
            erdata = None
            eidata = None
        else:
            erdata = []
            eidata = []

        for n in range(len(data)):
            rdata.append(_n.real(data[n]))
            idata.append(_n.imag(data[n]))
            if edata is not None:
                erdata.append(_n.real(edata[n]))
                eidata.append(_n.imag(edata[n]))

    # default axis labels for the complex plane
    kwargs.setdefault('xlabel', 'Real')
    kwargs.setdefault('ylabel', 'Imaginary')

    # x is the real part so x-error is the real error;
    # y is the imaginary part so y-error is the imaginary error
    return xy_data(rdata, idata, eidata, erdata, **kwargs)
def complex_databoxes(ds, script='c(1)+1j*c(2)', escript=None, **kwargs):
    """
    Use script to generate data and send to harrisgroup.plot.complex_data()

    ds        list of databoxes
    script    complex script
    escript   complex script for error bars

    **kwargs are sent to spinmob.plot.complex.data()
    """
    datas  = []
    labels = []
    errors = None if escript==None else []

    for d in ds:
        datas.append(d(script))
        # label each curve by its file name
        labels.append(_os.path.split(d.path)[-1])
        if not escript==None: errors.append(d(escript))

    return complex_data(datas, errors, label=labels, **kwargs)
def complex_files(script='c(1)+1j*c(2)', **kwargs):
    """
    Loads and plots complex data in the real-imaginary plane.

    **kwargs are sent to spinmob.plot.complex.databoxes()
    """
    ds = _s.data.load_multiple()
    if len(ds) == 0: return

    # default the title to the directory the files came from
    if not kwargs.has_key('title'):
        kwargs['title'] = _os.path.split(ds[0].path)[0]

    return complex_databoxes(ds, script=script, **kwargs)
def complex_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Plots the function over the specified range in the complex plane.

    f                   complex-valued function or list of functions to plot;
                        can be string functions
    xmin, xmax, steps   range over which to plot, and how many points to plot
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()!
    erange              Use exponential spacing of the x data?

    **kwargs are sent to spinmob.plot.xy.data()
    """
    # default to real/imaginary axis labels, but let the caller override
    options = dict(xlabel='Real', ylabel='Imaginary')
    options.update(kwargs)
    return function(f, xmin, xmax, steps, p, g, erange,
                    plotter=xy_data, complex_plane=True, **options)
def magphase_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', mscale='linear', pscale='linear', mlabel='Magnitude', plabel='Phase', phase='degrees', figure='gcf', clear=1, **kwargs):
    """
    Plots the magnitude and phase of complex ydata on two stacked axes.

    xdata               real-valued x-axis data
    ydata               complex-valued y-axis data
    eydata=None         complex-valued y-error
    exdata=None         real-valued x-error
    xscale='linear'     'log' or 'linear'
    mscale='linear'     'log' or 'linear' (only applies to the magnitude graph)
    pscale='linear'     'log' or 'linear' (only applies to the phase graph)
    mlabel='Magnitude'  y-axis label for magnitude plot
    plabel='Phase'      y-axis label for phase plot
    phase='degrees'     'degrees' or 'radians'
    figure='gcf'        figure instance
    clear=1             clear the figure?

    kwargs are sent to plot.xy.data()
    """
    if figure == 'gcf': f = _pylab.gcf()
    if clear: f.clear()

    # magnitude on top, phase below, sharing the x axis
    axes1 = _pylab.subplot(211)
    axes2 = _pylab.subplot(212,sharex=axes1)

    m = _n.abs(ydata)
    p = _n.angle(ydata)
    if phase=='degrees':
        for n in range(len(ydata)):
            p[n] = p[n]*180.0/_n.pi

    # do the elliptical error transformation, point by point
    # (BUGFIX: restored the per-point loop — the body referenced eydata[n],
    # p[n] and m[n], which only makes sense inside a loop over n)
    em = []
    ep = []
    er = []
    ei = []
    if eydata==None:
        em = None
        ep = None
        er = None
        ei = None
    else:
        for n in range(len(ydata)):
            if eydata[n] == None:
                em.append(None)
                ep.append(None)
            else:
                er = _n.real(eydata[n])
                ei = _n.imag(eydata[n])
                # NOTE(review): if phase=='degrees', p[n] has already been
                # converted, so cos() receives degrees here — confirm intended
                em.append(0.5*((er+ei) + (er-ei)*_n.cos(p[n])) )
                ep.append(0.5*((er+ei) - (er-ei)*_n.cos(p[n]))/m[n] )

            # convert the phase error to degrees
            if phase=='degrees':
                if not ep[n]==None: ep[n] = ep[n]*180.0/_n.pi

    if phase=='degrees': plabel = plabel + " (degrees)"
    else:                plabel = plabel + " (radians)"

    # pull the x label off; it only belongs on the bottom (phase) axes
    if kwargs.has_key('xlabel'): xlabel=kwargs.pop('xlabel')
    else:                        xlabel=''
    if kwargs.has_key('ylabel'): kwargs.pop('ylabel')

    if not kwargs.has_key('draw'):       kwargs['draw']       = False
    if not kwargs.has_key('tall'):       kwargs['tall']       = False
    if not kwargs.has_key('autoformat'): kwargs['autoformat'] = True

    # only autoformat once, after the second plot
    autoformat = kwargs['autoformat']
    kwargs['autoformat'] = False
    kwargs['xlabel'] = ''
    xy_data(xdata, m, em, exdata, ylabel=mlabel, axes=axes1, clear=0, xscale=xscale, yscale=mscale, **kwargs)

    kwargs['autoformat'] = autoformat
    kwargs['xlabel'] = xlabel
    xy_data(xdata, p, ep, exdata, ylabel=plabel, axes=axes2, clear=0, xscale=xscale, yscale=pscale, **kwargs)

    axes2.set_title('')
    _pt.auto_zoom(axes=axes1)
    _pylab.draw()
def magphase_databoxes(ds, xscript=0, yscript='c(1)+1j*c(2)', eyscript=None, exscript=None, **kwargs):
    """
    Use script to generate data and plot its magnitude and phase.

    ds          list of databoxes
    xscript     script for x data
    yscript     script for y data
    eyscript    script for y error
    exscript    script for x error

    **kwargs are sent to spinmob.plot.mag_phase.data()
    """
    # delegate to the generic databox plotter with the mag/phase backend
    return databoxes(ds, xscript, yscript, eyscript, exscript,
                     plotter=magphase_data, **kwargs)
def magphase_files(xscript=0, yscript='c(1)+1j*c(2)', eyscript=None, exscript=None, **kwargs):
    """
    Loads a bunch of data files, generates data based on the supplied
    scripts, and plots magnitude and phase.

    xscript, yscript, eyscript, exscript    scripts to generate x, y, and errors

    **kwargs are sent to spinmob.plot.mag_phase.databoxes()
    """
    # delegate to the generic file loader with the mag/phase databox plotter
    return files(xscript, yscript, eyscript, exscript,
                 plotter=magphase_databoxes, **kwargs)
def magphase_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Plots the magnitude and phase of a function over the specified range.

    f                   function or list of functions to plot; can be string functions
    xmin, xmax, steps   range over which to plot, and how many points to plot
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()!
    erange              Use exponential spacing of the x data?

    **kwargs are sent to plot.mag_phase.data()
    """
    # delegate to the generic function evaluator with the mag/phase backend
    return function(f, xmin, xmax, steps, p, g, erange,
                    plotter=magphase_data, **kwargs)
def realimag_data(xdata, ydata, eydata=None, exdata=None, xscale='linear', rscale='linear', iscale='linear', rlabel='Real', ilabel='Imaginary', figure='gcf', clear=1, **kwargs):
    """
    Plots the real and imaginary parts of complex ydata on two stacked axes.

    xdata               real-valued x-data
    ydata               complex-valued y-data
    eydata              complex-valued error on y-data
    exdata              real-valued error on x-data
    xscale='linear'     'log' or 'linear'
    rscale='linear'     'log' or 'linear' for the real yscale
    iscale='linear'     'log' or 'linear' for the imaginary yscale
    rlabel='Real'       y-axis label for the real plot
    ilabel='Imaginary'  y-axis label for the imaginary plot
    figure='gcf'        figure instance
    clear=1             clear the figure?

    kwargs are sent to plot.xy.data()
    """
    if figure == 'gcf': f = _pylab.gcf()
    if clear: f.clear()

    # real part on top, imaginary part below, sharing the x axis
    axes1 = _pylab.subplot(211)
    axes2 = _pylab.subplot(212,sharex=axes1)

    rdata = _n.real(ydata)
    idata = _n.imag(ydata)

    # HACK!! only check the first element for None-ness
    if eydata==None or eydata[0]==None:
        erdata = None
        eidata = None
    else:
        erdata = _n.real(eydata)
        eidata = _n.imag(eydata)

    # pull the x label off; it only belongs on the bottom axes
    if kwargs.has_key('xlabel'): xlabel=kwargs.pop('xlabel')
    else:                        xlabel=''
    if kwargs.has_key('ylabel'): kwargs.pop('ylabel')

    if not kwargs.has_key('draw'):       kwargs['draw']       = False
    if not kwargs.has_key('tall'):       kwargs['tall']       = False
    if not kwargs.has_key('autoformat'): kwargs['autoformat'] = True

    # only autoformat once, after the second plot
    autoformat = kwargs['autoformat']
    kwargs['autoformat'] = False
    kwargs['xlabel'] = ''
    xy_data(xdata, rdata, eydata=erdata, exdata=exdata, ylabel=rlabel, axes=axes1, clear=0, xscale=xscale, yscale=rscale, **kwargs)

    kwargs['autoformat'] = autoformat
    kwargs['xlabel'] = xlabel
    xy_data(xdata, idata, eydata=eidata, exdata=exdata, ylabel=ilabel, axes=axes2, clear=0, xscale=xscale, yscale=iscale, **kwargs)

    axes2.set_title('')
    _pylab.draw()
def realimag_databoxes(ds, xscript=0, yscript='c(1)+1j*c(2)', eyscript=None, exscript=None, **kwargs):
    """
    Use script to generate data and plot its real and imaginary parts.

    ds          list of databoxes
    xscript     script for x data
    yscript     script for y data
    eyscript    script for y error
    exscript    script for x error

    **kwargs are sent to spinmob.plot.real_imag.data()
    """
    # delegate to the generic databox plotter with the real/imag backend
    return databoxes(ds, xscript, yscript, eyscript, exscript,
                     plotter=realimag_data, **kwargs)
def realimag_files(xscript=0, yscript='c(1)+1j*c(2)', eyscript=None, exscript=None, **kwargs):
    """
    Loads a bunch of data files, generates data based on the supplied
    scripts, and plots real and imaginary parts.

    xscript, yscript, eyscript, exscript    scripts to generate x, y, and errors

    **kwargs are sent to spinmob.plot.real_imag.databoxes()
    """
    # delegate to the generic file loader with the real/imag databox plotter
    return files(xscript, yscript, eyscript, exscript,
                 plotter=realimag_databoxes, **kwargs)
def realimag_function(f='1.0/(1+1j*x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Plots the real and imaginary parts of a function over the specified range.

    f                   function or list of functions to plot; can be string functions
    xmin, xmax, steps   range over which to plot, and how many points to plot
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()!
    erange              Use exponential spacing of the x data?

    **kwargs are sent to spinmob.plot.real_imag.data()
    """
    # delegate to the generic function evaluator with the real/imag backend
    return function(f, xmin, xmax, steps, p, g, erange,
                    plotter=realimag_data, **kwargs)
def xy_data(xdata, ydata, eydata=None, exdata=None, label=None, xlabel='', ylabel='', \
title='', pyshell_history=1, xshift=0, yshift=0, xshift_every=1, yshift_every=1, \
coarsen=0, style=None, clear=True, axes=None, xscale='linear', yscale='linear', grid=False, \
legend='best', autoformat=True, tall=False, draw=True, **kwargs):
"""
Plots specified data.
xdata, ydata Arrays (or arrays of arrays) of data to plot
eydata, exdata Arrays of x and y errorbar values
label string or array of strings for the line labels
xlabel='' label for the x-axis
ylabel='' label for the y-axis
title='' title for the axes; set to None to have nothing.
pyshell_history=1 how many commands from the pyshell history to include
with the title
xshift=0, yshift=0 progressive shifts on the data, to make waterfall plots
xshift_every=1 perform the progressive shift every 1 or n'th line.
yshift_every=1 perform the progressive shift every 1 or n'th line.
style style cycle object.
clear=True if no axes are specified, clear the figure, otherwise
clear just the axes.
axes=None which axes to use, or "gca" for the current axes
xscale,yscale 'linear' by default. Set either to 'log' for log axes
grid=False Should we draw a grid on the axes?
legend='best' where to place the legend (see pylab.legend())
Set this to None to ignore the legend.
autoformat=True Should we format the figure for printing?
False Should the format be tall?
draw=True whether or not to draw the plot after plotting
**kwargs are sent to pylab.errorbar()
"""
# make sure everything is at least iterable.
if not _fun.is_iterable(xdata): xdata = [xdata]
if not _fun.is_iterable(exdata): exdata = [exdata]
if not _fun.is_iterable(ydata): ydata = [ydata]
if not _fun.is_iterable(eydata): eydata = [eydata]
# make sure at least xdata and ydata are 2-D
if _fun.is_a_number(xdata[0]): xdata = [xdata]
if _fun.is_a_number(ydata[0]): ydata = [ydata]
# make sure the number of data sets agrees
N = max(len(xdata),len(ydata))
for n in range(N-len( xdata)): xdata.append( xdata[0])
for n in range(N-len( ydata)): ydata.append( ydata[0])
for n in range(N-len(exdata)): exdata.append(exdata[0])
for n in range(N-len(eydata)): eydata.append(eydata[0])
# loop over each x and y data set, making sure None's are all converted
# to counting arrays
for n in range(N):
# clean up the [None]'s
if _fun.is_iterable(xdata[n]) and xdata[n][0] == None: xdata[n] = None
if _fun.is_iterable(ydata[n]) and ydata[n][0] == None: ydata[n] = None
if xdata[n] == None and ydata[n] == None:
print "ERROR: "+str(n)+"'th data set is (None, None)."
return
if xdata[n] == None: xdata[n] = _n.arange(len(ydata[n]))
if ydata[n] == None: ydata[n] = _n.arange(len(xdata[n]))
# check that the labels is a list of strings of the same length
if not _fun.is_iterable(label): label = [label]
if len(label) < len(ydata):
for n in range(len(ydata)-1): label.append(label[0])
# clear the figure?
if clear and not axes: _pylab.gcf().clear()
# setup axes
if axes=="gca" or axes==None: axes = _pylab.gca()
# if we're clearing the axes
if clear: axes.clear()
# set the current axes
_pylab.axes(axes)
# now loop over the list of data in xdata and ydata
for n in range(0,len(xdata)):
# get the label
if label: l = str(label[n])
else: l = str(n)
# calculate the x an y progressive shifts
dx = xshift*(n/xshift_every)
dy = yshift*(n/yshift_every)
# if we're supposed to coarsen the data, do so.
x = _s.fun.coarsen_array(xdata[n], coarsen)
y = _s.fun.coarsen_array(ydata[n], coarsen)
ey = _s.fun.coarsen_array(eydata[n], coarsen)
ex = _s.fun.coarsen_array(exdata[n], coarsen)
# update the style
if not style==None: kwargs.update(style.next())
axes.errorbar(x+dx, y+dy, label=l, yerr=ey, xerr=ex, **kwargs)
_pylab.xscale(xscale)
_pylab.yscale(yscale)
if legend: axes.legend(loc=legend)
axes.set_xlabel(xlabel)
axes.set_ylabel(ylabel)
# for some arguments there should be no title.
if title in [None, False, 0]:
axes.set_title('')
# add the commands to the title
else:
title = str(title)
if pyshell_history:
title = 'Plot created ' + _time.asctime() + '\n' + title
for n in range(pyshell_history):
if not title == '': title = _pt.get_pyshell_command(n) + "\n" + title
else: title = _pt.get_pyshell_command(n)
axes.set_title(title)
if grid: _pylab.grid(True)
if autoformat: _pt.format_figure(tall=tall, draw=False)
# update the canvas
if draw: _pylab.draw()
return axes
def xy_databoxes(ds, xscript=0, yscript=1, eyscript=None, exscript=None, **kwargs):
    """
    Use script to generate data and plot it as x vs y.

    ds          list of databoxes
    xscript     script for x data (xscript = None for counting script)
    yscript     script for y data (yscript = None for counting script)
    eyscript    script for y error
    exscript    script for x error

    **kwargs are sent to spinmob.plot.xy.data()
    """
    # delegate to the generic databox plotter with the xy backend
    return databoxes(ds, xscript, yscript, eyscript, exscript,
                     plotter=xy_data, **kwargs)
def xy_files(xscript=0, yscript=1, eyscript=None, exscript=None, **kwargs):
    """
    Loads a bunch of data files, generates data based on the supplied
    scripts, and plots it as x vs y.

    xscript, yscript, eyscript, exscript    scripts to generate x, y, and errors

    **kwargs are sent to spinmob.plot.xy.databoxes()
    """
    # delegate to the generic file loader with the xy databox plotter
    return files(xscript, yscript, eyscript, exscript,
                 plotter=xy_databoxes, **kwargs)
def xy_function(f='sin(x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, **kwargs):
    """
    Plots the function over the specified range as x vs y.

    f                   function or list of functions to plot; can be string functions
    xmin, xmax, steps   range over which to plot, and how many points to plot
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()!
    erange              Use exponential spacing of the x data?

    **kwargs are sent to spinmob.plot.xy.data()
    """
    # delegate to the generic function evaluator with the xy backend
    return function(f, xmin, xmax, steps, p, g, erange,
                    plotter=xy_data, **kwargs)
def databoxes(ds, xscript=0, yscript=1, eyscript=None, exscript=None, plotter=xy_data, **kwargs):
"""
Use script to generate data and send to harrisgroup.plot.complex_data()
ds list of databoxes
xscript script for x data
yscript script for y data
eyscript script for y error
exscript script for x error
plotter function used to do the plotting
**kwargs are sent to plotter()
"""
if not _fun.is_iterable(ds): ds = [ds]
if not kwargs.has_key('xlabel'): kwargs['xlabel'] = str(xscript)
if not kwargs.has_key('ylabel'): kwargs['ylabel'] = str(yscript)
# First make sure everything is a list of scripts (or None's)
if not _fun.is_iterable(xscript): xscript = [xscript]
if not _fun.is_iterable(yscript): yscript = [yscript]
if not _fun.is_iterable(exscript): exscript = [exscript]
if not _fun.is_iterable(eyscript): eyscript = [eyscript]
# make sure exscript matches shape with xscript (and the same for y)
if len(exscript) < len(xscript):
for n in range(len(xscript)-1): exscript.append(exscript[0])
if len(eyscript) < len(yscript):
for n in range(len(yscript)-1): eyscript.append(eyscript[0])
# Make xscript and exscript match in shape with yscript and eyscript
if len(xscript) < len(yscript):
for n in range(len(yscript)-1):
xscript.append(xscript[0])
exscript.append(exscript[0])
# check for the reverse possibility
if len(yscript) < len(xscript):
for n in range(len(xscript)-1):
yscript.append(yscript[0])
eyscript.append(eyscript[0])
# now check for None's (counting scripts)
for n in range(len(xscript)):
if xscript[n] == None and yscript[n] == None:
print "Two None scripts? But why?"
return
if xscript[n] == None:
if type(yscript[n])==str: xscript[n] = 'range(len('+yscript[n]+'))'
else: xscript[n] = 'range(len(c('+str(yscript[n])+')))'
if yscript[n] == None:
if type(xscript[n])==str: yscript[n] = 'range(len('+xscript[n]+'))'
else: yscript[n] = 'range(len(c('+str(xscript[n])+')))'
xdatas = []
ydatas = []
exdatas = []
eydatas = []
labels = []
for d in ds:
xdata = d(xscript)
for n in range(len(xdata)):
xdatas.append(xdata[n])
if len(xdata)>1: labels.append(str(n)+": "+_os.path.split(d.path)[-1])
else: labels.append(_os.path.split(d.path)[-1])
for y in d(yscript): ydatas.append(y)
for x in d(exscript): exdatas.append(x)
for y in d(eyscript): eydatas.append(y)
# allow for custom labels
if kwargs.has_key('label'): labels = kwargs.pop('label')
return plotter(xdatas, ydatas, eydatas, exdatas, label=labels, **kwargs)
def files(xscript=0, yscript=1, eyscript=None, exscript=None, plotter=xy_databoxes, paths='ask', **kwargs):
    """
    Loads a bunch of data files, generates data based on the supplied
    scripts, and plots it using the specified databox plotter.

    xscript, yscript, eyscript, exscript    scripts to generate x, y, and errors
    plotter                                 databox-level plotting function
    paths                                   file paths, or 'ask' for a dialog

    **kwargs are sent to plotter()
    """
    # 'delimiter' belongs to the loader, not the plotter
    delimiter = kwargs.pop('delimiter') if kwargs.has_key('delimiter') else None

    ds = _s.data.load_multiple(paths=paths, delimiter = delimiter)
    if ds==None or len(ds) == 0: return

    # generate a default title (the directory)
    if not kwargs.has_key('title'):
        kwargs['title'] = _os.path.split(ds[0].path)[0]

    # run the databox plotter
    return plotter(ds, xscript=xscript, yscript=yscript,
                   eyscript=eyscript, exscript=exscript, **kwargs)
def function(f='sin(x)', xmin=-1, xmax=1, steps=200, p='x', g=None, erange=False, plotter=xy_data, complex_plane=False, **kwargs):
    """
    Evaluates function(s) over the specified range and plots the result.

    f                   function or list of functions to plot; can be string functions
    xmin, xmax, steps   range over which to plot, and how many points to plot
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()
    erange              Use exponential spacing of the x data?
    plotter             function used to plot the generated data
    complex_plane       plot imag versus real of f?

    **kwargs are sent to the plotter
    """
    if not g: g = {}

    # do the opposite kind of update(): fill in module globals
    # without overwriting anything the caller supplied
    for k in globals().keys():
        if not g.has_key(k): g[k] = globals()[k]

    # if the x-axis is a log scale, use erange
    if erange: x = _fun.erange(xmin, xmax, steps)
    else:      x = _numpy.linspace(xmin, xmax, steps)

    # make sure it's a list so we can loop over it
    if not type(f) in [type([]), type(())]: f = [f]

    # loop over the list of functions
    xdatas = []
    ydatas = []
    labels = []
    for fs in f:

        # compile string functions into lambdas, labelled by their source
        if type(fs) == str:
            a = eval('lambda ' + p + ': ' + fs, g)
            a.__name__ = fs
        else:
            a = fs

        # try directly evaluating (vectorized)
        try: y = a(x)

        # do it the slow way, one point at a time
        except:
            y = []
            for z in x: y.append(a(z))

        xdatas.append(x)
        ydatas.append(y)
        labels.append(a.__name__)

    if not kwargs.has_key('xlabel'): kwargs['xlabel'] = p
    if not kwargs.has_key('label'):  kwargs['label']  = labels

    # plot!
    if complex_plane: return plotter(real(ydatas),imag(ydatas), **kwargs)
    else:             return plotter(xdatas, ydatas, **kwargs)
def image_data(Z, X=[0,1.0], Y=[0,1.0], aspect=1.0, zmin=None, zmax=None, clear=1, **kwargs):
    """
    Generates an image plot.

    X   1-d array of x-values (bin centers), or just [first, last]
    Y   1-d array of y-values (bin centers), or just [first, last]
    Z   2-d array of z-values

    X and Y can be something like [0,2] or an array of X-values
    """
    fig = _pylab.gcf()
    if clear:
        fig.clear()
        _pylab.axes()

    # work with numpy arrays from here on
    X = _numpy.array(X)
    Y = _numpy.array(Y)
    Z = _numpy.array(Z)

    # assume X and Y are the bin centers and figure out the bin widths
    x_width = abs(float(X[-1] - X[0])/(len(Z[0])-1))
    y_width = abs(float(Y[-1] - Y[0])/(len(Z)-1))

    # reverse the Z's so row 0 ends up at the bottom of the image
    Z = Z[-1::-1]

    # get rid of the label and title kwargs (they go to the axes, not imshow)
    xlabel=''
    ylabel=''
    title =''
    if kwargs.has_key('xlabel'): xlabel = kwargs.pop('xlabel')
    if kwargs.has_key('ylabel'): ylabel = kwargs.pop('ylabel')
    if kwargs.has_key('title'):  title  = kwargs.pop('title')

    # extent spans half a bin beyond the outermost centers
    _pylab.imshow(Z, extent=[X[0]-x_width/2.0, X[-1]+x_width/2.0,
                             Y[0]-y_width/2.0, Y[-1]+y_width/2.0], **kwargs)
    _pylab.colorbar()
    _pt.image_set_clim(zmin,zmax)
    _pt.image_set_aspect(aspect)

    a = _pylab.gca()
    a.set_title(title)
    a.set_xlabel(xlabel)
    a.set_ylabel(ylabel)

    # refresh the interactive color sliders and bring windows forward
    _pt.close_sliders()
    _pt.image_sliders()
    _pt.raise_figure_window()
    _pt.raise_pyshell()
    _pylab.draw()
    return _pylab.gca()
def image_function(f='sin(x)*cos(y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p="x,y", g=None, **kwargs):
"""
Plots a 2-d function over the specified range
f takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin,xmax,ymin,ymax range over which to generate/plot the data
xsteps,ysteps how many points to plot on the specified range
p if using strings for functions, this is a string of parameters.
g Optional additional globals. Try g=globals()!
"""
# aggregate globals
if not g: g = {}
for k in globals().keys():
if not g.has_key(k): g[k] = globals()[k]
if type(f) == str:
f = eval('lambda ' + p + ': ' + f, g)
# generate the grid x and y coordinates
xones = _numpy.linspace(1,1,ysteps)
x = _numpy.linspace(xmin, xmax, xsteps)
xgrid = _numpy.outer(xones, x)
yones = _numpy.linspace(1,1,xsteps)
y = _numpy.linspace(ymin, ymax, ysteps)
ygrid = _numpy.outer(y, yones)
# now get the z-grid
try:
# try it the fast numpy way. Add 0 to assure dimensions
zgrid = f(xgrid, ygrid) + xgrid*0.0
except:
print "Notice: function is not rocking hardcore. Generating grid the slow way..."
# manually loop over the data to generate the z-grid
zgrid = []
for ny in range(0, len(y)):
zgrid.append([])
for nx in range(0, len(x)):
zgrid[ny].append(f(x[nx], y[ny]))
zgrid = _numpy.array(zgrid)
# now plot!
return image_data(zgrid, x, y, **kwargs)
def parametric_function(fx='sin(t)', fy='cos(t)', tmin=-1, tmax=1, steps=200, p='t', g=None, erange=False, **kwargs):
    """
    Plots the parametric function over the specified range.

    fx, fy              function or list of functions to plot; can be string functions
    tmin, tmax, steps   parameter range over which to plot, and how many points
    p                   if using strings for functions, p is the parameter name
    g                   optional dictionary of extra globals. Try g=globals()!
    erange              Use exponential spacing of the t data?

    **kwargs are sent to spinmob.plot.xy.data()
    """
    # aggregate globals without overwriting caller-supplied entries
    if not g: g = {}
    for k in globals().keys():
        if not g.has_key(k): g[k] = globals()[k]

    # if the parameter axis is a log scale, use erange
    if erange: r = _fun.erange(tmin, tmax, steps)
    else:      r = _numpy.linspace(tmin, tmax, steps)

    # make sure both are lists so we can loop over them
    if not type(fy) in [type([]), type(())]: fy = [fy]
    if not type(fx) in [type([]), type(())]: fx = [fx]

    # evaluate every x-function over the parameter values
    xdatas = []
    ydatas = []
    labels = []
    for fs in fx:
        if type(fs) == str:
            a = eval('lambda ' + p + ': ' + fs, g)
            a.__name__ = fs
        else:
            a = fs
        x = []
        for z in r: x.append(a(z))
        xdatas.append(x)
        labels.append(a.__name__)

    # evaluate every y-function, extending the matching label
    for n in range(len(fy)):
        fs = fy[n]
        if type(fs) == str:
            a = eval('lambda ' + p + ': ' + fs, g)
            a.__name__ = fs
        else:
            a = fs
        y = []
        for z in r: y.append(a(z))
        ydatas.append(y)
        labels[n] = labels[n]+', '+a.__name__

    # plot!
    return xy_data(xdatas, ydatas, label=labels, **kwargs)
class plot_style_cycle(dict):
    """
    Dictionary of style-keyword cycles for plotting, e.g. cycling line
    colors. Each key maps to a list of values that is cycled endlessly.
    """

    # per-key endless iterators (rebuilt by reset())
    iterators = {}

    def __init__(self, **kwargs):
        """
        Supply keyword arguments that would be sent to pylab.plot(), except
        as a list so there is some order to follow. For example:

        style = plot_style_cycle(color=['k','r','b'], marker='o')
        """
        self.iterators = {}

        # make sure everything is iterable; a bare value becomes a
        # one-element list. BUGFIX: check strings explicitly — on Python 3
        # strings have __iter__, so 'o' would cycle 'o' forever instead of
        # being treated as a single style value (Python 2 strings did not)
        for key in kwargs:
            if isinstance(kwargs[key], str) \
            or not getattr(kwargs[key], '__iter__', False):
                kwargs[key] = [kwargs[key]]

        # The base class is a dictionary, so update our own elements!
        self.update(kwargs)

        # create the auxiliary iterator dictionary
        self.reset()

    def next(self):
        """
        Returns the next dictionary of styles to send to plot as kwargs.
        For example:

        pylab.plot([1,2,3],[1,2,1], **style.next())
        """
        s = {}
        for key in self.iterators.keys():
            # builtin next() works on both Python 2 and Python 3 iterators
            # (the old iterator.next() call is Python-2-only)
            s[key] = next(self.iterators[key])
        return s

    def reset(self):
        """
        Resets the style cycle. Returns self for chaining.
        """
        for key in self.keys(): self.iterators[key] = _itertools.cycle(self[key])
        return self
|
streitho/spinmob | __init__.py | #######################################################################
# Set up the wx and matplotlib environment for use with pyshell/pycrust
#######################################################################
import wx as _wx
# setup matplotlib and wx so it works well with pyshell/pycrust
import matplotlib as _mpl
if not _mpl.get_backend() == 'WXAgg': _mpl.use('wxAgg') # set the backend (must do this first)
_mpl.rcParams['figure.facecolor']='w'
import pylab
pylab.ion() # turn on interactive mode
import scipy
import numpy
# now get the global application
# (reuse an existing wx app if one is running, e.g. inside pyshell)
_app = _wx.GetApp()
if _app == None: _app = _wx.PySimpleApp()
#############################
# Spinmob stuff
#############################
# create the user preferences object (sets up prefs directory and stuff)
import _prefs
prefs = _prefs.Prefs()
# reload() each submodule so repeated "import spinmob" picks up source edits,
# then hand every one of them the shared preferences object
import _dialogs as dialogs; reload(dialogs); dialogs._prefs = prefs
import _pylab_colorslider ;_pylab_colorslider._prefs = prefs
import _plot as plot ;plot._prefs = prefs
import _functions as fun ;fun._prefs = prefs
import _models as models ;models._prefs = prefs
import _fitting as fit ;fit._prefs = prefs
import _data as data ;data._prefs = prefs
data._data_types._prefs = prefs
# pull some of the common functions to the top
import scipy.constants
printer = fun.printer
constants = scipy.constants
xscale = plot.tweaks.xscale
yscale = plot.tweaks.yscale
# now do the big reload chain.
|
streitho/spinmob | _plot_parametric.py | import _plotting_mess
function = _plotting_mess.parametric_function |
streitho/spinmob | _data.py | <reponame>streitho/spinmob
import wx as _wx
import os as _os
import _functions as _fun ;reload(_fun)
import _pylab_tweaks as _pt ;reload(_pt)
import _dialogs ;reload(_dialogs)
import _data_types ;reload(_data_types)
# make the standard data class visible, ja.
standard = _data_types.standard
databox = standard
def load(path="ask", first_data_line="auto", filters="*.*", text="Select a file, FACEHEAD.", default_directory="default_directory", quiet=False, header_only=False, **kwargs):
"""
Loads a data file into the standard data class. Returns the data object.
**kwargs are sent to databox(), so check there for more information (i.e.
about delimiters)
"""
d = standard(**kwargs)
d.load_file(path=path, first_data_line=first_data_line,
filters=filters, text=text, default_directory=default_directory,
header_only=header_only)
if not quiet:
print "loaded", d.path
_wx.Yield()
return d
def load_multiple(paths="ask", first_data_line="auto", filters="*.*", text="Select some files, FACEHEAD.", default_directory="default_directory", **kwargs):
    """
    Loads a list of data files into a list of standard data objects.
    Returns said list.

    **kwargs are sent to databox()
    """
    # pop up a file dialog when no explicit paths were given
    if paths=="ask": paths = _dialogs.MultipleFiles(filters, text, default_directory)
    if paths==None: return

    # load each existing file, skipping anything that isn't a regular file
    datas = []
    for path in paths:
        if _os.path.isfile(path):
            datas.append(load(path, first_data_line, **kwargs))

    return datas
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.