repo_name stringlengths 6 97 | path stringlengths 3 341 | text stringlengths 8 1.02M |
|---|---|---|
spohnan/geowave | python/src/main/python/pygw/test/data_store_test.py | <gh_stars>0
#
# Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
import pytest
import os
from pygw.store import DataStoreFactory
from pygw.store.rocksdb import RocksDBOptions
from pygw.index import SpatialIndexBuilder
from pygw.query import VectorQueryBuilder
from .conftest import TEST_DIR
from .conftest import POINT_TYPE_ADAPTER
from .conftest import TEST_DATA
from .conftest import write_test_data
from .conftest import results_as_list
# Test Additions #
def test_add_type(test_ds):
    """Registering a type with an index exposes both through the store."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    point_adapter = POINT_TYPE_ADAPTER
    # when
    test_ds.add_type(point_adapter, spatial_index)
    stored_indices = test_ds.get_indices()
    stored_types = test_ds.get_types()
    # then
    assert len(stored_indices) == 1
    stored = stored_indices[0]
    assert stored.get_name() == spatial_index.get_name()
    assert stored.get_index_strategy() == spatial_index.get_index_strategy()
    assert stored.get_index_model() == spatial_index.get_index_model()
    assert len(stored_types) == 1
    assert stored_types[0].get_type_name() == point_adapter.get_type_name()
def test_add_existing_type(test_ds):
    """Adding the same type twice must not duplicate its index."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    point_adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(point_adapter, spatial_index)
    # when: re-register the identical adapter/index pair
    test_ds.add_type(point_adapter, spatial_index)
    stored_indices = test_ds.get_indices(point_adapter.get_type_name())
    # then
    assert len(stored_indices) == 1
    stored = stored_indices[0]
    assert stored.get_name() == spatial_index.get_name()
    assert stored.get_index_strategy() == spatial_index.get_index_strategy()
    assert stored.get_index_model() == spatial_index.get_index_model()
# Test Removing #
def test_remove_index(test_ds):
    """Removing one of two indices leaves the other intact."""
    # given
    first_index = SpatialIndexBuilder().set_name_override("idx1").create_index()
    second_index = SpatialIndexBuilder().set_name_override("idx2").create_index()
    point_adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(point_adapter, first_index)
    test_ds.add_type(point_adapter, second_index)
    # when
    test_ds.remove_index(point_adapter.get_type_name(), first_index.get_name())
    remaining = test_ds.get_indices()
    # then: only the second index is left
    assert len(remaining) == 1
    assert remaining[0].get_name() == second_index.get_name()
    assert remaining[0].get_index_strategy() == second_index.get_index_strategy()
    assert remaining[0].get_index_model() == second_index.get_index_model()
def test_remove_index_last(test_ds):
    """Removing the only index of a type must raise.

    Adapters require at least one index, so the data store is expected to
    refuse to drop the last one.
    """
    # given
    index = SpatialIndexBuilder().create_index()
    adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(adapter, index)
    # when: only the raising call belongs inside the context manager
    with pytest.raises(Exception) as exc_info:
        test_ds.remove_index(index.get_name())
    # then
    # BUGFIX: this assertion previously sat inside the `with` block after the
    # raising call, so it was never executed; it must run after the context
    # manager has captured the exception.
    assert 'Adapters require at least one index' in str(exc_info.value)
def test_remove_index_non_exist(test_ds):
    """Removing an unknown index name is a silent no-op."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(POINT_TYPE_ADAPTER, spatial_index)
    # when
    test_ds.remove_index("Corgi")
    # then: the real index is untouched
    assert len(test_ds.get_indices()) == 1
def test_remove_type(test_ds):
    """Removing a type drops its data and its per-type index mapping."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    point_adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(point_adapter, spatial_index)
    write_test_data(test_ds, spatial_index)
    # when
    test_ds.remove_type(point_adapter.get_type_name())
    everything = VectorQueryBuilder().build()
    features = results_as_list(test_ds.query(everything))
    # then: the type no longer maps to an index, but the index itself survives
    assert len(test_ds.get_indices(point_adapter.get_type_name())) == 0
    assert len(test_ds.get_indices()) == 1
    assert len(features) == 0
# Test Deleting #
def test_delete(test_ds):
    """Deleting by constraint removes only the matching features."""
    # given
    first_index = SpatialIndexBuilder().set_name_override("idx1").create_index()
    second_index = SpatialIndexBuilder().set_name_override("idx2").create_index()
    point_adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(point_adapter, first_index)
    test_ds.add_type(point_adapter, second_index)
    write_test_data(test_ds, first_index, second_index)
    # when: the bounding box covers 10 features, (1, 1) through (10, 10)
    builder = VectorQueryBuilder()
    constraints = builder.constraints_factory().cql_constraints(
        "BBOX(the_geom, 0.5, 0.5, 10.5, 10.5)")
    builder.constraints(constraints)
    test_ds.delete(builder.build())
    remaining = results_as_list(test_ds.query(VectorQueryBuilder().build()))
    # then: indices and type remain, only the 10 covered features are gone
    assert len(test_ds.get_indices()) == 2
    assert len(test_ds.get_types()) == 1
    assert len(remaining) == (len(TEST_DATA) - 10)
# Test Delete All #
def test_delete_all(test_ds):
    """delete_all wipes indices, types and data in one call."""
    # given
    first_index = SpatialIndexBuilder().set_name_override("idx1").create_index()
    second_index = SpatialIndexBuilder().set_name_override("idx2").create_index()
    point_adapter = POINT_TYPE_ADAPTER
    test_ds.add_type(point_adapter, first_index)
    test_ds.add_type(point_adapter, second_index)
    write_test_data(test_ds, first_index, second_index)
    # when
    test_ds.delete_all()
    features = results_as_list(test_ds.query(VectorQueryBuilder().build()))
    # then
    assert len(test_ds.get_indices()) == 0
    assert len(test_ds.get_types()) == 0
    assert len(features) == 0
# Test Copy #
def test_copy(test_ds):
    """copy_to replicates indices, types and data into a second store."""
    # given: a second RocksDB-backed store to copy into
    options = RocksDBOptions()
    options.set_geowave_namespace("geowave.tests")
    options.set_directory(os.path.join(TEST_DIR, "datastore2"))
    destination = DataStoreFactory.create_data_store(options)
    point_adapter = POINT_TYPE_ADAPTER
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(point_adapter, spatial_index)
    write_test_data(test_ds, spatial_index)
    # when
    test_ds.copy_to(destination)
    copied_indices = destination.get_indices()
    copied_types = destination.get_types()
    copied = results_as_list(destination.query(VectorQueryBuilder().build()))
    # then: the source is untouched and the destination mirrors it
    assert len(test_ds.get_indices()) == 1
    assert len(copied_indices) == 1
    assert copied_indices[0].get_name() == spatial_index.get_name()
    assert copied_indices[0].get_index_strategy() == spatial_index.get_index_strategy()
    assert copied_indices[0].get_index_model() == spatial_index.get_index_model()
    assert len(copied_types) == 1
    assert copied_types[0].get_type_name() == point_adapter.get_type_name()
    assert len(copied) == len(TEST_DATA)
    destination.delete_all()
def test_copy_by_query(test_ds):
    """copy_to with a query copies only the features the query matches."""
    # given: a second RocksDB-backed store to copy into
    options = RocksDBOptions()
    options.set_geowave_namespace("geowave.tests")
    options.set_directory(os.path.join(TEST_DIR, "datastore2"))
    destination = DataStoreFactory.create_data_store(options)
    point_adapter = POINT_TYPE_ADAPTER
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(point_adapter, spatial_index)
    write_test_data(test_ds, spatial_index)
    # when: the bounding box covers 10 features, (1, 1) through (10, 10)
    builder = VectorQueryBuilder()
    constraints = builder.constraints_factory().cql_constraints(
        "BBOX(the_geom, 0.5, 0.5, 10.5, 10.5)")
    builder.all_indices().constraints(constraints)
    test_ds.copy_to(destination, builder.build())
    copied_indices = destination.get_indices()
    copied_types = destination.get_types()
    copied = results_as_list(destination.query(VectorQueryBuilder().build()))
    # then: metadata is mirrored, but only the 10 matching features move
    assert len(test_ds.get_indices()) == 1
    assert len(copied_indices) == 1
    assert copied_indices[0].get_name() == spatial_index.get_name()
    assert copied_indices[0].get_index_strategy() == spatial_index.get_index_strategy()
    assert copied_indices[0].get_index_model() == spatial_index.get_index_model()
    assert len(copied_types) == 1
    assert copied_types[0].get_type_name() == point_adapter.get_type_name()
    assert len(copied) == 10
    destination.delete_all()
# Test Writer #
def test_create_writer(test_ds):
    """A writer can be created for a registered type."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(POINT_TYPE_ADAPTER, spatial_index)
    # when
    writer = test_ds.create_writer(POINT_TYPE_ADAPTER.get_type_name())
    # then
    assert writer is not None
def test_create_writer_null(test_ds):
    """Requesting a writer for an unregistered type yields None."""
    # when / then
    assert test_ds.create_writer("Corgi") is None
def test_create_writer_null_other(test_ds):
    """An unknown type yields no writer even when another type has one."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(POINT_TYPE_ADAPTER, spatial_index)
    test_ds.create_writer(POINT_TYPE_ADAPTER.get_type_name())
    # when
    writer = test_ds.create_writer("Corgi")
    # then
    assert writer is None
def test_write(test_ds):
    """Every written feature is returned by an unconstrained query."""
    # given
    spatial_index = SpatialIndexBuilder().create_index()
    test_ds.add_type(POINT_TYPE_ADAPTER, spatial_index)
    # when
    write_test_data(test_ds, spatial_index)
    features = results_as_list(test_ds.query(VectorQueryBuilder().build()))
    # then
    assert len(features) == len(TEST_DATA)
|
spohnan/geowave | python/src/main/python/pygw/__init__.py | #
# Copyright (c) 2013-2019 Contributors to the Eclipse Foundation
#
# See the NOTICE file distributed with this work for additional information regarding copyright
# ownership. All rights reserved. This program and the accompanying materials are made available
# under the terms of the Apache License, Version 2.0 which accompanies this distribution and is
# available at http://www.apache.org/licenses/LICENSE-2.0.txt
#===============================================================================================
"""
# Submodule descriptions
In general each submodule tries to mimic the behavior of the GeoWave Java API. If there is ever any question about how something should be done with the Python bindings, the answer is most likely the same as how it is done in Java, except that function names use underscores instead of the camel case that is conventional in Java. For example, if the Java version of a class has a function `getName()`, the Python variant would be `get_name()`.
The main difference between the two APIs is how the modules are laid out. The Python bindings use a simplified module structure to avoid bringing in all the unnecessary complexity of the Java packages that the Java variants belong to.
## config
The `config` module includes a singleton object of type GeoWaveConfiguration called `gw_config` that handles all communication between python and the Py4J Java Gateway. The module includes several shortcut objects to make accessing the gateway more convenient. These include:
- *`java_gateway`* Py4J Gateway Object
- *`java_pkg`*: Shortcut for `java_gateway.jvm`. Can be used to construct JVM objects like `java_pkg.org.geotools.feature.simple.SimpleFeatureTypeBuilder()`
- *`geowave_pkg`*: Similar to `java_pkg`, serves as a shortcut for `java_gateway.jvm.org.locationtech.geowave`.
- *`reflection_util`*: Direct access to the Py4J reflection utility.
These objects can be imported directly using `from pygw.config import <object_name>`.
NOTE: the GeoWaveConfiguration has an `init()` method. This is INTENTIONALLY not an `__init__` method. Initialization is attempted when the configuration is imported.
## base
The `base` module includes common classes that are used by other modules. This includes the base `GeoWaveObject` class that serves as a python wrapper for a java reference. It also includes a `type_conversions` submodule that can be used to convert Python types to Java types that are commonly used in GeoWave.
## geotools
The `geotools` module contains classes that wrap the functionality of geotools SimpleFeatures and SimpleFeatureTypes. These classes can be used to create feature types, features, and data adapters based on simple features.
## index
The `index` module contains classes that are used in creating spatial and spatial/temporal indices.
## query
The `query` module contains classes that are used in constructing queries and their constraints.
## store
The `store` module contains classes that can be used to establish connections to the various GeoWave backends. Each store type has a submodule which contains a class that can be used to connect to that store type. For example `from pygw.store.accumulo import AccumuloOptions`. The `DataStore` object can be constructed by passing the options object to the `DataStoreFactory.create_data_store(<options>)` method.
## debug.py
This exposes a function called `print_obj` that can be used to help with debugging raw java objects. It will print information about the object in question on both the Python side and on the Java server side. There's a `verbose` flag that will give you more information about the object in question.
"""
__version__ = "1.0.0-RC2-SNAPSHOT"
|
stevens97/SALT_RSS_Extract_SNR | Extract_SNR.py | <filename>Extract_SNR.py
'''
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
SALT RSS Aperture Extraction (With Minimum Desired Signal-to-Noise Ratio)
Extract regions (outward from the object's centre) for a FITS file
processed by the SALT RSS pipeline.
The target FITS file is typically a 2D flux spectrum of a galaxy.
The SALT RSS Data Reduction procedure is described in:
http://mips.as.arizona.edu/~khainline/salt_redux.html
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
'''
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Import Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
import os # For bash commands
import numpy as np # For array handling
from pyraf import iraf # For IRAF commands
import astropy.io.fits as fits # For FITS file handling
from pathlib import Path # To extract filenames
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Load IRAF Libraries
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
iraf.images()
iraf.images.imutil()
'''
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
Aperture Extraction
The following methods are quite similar,
with minor differences.
extract_1kpc(): Extract 0-1 kiloparsec regions from the centre.
extract_left(): Extract regions left (-) from the centre of the object,
such that these regions have a signal-to-noise ratio (SNR) above the desired
minimum.
extract_right(): Extract regions right (+) from the centre of the object,
such that these regions have a signal-to-noise ratio (SNR) above the desired
minimum.
>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
'''
def extract_1kpc_left(path, file, centre, pix_per_kpc):
    '''
    Extract the aperture spanning 0 to -0.5 kpc left of the object's centre.

    :param path [String]: File path containing the galaxy's FITS file.
    :param file [String]: The galaxy's FITS file.
    :param centre [float]: Central pixel of the galaxy's FITS file.
    :param pix_per_kpc [float]: Pixel per kiloparsec ratio for the galaxy.
    :return:
    '''
    # Work inside the directory that holds the FITS file
    os.chdir(path)
    print('Changed to path: {}'.format(path))
    # Start from the two pixel rows immediately left of the centre
    pix_end = int(centre)
    pix_start = pix_end - 1
    op1 = '{}[*,{}]'.format(file, pix_start)
    op2 = '{}[*,{}]'.format(file, pix_end)
    # Intermediate image names used while summing rows
    res = 'result.fits'
    temp = 'temp.fits'
    # Grow the aperture leftwards until its midpoint reaches -0.5 kpc
    while True:
        kpc = (((pix_start + pix_end) / 2.0) - centre) / pix_per_kpc
        print('pix_start = {}'.format(pix_start))
        print('pix_end = {}'.format(pix_end))
        iraf.images.imutil.imarith(operand1=op1, op='+', operand2=op2, result=res, verbose='No', mode='ql')
        if kpc > -0.5:
            # Not wide enough yet: keep the running sum and add one more row
            iraf.images.imutil.imcopy(input=res, output=temp)
            iraf.images.imutil.imdelete(images=res)
            pix_start = pix_start - 1
            op1 = '{}[*,{}]'.format(file, pix_start)
            op2 = '{}[0]'.format(temp)
        else:
            # Wide enough: persist the summed aperture and record its bounds
            out_file = '{}_-0.5kpcL.fits'.format(file)
            iraf.images.imutil.imcopy(input=res, output=out_file)
            iraf.images.imutil.imdelete(images=res)
            iraf.imutil.hedit(images=out_file, fields='ymin_pixel', value=pix_start)
            iraf.imutil.hedit(images=out_file, fields='ymax_pixel', value=pix_end)
            iraf.images.imutil.imdelete(images=temp)
            break
    return None
def extract_1kpc_right(path, file, centre, pix_per_kpc):
    '''
    Extract the aperture spanning 0 to +0.5 kpc right of the object's centre.

    :param path [String]: File path containing the galaxy's FITS file.
    :param file [String]: The galaxy's FITS file.
    :param centre [float]: Central pixel of the galaxy's FITS file.
    :param pix_per_kpc [float]: Pixel per kiloparsec ratio for the galaxy.
    :return:
    '''
    # Work inside the directory that holds the FITS file
    os.chdir(path)
    print('Changed to path: {}'.format(path))
    # Start from the central pixel row and the row immediately to its right
    pix_start = int(centre)
    pix_end = int(centre + 1)
    op1 = '{}[*,{}]'.format(file, pix_start)
    op2 = '{}[*,{}]'.format(file, pix_end)
    # Intermediate image names used while summing rows
    res = 'result.fits'
    temp = 'temp.fits'
    # Grow the aperture rightwards until its midpoint reaches +0.5 kpc
    while True:
        kpc = (((pix_start + pix_end) / 2.0) - centre) / pix_per_kpc
        print('pix_start = {}'.format(pix_start))
        print('pix_end = {}'.format(pix_end))
        iraf.images.imutil.imarith(operand1=op1, op='+', operand2=op2, result=res, verbose='No', mode='ql')
        if kpc < 0.5:
            # Not wide enough yet: keep the running sum and add one more row
            iraf.images.imutil.imcopy(input=res, output=temp)
            iraf.images.imutil.imdelete(images=res)
            pix_end = pix_end + 1
            op1 = '{}[0]'.format(temp)
            op2 = '{}[*,{}]'.format(file, pix_end)
        else:
            # Wide enough: persist the summed aperture and record its bounds
            out_file = '{}_0.5kpcR.fits'.format(file)
            iraf.images.imutil.imcopy(input=res, output=out_file)
            iraf.images.imutil.imdelete(images=res)
            iraf.imutil.hedit(images=out_file, fields='ymin_pixel', value=pix_start)
            iraf.imutil.hedit(images=out_file, fields='ymax_pixel', value=pix_end)
            iraf.images.imutil.imdelete(images=temp)
            break
    return None
def extract_left(path, file, desired_SNR, centre, pix_per_kpc):
    '''
    Extract successive apertures left (-) of the object's centre.  Each
    aperture is grown one pixel row at a time until it reaches the desired
    minimum signal-to-noise ratio, then saved, and the next aperture starts
    immediately to its left.  Extraction stops when an aperture exceeds 50
    pixel rows without reaching the desired SNR.

    :param path [String]: File path containing the galaxy's FITS file.
    :param file [String]: The galaxy's FITS file.
    :param desired_SNR [float]: Desired *minimum* signal-to-noise ratio for aperture extraction.
    :param centre [float]: Central pixel of the galaxy's FITS file.
    :param pix_per_kpc [float]: Pixel per kiloparsec ratio for the galaxy.
    :return:
    '''
    # Change to file path
    os.chdir(path)
    print('Changed to path: {}'.format(path))
    # Set pixel ranges
    pix_end = int(centre)
    pix_start = pix_end - 1
    # Set operands
    op1 = '{}[*,{}]'.format(file, pix_start)
    op2 = '{}[*,{}]'.format(file, pix_end)
    # Set file names
    res = 'result.fits'
    temp = 'temp.fits'
    while True:
        # Mid-aperture distance from the centre, in kpc (negative = left)
        kpc = np.round((((pix_start + pix_end) / 2.0) - centre) / pix_per_kpc, 2)
        if pix_end - pix_start > 50:
            # Give up on this aperture; clean up any intermediate images.
            print('Maximum aperture size of 50 exceeded.')
            try:
                iraf.images.imutil.imdelete(images=temp)
            except Exception:
                pass
            try:
                iraf.images.imutil.imdelete(images=res)
            except Exception:
                pass
            break
        print('pix_start = {}'.format(pix_start))
        print('pix_end = {}'.format(pix_end))
        iraf.images.imutil.imarith(operand1=op1, op='+', operand2=op2, result=res, verbose='No', mode='ql')
        # Read the summed aperture back from disk.
        # BUGFIX: the HDU list was never closed, leaking a file handle on
        # every iteration.
        hdu = fits.open(res)
        try:
            galaxy = np.asarray(hdu[0].data)
        finally:
            hdu.close()
        # BUGFIX: the original wrote `len(galaxy[0] > 1)`, applying len() to
        # the *comparison* instead of the row; reduce 2-D data to its first
        # row explicitly.
        if galaxy.ndim > 1:
            galaxy = galaxy[0]
        print('galaxy = {}'.format(galaxy))
        # Median signal, with noise estimated from median absolute second
        # differences (the constant matches the ST-ECF DER_SNR recipe).
        signal = np.median(galaxy)
        n = len(galaxy)
        noise = 0.6052697 * np.median(np.abs(2.0 * galaxy[2:n - 2] - galaxy[0:n - 4] - galaxy[4:n]))
        SNR = signal / noise
        print('Signal-To-Noise Ratio = {}'.format(SNR))
        # Delete aperture if Signal-To-Noise Ratio is too low
        # Then further increase aperture size
        # Else, keep FITS file and adjust starting pixel
        if SNR < desired_SNR:
            iraf.images.imutil.imcopy(input=res, output=temp)
            iraf.images.imutil.imdelete(images=res)
            pix_start = pix_start - 1
            op1 = '{}[*,{}]'.format(file, pix_start)
            op2 = '{}[0]'.format(temp)
        else:
            out_file = '{}_{}kpcL.fits'.format(file, kpc)
            iraf.images.imutil.imcopy(input=res, output=out_file)
            iraf.images.imutil.imdelete(images=res)
            # Update Header Information
            iraf.imutil.hedit(images=out_file, fields='SNR', value=SNR)
            iraf.imutil.hedit(images=out_file, fields='ymin_pixel', value=pix_start)
            iraf.imutil.hedit(images=out_file, fields='ymax_pixel', value=pix_end)
            # Begin the next aperture immediately to the left
            pix_end = pix_start - 1
            pix_start = pix_end - 1
            op1 = '{}[*,{}]'.format(file, pix_start)
            op2 = '{}[*,{}]'.format(file, pix_end)
    return None
def extract_right(path, file, desired_SNR, centre, pix_per_kpc):
    '''
    Extract successive apertures right (+) of the object's centre.  Each
    aperture is grown one pixel row at a time until it reaches the desired
    minimum signal-to-noise ratio, then saved, and the next aperture starts
    immediately to its right.  Extraction stops when an aperture exceeds 50
    pixel rows without reaching the desired SNR.

    :param path [String]: File path containing the galaxy's FITS file.
    :param file [String]: The galaxy's FITS file.
    :param desired_SNR [float]: Desired *minimum* signal-to-noise ratio for aperture extraction.
    :param centre [float]: Central pixel of the galaxy's FITS file.
    :param pix_per_kpc [float]: Pixel per kiloparsec ratio for the galaxy.
    :return:
    '''
    # Change to file path
    os.chdir(path)
    print('Changed to path: {}'.format(path))
    # Set pixel ranges
    pix_start = int(centre)
    pix_end = int(centre + 1)
    # Set operands
    op1 = '{}[*,{}]'.format(file, pix_start)
    op2 = '{}[*,{}]'.format(file, pix_end)
    # Set file names
    res = 'result.fits'
    temp = 'temp.fits'
    while True:
        # Mid-aperture distance from the centre, in kpc (positive = right)
        kpc = np.round((((pix_start + pix_end) / 2.0) - centre) / pix_per_kpc, 2)
        if pix_end - pix_start > 50:
            # Give up on this aperture; clean up any intermediate images.
            print('Maximum aperture size of 50 exceeded.')
            try:
                iraf.images.imutil.imdelete(images=temp)
            except Exception:
                pass
            try:
                iraf.images.imutil.imdelete(images=res)
            except Exception:
                pass
            break
        print('pix_start = {}'.format(pix_start))
        print('pix_end = {}'.format(pix_end))
        iraf.images.imutil.imarith(operand1=op1, op='+', operand2=op2, result=res, verbose='No', mode='ql')
        # Read the summed aperture back from disk.
        # BUGFIX: the HDU list was never closed, leaking a file handle on
        # every iteration.
        hdu = fits.open(res)
        try:
            galaxy = np.asarray(hdu[0].data)
        finally:
            hdu.close()
        # BUGFIX: the original wrote `len(galaxy[0] > 1)`, applying len() to
        # the *comparison* instead of the row; reduce 2-D data to its first
        # row explicitly.
        if galaxy.ndim > 1:
            galaxy = galaxy[0]
        print('galaxy = {}'.format(galaxy))
        # Median signal, with noise estimated from median absolute second
        # differences (the constant matches the ST-ECF DER_SNR recipe).
        signal = np.median(galaxy)
        n = len(galaxy)
        noise = 0.6052697 * np.median(np.abs(2.0 * galaxy[2:n - 2] - galaxy[0:n - 4] - galaxy[4:n]))
        SNR = signal / noise
        print('Signal-To-Noise Ratio = {}'.format(SNR))
        # Delete aperture if Signal-To-Noise Ratio is too low
        # Then further increase aperture size
        # Else, keep FITS file and adjust starting pixel
        if SNR < desired_SNR:
            iraf.images.imutil.imcopy(input=res, output=temp)
            iraf.images.imutil.imdelete(images=res)
            pix_end = pix_end + 1
            op1 = '{}[0]'.format(temp)
            op2 = '{}[*,{}]'.format(file, pix_end)
        else:
            out_file = '{}_{}kpcR.fits'.format(file, kpc)
            iraf.images.imutil.imcopy(input=res, output=out_file)
            iraf.images.imutil.imdelete(images=res)
            # Update Header Information
            iraf.imutil.hedit(images=out_file, fields='SNR', value=SNR)
            iraf.imutil.hedit(images=out_file, fields='ymin_pixel', value=pix_start)
            iraf.imutil.hedit(images=out_file, fields='ymax_pixel', value=pix_end)
            # Begin the next aperture immediately to the right
            pix_start = pix_end + 1
            pix_end = pix_start + 1
            op1 = '{}[*,{}]'.format(file, pix_start)
            op2 = '{}[*,{}]'.format(file, pix_end)
    return None
# ---------------------------------------------------------------------------
# Interactive entry point: prompt for the extraction parameters, then run the
# requested left/right extractions.
# NOTE(review): raw_input exists only in Python 2 (consistent with the
# pyraf-era tooling this script imports) — confirm it runs under Python 2.
# ---------------------------------------------------------------------------
# Set extraction mode
mode = str(raw_input("Please enter extraction mode. Enter 'B' for Both, 'L' for Left and 'R' for Right: "))
# Set filepath of data
path = raw_input('Please enter file path of the galaxy\'s FITS file: ')
# Set filename
file = raw_input('Please enter file name of the galaxy\'s FITS file (with complete extension): ')
# Set desired signal-to-noise ratio
desired_SNR = float(input('Please enter the desired Signal-To-Noise Ratio: '))
# Set centre pixel number
centre = float(input('Please enter the center pixel of the galaxy: '))
# Set pixel per kiloparsec ratio
pix_per_kpc = float(input('Please enter the pixel per kpc value for this galaxy: '))
# Extract regions: 'B' runs both sides, 'L' only leftward, 'R' only rightward.
if mode == 'B':
    extract_left(path, file, desired_SNR, centre, pix_per_kpc)
    extract_1kpc_left(path, file, centre, pix_per_kpc)
    extract_right(path, file, desired_SNR, centre, pix_per_kpc)
    extract_1kpc_right(path, file, centre, pix_per_kpc)
elif mode == 'L':
    extract_left(path, file, desired_SNR, centre, pix_per_kpc)
    extract_1kpc_left(path, file, centre, pix_per_kpc)
elif mode == 'R':
    extract_right(path, file, desired_SNR, centre, pix_per_kpc)
    extract_1kpc_right(path, file, centre, pix_per_kpc)
|
minhlong94/PPL-2021 | src/lex/__init__.py | """This is the module containing all codes needed for the lexer.
This module would take the character stream and generate a collection of tokens.
Example:
>>> from lex import Lexer
>>>
>>> with open(path_to_file, "r") as f:
>>> character_stream = f.read()
>>> lexer = Lexer(character_stream)
# Lexer has an Iterator to get tokens:
>>> for token in lexer.tokens():
>>> print(token)
"""
__all__ = [
"token_names",
"Token",
"LexerError",
"Lexer",
]
from enum import Enum as _Enum
from typing import Iterator as _Iterator
from typing import Sequence as _Sequence
try:
from lex import token_names as _token_names
except ImportError:
from src.lex import token_names as _token_names
class Token:
    """A simple Token structure.

    Contains the token position, name and value.

    Attributes:
        position (str): The position of the token, its format is
            ``{line_number}:{position from the start of the line}``.
        token_name (str): The name of the token.
        value (str): The value of the token.
    """

    def __init__(self, line_number, line_start_position, start_position, end_position, token_name, value):
        """Token constructor.

        Args:
            line_number (int): The current line number.
            line_start_position (int): The start position of the current line.
            start_position (int): The start position of the token.
            end_position (int): The end position of the token.
            token_name (str): The name of the token.
            value (str): The value of the token.
        """
        self.position = f"{line_number}:{start_position - line_start_position}"
        self._start_position = start_position
        self._end_position = end_position
        self.token_name = token_name
        self.value = value

    def key(self):
        """Return the token's absolute start position (usable as a sort key)."""
        return self._start_position

    def check_token(self, *args) -> bool:
        """Check the token type whether it matches that of the passed argument.

        Args:
            *args: The function takes only one argument, which can be a string,
                an enum object, or a sequence object.

        Returns:
            True if matches, False otherwise.
            If the argument is a sequence, True if it contains this token type,
            False otherwise.

        Raises:
            TypeError: An error occurred when the argument is missing, or of
                incorrect types.
        """
        if len(args) == 1:
            if isinstance(args[0], str):
                return self.token_name == args[0]
            elif isinstance(args[0], _Enum):
                return self.token_name == args[0].name
            elif isinstance(args[0], _Sequence):
                return self.token_name in args[0]
        # BUGFIX: message referred to a nonexistent `_check_token()`.
        raise TypeError("check_token() taking 1 argument, type: str, Enum or Sequence")

    def __str__(self):
        return f"{self.position}\t {self._start_position}\t {self.token_name}\t {self.value}"

    def __hash__(self):
        # BUGFIX: the hash previously mixed in _end_position, which __eq__
        # ignores, so two equal tokens could hash differently and misbehave
        # in sets/dicts.  Hash exactly the fields __eq__ compares.
        return hash((self.position, self.token_name, self.value))

    def __eq__(self, other):
        if isinstance(other, Token):
            return self.position == other.position and self.token_name == other.token_name and self.value == other.value
        return NotImplemented
class LexerError(Exception):
    """Exception raised for unrecognized input in the character stream.

    Attributes:
        position (int): The position in the stream where the error occurred.
        message (str): Human readable description of the error.
    """

    def __init__(self, position, message: str = None):
        """Build the error, defaulting to a generic unknown-token message.

        Args:
            position (int): The start position of the error.
            message: Human readable description of the error. Optional.
        """
        self.position = position
        if message is None:
            message = f"Unknown token at position {self.position}"
        self.message = message
        super().__init__(self.message)
class Lexer:
"""The lexer.
Scans the file as stream and tokenize it.
Attributes:
stream (str): The data to work on.
EOF (boolean): The flag to indicate end of file.
current_position (int): The position in the stream currently.
current_char (str): The current character.
"""
def __init__(self, character_stream):
"""Lexer constructor.
Args:
character_stream (str): The character stream of the input file.
"""
self.stream = character_stream
self.EOF = False
self.line_number = 1
self.line_start_position = 0
self.current_position = -1
self.current_char = ""
self._next_char()
def _next_char(self):
"""Moves to the next character. Set `EOF` to True when end of file."""
self.current_position += 1
if self.current_position >= len(self.stream):
self.current_char = "\0"
self.EOF = True
else:
self.current_char = self.stream[self.current_position]
if self.current_char == "\n":
self.line_number += 1
self.line_start_position = self.current_position
def _peek(self):
"""Returns the lookahead character.
Returns:
str: The next character in the stream, null character "\0" if end of file.
"""
if self.current_position + 1 >= len(self.stream):
return "\0"
return self.stream[self.current_position + 1]
def _skip(self):
"""Skips whitespaces, newlines and comments.
Raises:
LexerError: An error occurred while getting tokens in the character stream.
"""
if self.current_char == "/":
last_position = self.current_position
if self._peek() == "/": # Single-line comment
while self.current_char != "\n":
self._next_char()
elif self._peek() == "*": # Multiple-line comment
while self.current_char != "*" or self._peek() != "/":
self._next_char()
if self.EOF: # Check unclosed comment
raise LexerError(
last_position, f"Unclosed comment at position {last_position}")
self._next_char()
self._next_char()
while self.current_char in [" ", "\t", "\r", "\n"]:
self._next_char()
def _get_token(self):
"""Returns the next token.
Returns:
Token: An token found in the stream.
Raises:
LexerError: An error occurred while getting tokens in the character stream.
"""
self._skip()
token = None
# Checks single-quoted string.
if self.current_char == "'":
start_position = self.current_position
while not (self.current_char != "\\" and self._peek() == "'"):
self._next_char()
if self.EOF:
raise LexerError(
start_position, f"EOL while scanning string literal at position {start_position}")
self._next_char()
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.STRING, self.stream[start_position:self.current_position + 1])
# Checks double-quoted string.
elif self.current_char == '"':
start_position = self.current_position
while not (self.current_char != "\\" and self._peek() == '"'):
self._next_char()
if self.EOF:
raise LexerError(
start_position, f"EOL while scanning string literal at position {start_position}")
self._next_char()
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.STRING, self.stream[start_position:self.current_position + 1])
# Checks number begins with a digit.
elif self.current_char.isdigit():
start_position = self.current_position
while self._peek().isdigit():
self._next_char()
if self._peek() == ".":
self._next_char()
while self._peek().isdigit():
self._next_char()
if self._peek() in ["d", "D", "f", "F"]:
self._next_char()
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.NUMBER, self.stream[start_position:self.current_position + 1])
# Checks number begins with a dot.
elif self.current_char == ".":
if self._peek().isdigit():
start_position = self.current_position
while self._peek().isdigit():
self._next_char()
if self._peek() in ["d", "D", "f", "F"]:
self._next_char()
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.NUMBER, self.stream[start_position:self.current_position + 1])
else:
token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,
_token_names.Separators(self.current_char).name, self.current_char)
# Checks word begins with an alphabetic letter or an underscore.
elif self.current_char.isalpha() or self.current_char == "_":
start_position = self.current_position
while True:
if (self._peek() in [" ", "\t", "\r", "\n", "\0"]
or self._peek() in _token_names.SEPARATORS
or self._peek() in _token_names.OPERATORS):
break
self._next_char()
word = self.stream[start_position:self.current_position + 1]
# Checks if word is a keyword.
if word in _token_names.Keywords.values():
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.Keywords(word).name, word)
elif word in _token_names.KeywordsType.values():
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.KeywordsType(word).name, word)
elif word in _token_names.KeywordsAttribute.values():
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.KeywordsAttribute(word).name, word)
# Otherwise put it as identifier.
else:
token = Token(self.line_number, self.line_start_position, start_position, self.current_position,
_token_names.IDENTIFIER, word)
# Checks if is a separator.
elif self.current_char in _token_names.Separators.values():
token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,
_token_names.Separators(self.current_char).name, self.current_char)
# Checks if is an operator.
elif self.current_char in _token_names.Operators.values():
last_position = self.current_position
if self.current_char not in ["&", "|"] and self._peek() == "=":
val = self.current_char + self._peek()
self._next_char()
token = Token(self.line_number, self.line_start_position, last_position, self.current_position,
_token_names.Operators(val).name, val)
elif self.current_char == "+" and self._peek() == "+":
val = self.current_char + self._peek()
self._next_char()
token = Token(self.line_number, self.line_start_position, last_position, self.current_position,
_token_names.Operators(val).name, val)
elif self.current_char == "-" and self._peek() == "-":
val = self.current_char + self._peek()
self._next_char()
token = Token(self.line_number, self.line_start_position, last_position, self.current_position,
_token_names.Operators(val).name, val)
elif self.current_char == "&" and self._peek() == "&":
val = self.current_char + self._peek()
self._next_char()
token = Token(self.line_number, self.line_start_position, last_position, self.current_position,
_token_names.Operators(val).name, val)
elif self.current_char == "|" and self._peek() == "|":
val = self.current_char + self._peek()
self._next_char()
token = Token(self.line_number, self.line_start_position, last_position, self.current_position,
_token_names.Operators(val).name, val)
else:
token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,
_token_names.Operators(self.current_char).name, self.current_char)
# Checks if is EOF
elif self.current_char == "\0":
token = Token(self.line_number, self.line_start_position, self.current_position, self.current_position,
_token_names.EOF, self.current_char)
# Raise error if is an unknown token.
else:
raise LexerError(self.current_position)
self._next_char()
return token
def reset(self):
"""Resets the lexer to its initial state."""
self.EOF = False
self.current_position = -1
self.current_char = ""
self._next_char()
    def tokens(self, ignore=True) -> _Iterator[Token]:
        """ A generator that iterates over all of the tokens found in the character stream.
        Args:
            ignore (bool): If True, skip every token before the first ``class``
                keyword (treated as file header) and every token whose name is
                in the Ignored group.
        Yields:
            Token: A token object.
        Raises:
            LexerError: An error occurred while getting tokens in the character stream.
        """
        self.reset()
        # True until the first ``class`` keyword is seen; while True, all
        # preceding tokens are dropped when ``ignore`` is set.
        header = True
        while not self.EOF:
            token = self._get_token()
            if token is not None:
                if ignore:
                    if header and not token.check_token(_token_names.KeywordsType("class")):
                        continue
                    else:
                        header = False
                    # Drop tokens the later pipeline stages do not support.
                    if token.check_token(_token_names.Ignored.names()):
                        continue
                yield token
|
minhlong94/PPL-2021 | src/symtableTest.py | <filename>src/symtableTest.py
import pprint
import sys
from lex import Lexer
from symbol_table import SymbolTable
def run():
    """Build a symbol table from the file named in argv[1] and pretty-print it."""
    with open(sys.argv[1], "r") as source_file:
        source_text = source_file.read()
    symbol_table = SymbolTable(Lexer(source_text))
    pprint.pprint(symbol_table, width=640)
if __name__ == '__main__':
    # Fall back to the bundled test case when no file argument is supplied.
    if len(sys.argv) < 2:
        sys.argv = ["symtableTest.py", "../test/case1/Main.java"]
    run()
|
minhlong94/PPL-2021 | src/ast/__init__.py | <gh_stars>0
# Names re-exported by ``from ast import *``: one entry per AST node class
# defined in this module.
__all__ = [
    "programTree",
    "blockTree",
    "declrTree",
    "funcDeclTree",
    "funcHeadTree",
    "typeTree",
    "idTree",
    "numberTree",
    "stringTree",
    "assignTree",
    "ifTree",
    "whileTree",
    "returnTree",
    "callTree",
    "relOPTree",
    "addOPTree",
    "multOPTree",
]
from abc import ABC as _ABC
class _AST(_ABC):
    """A minimal abstract syntax tree node.

    Each node carries a display label and an ordered list of child ASTs.

    Attributes:
        nodeCount (static int): Running total of nodes created in the program tree.
        _kids (list): Immediate child ASTs, in insertion order.
        _label (str): Label of this node.
        _nodeNum (int): 1-based creation index of this node in the program tree.
    """

    nodeCount = 0

    def __init__(self, label):
        """Create a node with the given label and register it in the global count.

        Args:
            label: The root node's label.
        """
        super().__init__()
        self._kids = []
        _AST.nodeCount += 1
        self._nodeNum = _AST.nodeCount
        self._label = label

    def getKid(self, idx):
        """Return the child AST at 1-based index ``idx``.

        Returns:
            (AST) The child, or None when ``idx`` is out of bounds.
        """
        if 1 <= idx <= self.kidCount():
            return self._kids[idx - 1]
        return None

    def getKids(self):
        """Return the list holding the children ASTs of this node."""
        return self._kids

    def kidCount(self):
        """Return how many children ASTs this node has."""
        return len(self._kids)

    def addKid(self, kidAST):
        """Append ``kidAST`` as the last child.

        Returns:
            (AST) self, so calls can be chained.
        """
        self._kids.append(kidAST)
        return self

    def setLabel(self, label):
        """Replace this node's label."""
        self._label = label

    def getLabel(self):
        """Return this node's label."""
        return self._label
# GRAMMAR NOTES #
# --------------------------------------------------------------------#
# Children trees are noted by '*' #
# [] stands for a list of #
class programTree(_AST):
    """ An AST for the program structure.
    GRAMMAR:
        program :- class *id *block
    """
    def __init__(self):
        # The label is the display name used when the tree is printed.
        super().__init__('Program/Class')
class blockTree(_AST):
    """ An AST for a codeblock structure.
    GRAMMAR:
        block :- { [*statements] }
    """
    def __init__(self):
        # Children are the statement subtrees of the block, in source order.
        super().__init__('Code block')
class declrTree(_AST):
    """ An AST for a declaration statement structure.
    GRAMMAR:
        declr :- *type *id = *expr ;
                 *type *id ;
                 *type *id
    """
    # NOTE(review): the third (semicolon-less) form appears to cover function
    # parameters — confirm against Parser.decl(requireSemiColon=False).
    def __init__(self):
        super().__init__('Declaration')
class funcDeclTree(_AST):
    """ An AST for a function declaration structure.
    GRAMMAR:
        funcDeclr :- *type *id *funcHead *block
    """
    def __init__(self):
        # Kids, in order: return type, name, header (params), body block.
        super().__init__('Function Declaration')
class funcHeadTree(_AST):
    """ An AST for a function header structure.
    GRAMMAR:
        funcHead :- ()              # void
                    ( [*declr] )    # params / list of declr
    """
    def __init__(self):
        # No children means an empty (void) parameter list.
        super().__init__('Function header')
class typeTree(_AST):
    """ An AST for a type structure.
    ATOMIC/LEAF
    GRAMMAR:
        type :- type
    Args:
        isList (bool): whether the type is for a single or a list/array of variables.
            E.g: String vs String[]
    """
    def __init__(self, isList=False):
        super().__init__('Type')
        # Stored under a private name: the original assigned to
        # ``self.isArray``, which shadowed the isArray() method below and
        # made it uncallable (bool is not callable).
        self._is_array = isList
    def setArray(self):
        """ Mark this type as an array type.
        Returns:
            None
        """
        self._is_array = True
    def isArray(self):
        """ Return whether the type is an array or not.
        Returns:
            (bool) True when the type is an array.
        """
        return self._is_array
class idTree(_AST):
    """ An AST for a identifier or name.
    ATOMIC/LEAF
    GRAMMAR:
        id :- id/name
    Args:
        name (str): name of the identifier.
        key: key of the source token — presumably its position key, used to
            link into the symbol table (TODO confirm; no accessor is provided).
    """
    def __init__(self, name, key):
        super().__init__('id')
        self.name = name
        self.key = key
    def getName(self):
        """ Return the value of id's name.
        Returns:
            (str) name of the id.
        """
        return self.name
class numberTree(_AST):
    """ An AST for a literal number.
    ATOMIC/LEAF
    GRAMMAR:
        num :- <int>
               <float>
    Args:
        value (str): value of the literal number, kept as its source text.
    """
    def __init__(self, value):
        super().__init__('literal number')
        self.value = value
    def getValue(self):
        """ Return the value of literal number.
        Returns:
            (str) value of the number.
        """
        return self.value
class stringTree(_AST):
    """ An AST for a literal string.
    ATOMIC/LEAF
    GRAMMAR:
        string :- string
    Args:
        value (str): value of the literal string, kept as its source text.
    """
    def __init__(self, value):
        super().__init__('literal string')
        self.value = value
    def getValue(self):
        """ Return the value of literal string.
        Returns:
            (str) value of the string.
        """
        return self.value
class assignTree(_AST):
    """ An AST for a assignment structure.
    GRAMMAR:
        assignment :- *id assign_op *expr ;
    Args:
        assignToken (str): the assignment operator token name (e.g. =, +=).
    """
    def __init__(self, assignToken):
        super().__init__('Assignment')
        self.assignToken = assignToken
    def getToken(self):
        """ Return the assignment operator token name.
        Returns:
            (str) the assignment operator token name.
        """
        return self.assignToken
class ifTree(_AST):
    """ An AST for a ifStatement structure.
    GRAMMAR:
        ifStatement :- if ( *expr ) *block
                       if ( *expr ) *block else *block
    """
    def __init__(self):
        # Kids: condition, then-block, and optionally an else-block (3rd kid).
        super().__init__('if statement')
class whileTree(_AST):
    """ An AST for a whileStatement structure.
    GRAMMAR:
        whileStatement :- while ( *expr ) *block
    """
    def __init__(self):
        # Kids: loop condition followed by the loop body block.
        super().__init__('while statement')
class returnTree(_AST):
    """ An AST for a returnStatement structure.
    GRAMMAR:
        returnStatement :- return expr ;
    """
    def __init__(self):
        # Single kid: the returned expression.
        super().__init__('return statement')
class callTree(_AST):
    """ An AST for a function call structure.
    GRAMMAR:
        funcCall :- *id ()
                    *id ( [*expr] )
    """
    def __init__(self):
        # First kid is the callee id; remaining kids are argument expressions.
        super().__init__('function call')
class relOPTree(_AST):
    """ An AST for a relation operation structure.
    GRAMMAR:
        relOp :- *expr rel_op *expr
    Args:
        relToken (str): the relational operator token name (e.g. <, ==, !=).
    """
    def __init__(self, relToken):
        super().__init__('Relational Operation')
        self.relToken = relToken
    def getToken(self):
        """ Return the relation operator token name.
        Returns:
            (str) the relation operator token name.
        """
        return self.relToken
class addOPTree(_AST):
    """ An AST for an addition operation structure.
    GRAMMAR:
        addOp :- *expr add_op *expr
    Args:
        addToken (str): the addition operator token name (e.g. +, -, |).
    """
    def __init__(self, addToken):
        super().__init__('Additional Operation')
        self.addToken = addToken
    def getToken(self):
        """ Return the addition operator token name.
        Returns:
            (str) the addition operator token name.
        """
        return self.addToken
class multOPTree(_AST):
    """ An AST for a multiplication operation structure.
    GRAMMAR:
        multOp :- *expr mult_op *expr
    Args:
        multToken (str): the multiplication operator token name (e.g. *, /, &).
    """
    def __init__(self, multToken):
        super().__init__('Multiplication Operation')
        self.multToken = multToken
    def getToken(self):
        """ Return the multiplication operator token name.
        Returns:
            (str) the multiplication operator token name.
        """
        return self.multToken
|
minhlong94/PPL-2021 | src/codegen/__init__.py | from ast import *
from lex import *
class Emitter:
    """Accumulates generated C source text and writes it to ``<name>.c``."""

    def __init__(self, name):
        self.file_path = f"{name}.c"
        self.header = ""
        self.code = ""

    def emitLine(self, code):
        """Append one line of code, terminated with a newline."""
        self.code += f"{code}\n"

    def writeFile(self):
        """Write the header followed by the accumulated code to disk."""
        output = self.header + self.code
        with open(self.file_path, "w+") as file:
            file.write(output)
# Java library calls/constants mapped to their C equivalents.
MAPPER = {
    "Math.PI": "M_PI",
    "Math.pow": "pow",
    "Math.sqrt": "sqrt",
    "Math.abs": "abs",
    "System.out.println": "println",
    "System.out.printf": "printf",
}
# Java input calls mapped to a (C variable type, scanf prefix) pair; the
# code generator appends the variable name and closing ");" itself.
INPUT_FUNC = {
    "scanner.nextDouble": ("double", "scanf(\"%lf\", &")
}
# Java type names whose C spelling differs.
TYPE_MAPPER = {
    "String": "char*"
}
# Java constructs with no C counterpart; dropped from the generated output.
IGNORE = ["Scanner", "scanner.close"]
class CodeGen:
    """Walks the parsed AST and emits equivalent C source via the Emitter."""

    def __init__(self, parser, emitter):
        """Parse the whole program immediately and emit the C includes header.

        Args:
            parser: A Parser whose program() yields the AST root.
            emitter: An Emitter collecting the generated C text.
        """
        self.ast = parser.program()
        self.emitter = emitter
        header = """#include <stdio.h> \n#include <math.h>"""
        self.codegen = ""
        self.emitter.emitLine(header)

    def travel_tree(self, t):
        """Recursively translate AST node ``t`` into a C source fragment.

        Args:
            t: Any AST node produced by the parser.

        Returns:
            str: The C code fragment for this subtree.

        Raises:
            SyntaxError: If ``t`` is not a recognized AST node type.
        """
        if isinstance(t, (programTree, funcDeclTree)):
            # Concatenate the translations of all children in order.
            code = ""
            for tree in t.getKids():
                code += self.travel_tree(tree)
            return code
        elif isinstance(t, assignTree):
            # lhs <op> rhs — operator comes from the assignment token name.
            code = ""
            code += self.travel_tree(t.getKid(1)) + " "
            code += t.getToken() + " "
            code += self.travel_tree(t.getKid(2)) + " "
            return code
        elif isinstance(t, declrTree):
            datatype = self.travel_tree(t.getKid(1)) + " "
            name = self.travel_tree(t.getKid(2)) + " "
            code = datatype + name
            if len(t.getKids()) == 3:
                # NOTE(review): travel_tree(t.getKid(3)) is re-evaluated up to
                # four times here; the result could be hoisted into a local.
                if self.travel_tree(t.getKid(3)) in INPUT_FUNC.values():
                    # Input call: declare the variable, then emit the scanf.
                    return self.travel_tree(t.getKid(3))[0] + " " + name + ";\n" + self.travel_tree(t.getKid(3))[1] + name + ");\n"
                if self.travel_tree(t.getKid(3)) == "":
                    # Initializer translated to nothing (ignored construct).
                    return "\n"
                code += " = " + self.travel_tree(t.getKid(3)) + ";\n"
            return code
        elif isinstance(t, callTree):
            code = ""
            for idx, kid in enumerate(t.getKids()):
                if idx == 0:
                    # First kid is the callee name; remap / special-case it.
                    name = self.travel_tree(t.getKid(1))
                    if name in MAPPER:
                        name = MAPPER[name]
                    elif name in INPUT_FUNC:
                        return INPUT_FUNC[name]
                    elif name in IGNORE:
                        return ""
                    code += name + "("
                else:
                    # Remaining kids are comma-separated argument expressions.
                    code += self.travel_tree(t.getKid(idx + 1))
                    if idx != len(t.getKids()) - 1:
                        code += ","
            code += ")"
            return code
        elif isinstance(t, (addOPTree, multOPTree, relOPTree)):
            # Binary operation: left, operator glyph, right.
            code = self.travel_tree(t.getKid(1))
            code += token_names.get_value_by_name(t.getToken())
            code += self.travel_tree(t.getKid(2))
            return code
        elif isinstance(t, typeTree):
            # Remap Java-only type spellings (e.g. String -> char*).
            if t.getLabel() in TYPE_MAPPER:
                code = TYPE_MAPPER[t.getLabel()]
            else:
                code = t.getLabel()
            return code + " "
        elif isinstance(t, idTree):
            name = t.getName()
            code = ""
            if name == "Math.PI":
                code += "M_PI"
            elif name == "Math.pow":
                code += "pow"
            else:
                code += name
            return code
        elif isinstance(t, numberTree):
            code = t.getValue()
            return code + " "
        elif isinstance(t, stringTree):
            code = t.getValue()
            return code + " "
        elif isinstance(t, blockTree):
            # Braced block containing every child statement.
            code = "{ \n"
            for tree in t.getKids():
                code += self.travel_tree(tree)
            code += "}"
            return code
        elif isinstance(t, funcHeadTree):
            # Parenthesized, comma-separated parameter declarations.
            code = "("
            for idx, tree in enumerate(t.getKids()):
                code += self.travel_tree(tree)
                if idx != len(t.getKids())-1:
                    code += ", "
            code += ")"
            return code + " "
        elif isinstance(t, ifTree):
            blockCond = self.travel_tree(t.getKids()[0])
            blockIf = self.travel_tree(t.getKids()[1])
            code = "if (" + blockCond + ")\n "
            code += blockIf + "\n"
            if len(t.getKids()) == 3:
                # Third kid, when present, is the else branch.
                blockElse = self.travel_tree(t.getKids()[2])
                code += "else\n" + blockElse + "\n "
            return code
        elif isinstance(t, whileTree):
            blockCond = self.travel_tree(t.getKids()[0])
            blockWhile = self.travel_tree(t.getKids()[1])
            code = "while " + blockCond + "{ \n "
            code += blockWhile + "} \n "
            return code
        elif isinstance(t, returnTree):
            code = "return "
            for tree in t.getKids():
                code += self.travel_tree(tree)
            return code + ";"
        else:
            raise SyntaxError(f"UwU What's dis error? {type(t)}")

    def generate_code(self):
        """Translate the whole AST and push the result into the emitter."""
        code = self.travel_tree(self.ast)
        # NOTE(review): [1:-1] strips the first and last character of the text,
        # presumably the outermost block's braces — confirm.
        self.emitter.emitLine(code[1:-1])
        # Add whitespace
|
minhlong94/PPL-2021 | src/symbol_table/__init__.py | <filename>src/symbol_table/__init__.py
"""This is the module to generate the symbol table.
This module would generate a symbol table from a collection of tokens.
Example:
>>> from lex import Lexer
>>> from symbol_table import SymbolTable
>>>
>>> lexer = Lexer(character_stream)
>>> st = SymbolTable(lexer=lexer)
"""
__all__ = ["SymbolTable"]
from collections import OrderedDict as _OrderedDict
from typing import Tuple as _Tuple
try:
from lex import Lexer as _Lexer
from lex import LexerError as _LexerError
from lex import Token as _Token
from lex import token_names as _token_names
except ImportError:
from src.lex import Lexer as _Lexer
from src.lex import LexerError as _LexerError
from src.lex import Token as _Token
from src.lex import token_names as _token_names
class SymbolTable(_OrderedDict):
    """The symbol table.

    An ordered mapping from an identifier's token key to a record describing
    that identifier: its name, type, attributes, declaration position, and
    lexical scope. The table is built eagerly during construction.

    Attributes:
        current_token (_Token): The current token in the iteration.
        next_token (_Token): The next token in the iteration.
    """
    def __init__(self, lexer: _Lexer):
        """SymbolTable constructor.
        Consumes the lexer's token stream and builds the whole table.
        Args:
            lexer: The lexer for generating collections of token.
        """
        super().__init__()
        self.tokens = lexer.tokens()
        self.current_token = None
        self.next_token = None
        # Two advances prime both current_token and next_token.
        self._advance()
        self._advance()
        self._generate()
    def _advance(self):
        """Advances the token collection."""
        self.current_token = self.next_token
        self.next_token = next(self.tokens, None)
    def last(self):
        """Return the key of the most recently inserted table entry."""
        return next(reversed(self))
    def _generate(self):
        """Function for generating a symbol table.

        Walks the token stream once; '{' and '(' open a scope level, '}' and
        ')' close one. Type and attribute keywords seen before an identifier
        are buffered and attached to that identifier's entry.
        """
        # -1 = outside the class; 0 = class body; >=1 = nested blocks.
        scope_level = -1
        identifier_type = []
        identifier_attribute = []
        while self.current_token is not None:
            if self.current_token.check_token(_token_names.IDENTIFIER):
                # NOTE(review): assumes Token.key() yields a unique position
                # key usable as the dict key — confirm against the Token class.
                identifier_key = identifier_position = self.current_token.key()
                identifier_name = self.current_token.value
                if self.next_token.check_token(_token_names.Separators("[")):
                    # Fold array subscript tokens into the identifier name.
                    while True:
                        self._advance()
                        identifier_name += self.current_token.value
                        if (self.current_token.check_token(_token_names.Separators("]"))
                                and not self.next_token.check_token(_token_names.Separators("["))):
                            break
                if self.next_token.check_token(_token_names.Separators("(")):
                    # A '(' right after the identifier marks a function.
                    identifier_type.append("function")
                if scope_level == -1:
                    scope = "outer_scope"
                elif scope_level == 0:
                    scope = "class_scope"
                elif scope_level >= 1:
                    scope = f"inner_scope_{scope_level}"
                else:
                    raise _LexerError(self.current_token.position, "Out of scope!")
                if not identifier_type:
                    try:
                        # A valid key must fulfill all the criteria below:
                        # has the same identifier name,
                        # its identifier type cannot be NoneType,
                        # has the same or larger scope as the current identifier,
                        # is the most recent key.
                        # If there is a valid key, pass it as the identifier_position for the current identifier.
                        latest_valid_key = next(key for key, value in reversed(self.items())
                                                if (value["identifier_name"] == identifier_name
                                                    and value["identifier_type"]
                                                    and value["identifier_scope"][1] <= scope_level))
                    except StopIteration:
                        pass
                    else:
                        identifier_position = latest_valid_key
                self[identifier_key] = {
                    "identifier_position": identifier_position,
                    "identifier_name": identifier_name,
                    "identifier_type": tuple(identifier_type),
                    "identifier_attribute": tuple(identifier_attribute),
                    "identifier_scope": (scope, scope_level),
                }
                # Buffered type/attribute info applies to one identifier only.
                identifier_type.clear()
                identifier_attribute.clear()
            elif self.current_token.check_token(_token_names.KeywordsType.names()):
                if self.next_token.check_token(_token_names.Separators("[")):
                    # Array type: fold the bracket tokens into the type name.
                    id_type = self.current_token.value
                    while True:
                        self._advance()
                        id_type += self.current_token.value
                        if (self.current_token.check_token(_token_names.Separators("]"))
                                and not self.next_token.check_token(_token_names.Separators("["))):
                            break
                    identifier_type.append(id_type)
                elif self.next_token.check_token(_token_names.IDENTIFIER):
                    identifier_type.append(self.current_token.value)
            elif self.current_token.check_token(_token_names.KeywordsAttribute.names()):
                # Only buffer the attribute when it precedes more attribute
                # or type keywords (i.e. it belongs to a declaration).
                if (self.next_token.check_token(_token_names.KeywordsAttribute.names())
                        or self.next_token.check_token(_token_names.KeywordsType.names())):
                    identifier_attribute.append(self.current_token.value)
            elif (self.current_token.check_token(_token_names.Separators("{"))
                    or self.current_token.check_token(_token_names.Separators("("))):
                scope_level += 1
            elif (self.current_token.check_token(_token_names.Separators("}"))
                    or self.current_token.check_token(_token_names.Separators(")"))):
                scope_level -= 1
            self._advance()
    def get_declaration_data_type(self, key):
        """Return the declared data type (first type entry) of the entry where
        the given identifier was originally declared."""
        return self.get_identifier_type(self.get_identifier_position(key))[0]
    def get_identifier_position(self, identifier_key) -> int:
        """Gets the declared location of the given identifier key.
        Args:
            identifier_key (int): The dictionary key of the identifier.
        Returns:
            The value of identifier_position attribute of the identifier.
        """
        return self.get(identifier_key)["identifier_position"]
    def get_identifier_name(self, identifier_key) -> str:
        """Gets the name of the given identifier key.
        Args:
            identifier_key (int): The dictionary key of the identifier.
        Returns:
            The value of identifier_name attribute of the identifier.
        """
        return self.get(identifier_key)["identifier_name"]
    def get_identifier_type(self, identifier_key) -> _Tuple[str, ...]:
        """Gets the type of the given identifier key.
        Args:
            identifier_key (int): The dictionary key of the identifier.
        Returns:
            The value of identifier_type attribute of the identifier.
        """
        return self.get(identifier_key)["identifier_type"]
    def get_identifier_attribute(self, identifier_key) -> _Tuple[str, ...]:
        """Gets the attributes of the given identifier key.
        Args:
            identifier_key (int): The dictionary key of the identifier.
        Returns:
            The value of identifier_attribute attribute of the identifier.
        """
        return self.get(identifier_key)["identifier_attribute"]
    def get_identifier_scope(self, identifier_key) -> _Tuple[str, int]:
        """Gets the scope of the given identifier key.
        Args:
            identifier_key (int): The dictionary key of the identifier.
        Returns:
            The value of identifier_scope attribute of the identifier,
            which is a tuple of scope name and scope level.
        Example:
            >>> st = SymbolTable(lexer=lexer)
            >>> scope = st.get_identifier_scope(12)
            >>> scope
            ("class_scope", 0)
        """
        return self.get(identifier_key)["identifier_scope"]
|
minhlong94/PPL-2021 | src/parse/__init__.py | import sys
try:
from lex import *
from ast import *
except ImportError:
from src.lex import *
from src.ast import *
# Parser object keeps track of current token and checks if the code matches the grammar.
class Parser:
    """Recursive-descent parser: consumes the lexer's token stream and
    builds the AST defined in the ``ast`` package.

    The class attributes group the operator token names recognized when
    matching assignment, relational, additive and multiplicative operators.
    """
    assignOPs = [token_names.OPERATORS['='],
                 token_names.OPERATORS['+='],
                 token_names.OPERATORS['-='],
                 token_names.OPERATORS['*='],
                 token_names.OPERATORS['/='],
                 token_names.OPERATORS['%=']]
    relOPs = [token_names.OPERATORS['<'],
              token_names.OPERATORS['<='],
              token_names.OPERATORS['>'],
              token_names.OPERATORS['>='],
              token_names.OPERATORS['=='],
              token_names.OPERATORS['!=']]
    addOPs = [token_names.OPERATORS['+'],
              token_names.OPERATORS['-'],
              token_names.OPERATORS['|']]
    multOPs = [token_names.OPERATORS['*'],
               token_names.OPERATORS['/'],
               token_names.OPERATORS['&']]

    def __init__(self, lexer):
        """Store the lexer and prime curToken/peekToken."""
        self.lexer = lexer
        self.tokens = lexer.tokens()
        self.curToken = None
        self.peekToken = None
        self.nextToken()
        self.nextToken()  # Call this twice to initialize current and peek.

    # Return true if the current token matches.
    def checkToken(self, kind):
        return kind == self.curToken.token_name

    # Return true if the next token matches.
    def checkPeek(self, kind):
        return kind == self.peekToken.token_name

    # Try to match current token. If not, error. Advances the current token.
    def match(self, kinds):
        """Match curToken against one kind or a list of kinds.

        Returns:
            The token name that matched. Aborts the process on failure.
        """
        if type(kinds) is not list:
            kinds = [kinds]
        doesMatch = False
        for kind in kinds:
            if self.checkToken(kind):
                doesMatch = True
                matchType = kind
                break
        if not doesMatch:
            # NOTE(review): on failure the message names only the last kind tried.
            self.abort(f'Expected {token_names.get_value_by_name(kind)}, got {self.curToken.value}, at line {self.curToken.position}')
        self.nextToken()
        return matchType

    # Advances the current token.
    def nextToken(self):
        self.curToken = self.peekToken
        # The EOF default keeps peekToken valid once the stream is exhausted.
        self.peekToken = next(self.tokens, token_names.EOF)
        # No need to worry about passing the EOF, lexer handles that.

    def abort(self, message):
        """Print an error message and stop compilation."""
        sys.exit("Error. " + message)

    # HELPER FUNCTION DECLARATIONS END HERE #
    # --------------------------------------------------------------------#
    # PARSING LOGIC STARTS FROM HERE #

    def program(self):
        """program :- class *id *block"""
        t = programTree()
        # match(token_names.KEYWORDS_ATTRIBUTE['public'])
        self.match(token_names.KEYWORDS_TYPE['class'])
        self.match(token_names.IDENTIFIER)
        t.addKid(self.block())
        return t

    def block(self):
        """block :- { [*statements] }"""
        self.match(token_names.SEPARATORS['{'])
        t = blockTree()
        while True:
            try:
                t.addKid(self.statement())
            except SyntaxError:
                # A SyntaxError from statement() marks the end of this block.
                break
        self.match(token_names.SEPARATORS['}'])
        return t

    def decl(self, requireSemiColon=True):
        """declr/funcDeclr :- *type *id ... ; requireSemiColon=False is used
        for function parameters, which carry no terminator."""
        typ, name = self.typ(), self.name()
        if self.checkToken(token_names.SEPARATORS['(']):
            # '(' after the name: this is a function declaration.
            t = funcDeclTree().addKid(typ).addKid(name)
            t.addKid(self.funcHead())
            t.addKid(self.block())
            return t
        if self.checkToken(token_names.OPERATORS['=']) and requireSemiColon:
            self.nextToken()
            t = declrTree().addKid(typ).addKid(name).addKid(self.expr())
            self.match(token_names.SEPARATORS[';'])
            return t
        if requireSemiColon:
            self.match(token_names.SEPARATORS[';'])
        t = declrTree().addKid(typ).addKid(name)
        return t

    def typ(self):
        """type :- one of KEYWORDS_TYPE, optionally followed by []."""
        t = typeTree()
        for key in token_names.KEYWORDS_TYPE:
            types = token_names.KEYWORDS_TYPE[key]
            if self.checkToken(types):
                t.setLabel(key)
                self.nextToken()
                break
        if t.getLabel() == 'Type':
            # Label unchanged means no type keyword matched.
            raise SyntaxError(f'Unrecognized type: {token_names.get_value_by_name(self.curToken.token_name)}')
        if self.checkToken(token_names.SEPARATORS['[']):
            self.nextToken()
            self.match(token_names.SEPARATORS[']'])
            t.setArray()
        return t

    def name(self):
        """id :- IDENTIFIER; raises SyntaxError when curToken is not one."""
        if self.checkToken(token_names.IDENTIFIER):
            t = idTree(self.curToken.value, self.curToken.key())
            self.nextToken()
            return t
        raise SyntaxError(
            f'Expected: {token_names.IDENTIFIER}, got {token_names.get_value_by_name(self.curToken.token_name)}, at line {self.curToken.position}')

    def funcHead(self):
        """funcHead :- ( [*declr {, *declr}] )"""
        self.match(token_names.SEPARATORS['('])
        t = funcHeadTree()
        if not self.checkToken(token_names.SEPARATORS[')']):
            while True:
                t.addKid(self.decl(requireSemiColon=False))
                if self.checkToken(token_names.SEPARATORS[',']):
                    self.nextToken()
                else:
                    break
        self.match(token_names.SEPARATORS[')'])
        return t

    def statement(self):
        """Dispatch on curToken to the matching statement parser."""
        if self.curToken.token_name in token_names.KEYWORDS_TYPE.values():
            return self.decl()
        if self.checkToken(token_names.KEYWORDS['if']):
            t = ifTree()
            self.nextToken()
            t.addKid(self.expr(True))
            t.addKid(self.block())
            if self.checkToken(token_names.KEYWORDS['else']):
                self.nextToken()
                t.addKid(self.block())
            return t
        if self.checkToken(token_names.KEYWORDS['while']):
            t = whileTree()
            self.nextToken()
            t.addKid(self.expr(True))
            t.addKid(self.block())
            return t
        if self.checkToken(token_names.KEYWORDS['return']):
            t = returnTree()
            self.nextToken()
            t.addKid(self.expr())
            self.match(token_names.SEPARATORS[';'])
            return t
        if self.checkToken(token_names.SEPARATORS['{']):
            return self.block()
        # Otherwise: a function call or an assignment, both led by a name.
        kid = self.name()
        if self.checkToken(token_names.SEPARATORS['(']):
            self.nextToken()
            t = callTree().addKid(kid)
            if not self.checkToken(token_names.SEPARATORS[')']):
                while True:
                    t.addKid(self.expr())
                    if self.checkToken(token_names.SEPARATORS[',']):
                        self.nextToken()
                    else:
                        break
            self.match(token_names.SEPARATORS[')'])
            self.match(token_names.SEPARATORS[';'])
            return t
        t = assignTree(self.match(Parser.assignOPs)).addKid(kid)
        t.addKid(self.expr())
        self.match(token_names.SEPARATORS[';'])
        return t

    def expr(self, requireBracket=False):
        """expr :- simpleExpr [rel_op simpleExpr], optionally parenthesized."""
        if requireBracket or self.checkToken(token_names.SEPARATORS['(']):
            self.match(token_names.SEPARATORS['('])
            requireBracket = True
        kid = self.simpleExpr()
        t = self.formRelationTree()
        if t is None:
            # No relational operator: the expression is just the simpleExpr.
            if requireBracket:
                self.match(token_names.SEPARATORS[')'])
            return kid
        t.addKid(kid)
        t.addKid(self.simpleExpr())
        if requireBracket:
            self.match(token_names.SEPARATORS[')'])
        return t

    def simpleExpr(self):
        """simpleExpr :- term {add_op term} (left-associative)."""
        kid = self.term()
        t = self.formAddOpTree()
        while t is not None:
            t.addKid(kid)
            t.addKid(self.term())
            kid = t
            t = self.formAddOpTree()
        return kid

    def term(self):
        """term :- factor {mult_op factor} (left-associative)."""
        kid = self.factor()
        t = self.formMultOpTree()
        while t is not None:
            t.addKid(kid)
            t.addKid(self.factor())
            kid = t
            t = self.formMultOpTree()
        return kid

    def factor(self):
        """factor :- ( expr ) | number | string | id | funcCall"""
        if self.checkToken(token_names.SEPARATORS['(']):
            self.nextToken()
            t = self.expr()
            self.match(token_names.SEPARATORS[')'])
            return t
        if self.checkToken(token_names.NUMBER):
            t = numberTree(self.curToken.value)
            self.nextToken()
            return t
        if self.checkToken(token_names.STRING):
            t = stringTree(self.curToken.value)
            self.nextToken()
            return t
        t = self.name()
        if not self.checkToken(token_names.SEPARATORS['(']):
            return t
        # '(' after the name: the factor is a function call.
        self.nextToken()
        t = callTree().addKid(t)
        if not self.checkToken(token_names.SEPARATORS[')']):
            while True:
                t.addKid(self.expr())
                if self.checkToken(token_names.SEPARATORS[',']):
                    self.nextToken()
                else:
                    break
        self.match(token_names.SEPARATORS[')'])
        return t

    def formRelationTree(self):
        """Return a relOPTree when curToken is a relational operator, else None."""
        if self.curToken.token_name in Parser.relOPs:
            t = relOPTree(self.curToken.token_name)
            self.nextToken()
            return t
        else:
            return None

    def formAddOpTree(self):
        """Return an addOPTree when curToken is an additive operator, else None."""
        if self.curToken.token_name in Parser.addOPs:
            t = addOPTree(self.curToken.token_name)
            self.nextToken()
            return t
        else:
            return None

    def formMultOpTree(self):
        """Return a multOPTree when curToken is a multiplicative operator, else None."""
        if self.curToken.token_name in Parser.multOPs:
            t = multOPTree(self.curToken.token_name)
            self.nextToken()
            return t
        else:
            return None
|
minhlong94/PPL-2021 | src/miniJavaCompiler.py | import pathlib
from lex import *
from parse import *
from codegen import *
def main():
    """Compile the Java source file named in argv[1] and print the C output."""
    # NOTE(review): ``sys`` reaches this module through the star imports at
    # the top of the file — confirm, there is no direct ``import sys`` here.
    print("Mini Java Compiler")
    if len(sys.argv) != 2:
        sys.exit("Error: Compiler needs source file as argument.")
    with pathlib.Path(sys.argv[1]).open(mode="r") as f:
        buffer = f.read()
    # Initialize the lexer and parser.
    lexer = Lexer(buffer)
    parser = Parser(lexer)
    emitter = Emitter("cast1Main")
    # p = parser.program() # Start the parser.
    code_gen = CodeGen(parser, emitter)
    code_gen.generate_code()
    print(code_gen.emitter.code)
    print("Parsing completed.")
# sys.argv = ["", "../test/case1/Main.java"]
# Guard the entry point so importing this module does not start a compile.
if __name__ == "__main__":
    main()
|
minhlong94/PPL-2021 | src/lexerTest.py | from lex import *
import sys
def main():
    """Lex the source file named in argv[1] and print every token found."""
    print("Mini Java Compiler - Lexer Test")
    if len(sys.argv) != 2:
        sys.exit("Error: Compiler needs source file as argument.")
    with open(sys.argv[1], 'r') as f:
        buffer = f.read()
    lexer = Lexer(buffer)
    filler = ""
    # Token stream test
    print(f"{filler:-<50}\nToken Stream Test")
    # Exhaust the token generator, printing each token as it is produced.
    for token in lexer.tokens(ignore=True):
        print(token)
if __name__ == '__main__':
    # Hard-coded test input; overrides whatever was passed on the command line.
    sys.argv = ["./lexerTest.py", "../test/case1/Main.java"]
    main()
|
tedliUCAS/Spark-ml-algo-lib | ml-xgboost/demo/data/dermatology_process.py | #!/usr/bin/python
import numpy as np
# label need to be 0 to num_class -1
# Column 33 ('?' marker) becomes a 0/1 flag; column 34 is shifted to zero-based.
data = np.loadtxt('./dermatology.data', delimiter=',',
                  converters={33: lambda x:int(x == '?'), 34: lambda x:int(x) - 1})
sz = data.shape
# 70/30 train/test split by row order (no shuffling).
train = data[:int(sz[0] * 0.7), :]
test = data[int(sz[0] * 0.7):, :]
# Columns 0-32 are the features; column 34 holds the class label.
train_X = train[:, :33]
train_Y = train[:, 34]
test_X = test[:, :33]
test_Y = test[:, 34]
def process(X, Y):
    """Serialize samples into LIBSVM text format.

    Each output line is ``<label> <j>:<value> ...`` with one line per row of
    X; labels and values are truncated to int.

    Args:
        X: 2-D sequence of feature rows.
        Y: Sequence of labels, indexed in step with X.

    Returns:
        str: The formatted lines, each newline-terminated.
    """
    rows = []
    for i, row in enumerate(X):
        # Build each line as a list of fields and join once: the original
        # repeated ``s += ...`` concatenation was quadratic in output size.
        parts = [str(int(Y[i]))]
        parts.extend("%d:%d" % (j, int(val)) for j, val in enumerate(row))
        rows.append(" ".join(parts) + "\n")
    return "".join(rows)
# Write both splits in LIBSVM text format next to the raw data file.
with open("./dermatology.data.train", 'w') as fp:
    fp.write(process(train_X, train_Y))
with open("./dermatology.data.test", 'w') as fp:
    fp.write(process(test_X, test_Y))
PabloFreitasUfsc/bist-du-da | save_info.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Created Date: 19.07.2021
# =============================================================================
"""The Module Has Been Build for..."""
# Reference: https://github.com/sivel/speedtest-cli/wiki
# =============================================================================
# Imports
# =============================================================================
from pymongo.mongo_client import MongoClient
from typing import Union
import pymongo
def write_info_down(info):
    """Persist one speed-test result into the local MongoDB instance.

    Args:
        info: Result dict from speedtest (keys: download, upload, ping,
            timestamp, bytes_sent, bytes_received, share, client).
    """
    # NOTE(review): assumes a MongoDB server is listening on localhost:27017.
    # Annotation was ``Union[MongoClient]``, which is just MongoClient.
    db_client: MongoClient = pymongo.MongoClient("mongodb://localhost:27017/")
    data_base = db_client["internet_report"]
    collection_obj = data_base["collection_internet"]
    post = {
        "Download (bits/s)": info["download"],
        "Upload (bits/s)": info["upload"],
        "Ping": info["ping"],
        "timestamp": info["timestamp"],
        "bytes_sent": info["bytes_sent"],
        "bytes_received": info["bytes_received"],
        "share": info["share"],
        "client": info["client"],
    }
    collection_obj.insert_one(post)
|
PabloFreitasUfsc/bist-du-da | internet_test.py | <filename>internet_test.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Created Date: 19.07.2021
# =============================================================================
"""The Module Has Been Build for..."""
# Reference: https://github.com/sivel/speedtest-cli/wiki
# =============================================================================
# Imports
# =============================================================================
from time import time
import speedtest
import display_info as info
import save_info as save
import datetime
def check_input(input_var: str, input_msg: str):
    """Keep prompting until the user supplies a decimal string; return it as int."""
    while not input_var.isdecimal():
        info.warning_msg("Wrong input, please enter a integer number.")
        input_var = input(input_msg)
    return int(input_var)
def run_net_test(display_info=False, save_info=False):
    """Run one full speedtest (download, upload, share) and return the results.

    Args:
        display_info: When True, print the results table to the terminal.
        save_info: When True, persist the results to MongoDB.

    Returns:
        dict: The speedtest results dictionary.
    """
    servers = []
    # If you want to test against a specific server
    # servers = [1234]
    threads = None
    # If you want to use a single threaded test
    # threads = 1
    s = speedtest.Speedtest()
    s.get_servers(servers)
    s.get_best_server()
    s.download(threads=threads)
    s.upload(threads=threads, pre_allocate=False)
    # Upload the result to speedtest.net so a share link is available.
    s.results.share()
    if display_info:
        info.display_terminal(s.results.dict())
    if save_info:
        save.write_info_down(s.results.dict())
    return s.results.dict()
def no_internet():
    """Record a placeholder result marking that the connection was down."""
    # time_stamp = year-month-dayTHH:MM:SSZ
    time_stamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
    info = {
        "download": 0.0,
        "upload": 0.0,
        "ping": 404.0,  # Reference to error 404
        "timestamp": time_stamp,
        "bytes_sent": 0,
        "bytes_received": 0,
        "share": None,
        "client": None,
    }
    save.write_info_down(info)
|
PabloFreitasUfsc/bist-du-da | babel_support.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Created Date: 19.07.2021
# =============================================================================
"""The Module Has Been Build for..."""
# Reference: https://github.com/sivel/speedtest-cli/wiki
# =============================================================================
# Imports
# =============================================================================
from googletrans import Translator, LANGUAGES
import display_info
def translate_text(text: str, language_code: str) -> str:
    """Translate English *text* into the target language via Google Translate.

    Args:
        text: English source text.
        language_code: googletrans destination language code.

    Returns:
        The translated text (performs a network request).
    """
    translator = Translator()
    return translator.translate(text, dest=language_code, src="en").text
def check_language_code(language_code) -> bool:
    """Return True when *language_code* is a known googletrans language;
    otherwise print an error and return False."""
    if language_code not in LANGUAGES:
        display_info.error_msg(
            "Language code doesnt exist, it will be set as default, en."
        )
        return False
    return True
|
PabloFreitasUfsc/bist-du-da | display_info.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Created Date: 19.07.2021
# =============================================================================
"""The Module Has Been Build for..."""
# Reference: https://github.com/sivel/speedtest-cli/wiki
# =============================================================================
# Imports
# =============================================================================
from prettytable import PrettyTable
from colorama import Fore, Style
def warning_msg(text: str):
    """Print *text* to stdout in yellow (warning)."""
    print(Fore.YELLOW + text + Style.RESET_ALL)
def error_msg(text: str):
    """Print *text* to stdout in red (error)."""
    print(Fore.RED + text + Style.RESET_ALL)
def ok_msg(text: str):
    """Print *text* to stdout in green (success)."""
    print(Fore.GREEN + text + Style.RESET_ALL)
def info_msg(text: str):
    """Print *text* to stdout in blue (informational)."""
    print(Fore.BLUE + text + Style.RESET_ALL)
def display_terminal(results_dict):
    """Render the speed-test results as a single-row ASCII table on stdout."""
    # (column header, results_dict key) pairs, in display order.
    columns = [
        ("Download (bits/s)", "download"),
        ("Upload (bits/s)", "upload"),
        ("Ping", "ping"),
        # ("server", "server"),
        ("timestamp", "timestamp"),
        ("bytes_sent", "bytes_sent"),
        ("bytes_received", "bytes_received"),
        ("share", "share"),
        ("client", "client"),
    ]
    table = PrettyTable()
    table.field_names = [header for header, _ in columns]
    table.add_row([results_dict[key] for _, key in columns])
    print(table)
|
PabloFreitasUfsc/bist-du-da | setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# =============================================================================
# Created By : <NAME>
# Created Date: 19.07.2021
# =============================================================================
"""The Module Has Been Build for..."""
# Reference: https://github.com/sivel/speedtest-cli/wiki
# =============================================================================
# Imports
# =============================================================================
import time
import display_info
import internet_test
import argparse
import babel_support
from googletrans import LANGUAGES
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-l",
        "--language_code",
        # Typo fix: "onde" -> "one".
        help=f"Choose one of the following language codes: {LANGUAGES} ",
        required=False,
        default="en",
    )
    args = parser.parse_args()
    # Fall back to English if the requested code is unknown
    # (check_language_code already prints an error in that case).
    language_code = (
        args.language_code
        if babel_support.check_language_code(args.language_code)
        else "en"
    )
    loops = input(
        babel_support.translate_text(
            "Enter the number of tests that you want:", language_code
        )
    )
    # Consistency fix: re-prompt text matched to the first prompt
    # ("tests", not "test").
    loops = internet_test.check_input(
        loops,
        babel_support.translate_text(
            "Enter the number of tests that you want:", language_code
        ),
    )
    # No need to ask for a sleep time with just one loop.
    if loops == 1:
        sleep_time_minutes = 0
    else:
        sleep_time_minutes = input(
            babel_support.translate_text(
                "How much is the time between the tests [minutes]: ", language_code
            )
        )
        sleep_time_minutes = internet_test.check_input(
            sleep_time_minutes,
            babel_support.translate_text(
                "How much is the time between the tests [minutes]: ", language_code
            ),
        )
    # Hoisted out of the loop: the delay never changes between iterations.
    sleep_time_sec: float = sleep_time_minutes * 60
    for i in range(1, loops + 1):
        start = time.time()  # start execution time count
        display_info.info_msg(
            babel_support.translate_text(
                f"Running test number {i}/{loops}", language_code
            )
        )
        try:
            results_dict = internet_test.run_net_test(
                display_info=False, save_info=True
            )
        except Exception as e:
            internet_test.no_internet()
            display_info.error_msg(str(e))
        display_info.info_msg(
            babel_support.translate_text(
                f"Test took {(time.time() - start)} seconds.", language_code
            )
        )
        # Bug fix: the original also slept after the final test, delaying
        # program exit for no reason.
        if i < loops:
            time.sleep(sleep_time_sec)
    display_info.info_msg(
        babel_support.translate_text("End of Program.", language_code)
    )
|
ibennetch/pma-website | pmaweb/urls.py | <filename>pmaweb/urls.py
# -*- coding: UTF-8 -*-
# vim: set expandtab sw=4 ts=4 sts=4:
#
# phpMyAdmin web site
#
# Copyright (C) 2008 - 2016 <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import RedirectView
from django.views.generic import TemplateView
from pmaweb.views import PMAView, redirect_home_page, github_tree, github_commit
from security.views import PMASAView, PMASADraftView, redirect_security
from files.views import (
ReleaseList, ReleaseDetail, version_json, latest_download
)
from news.views import PostArchive, PostDetail
from news.feeds import NewsFeed
from files.feeds import ReleaseFeed
from security.feeds import PMASAFeed
from pmaweb.sitemaps import SITEMAPS
import django.contrib.sitemaps.views
TRANSLATIONS_RSS = 'https://hosted.weblate.org/exports/rss/phpmyadmin/'
handler404 = 'pmaweb.views.notfound'
urlpatterns = [
# Feeds
url(
r'news/feed/$',
NewsFeed(),
name='feed-news',
),
url(
r'files/feed/$',
ReleaseFeed(),
name='feed-files',
),
url(
r'security/feed/$',
PMASAFeed(),
name='feed-security',
),
# XML sitemap
url(
r'^sitemap.xml$',
django.contrib.sitemaps.views.index,
{'sitemaps': SITEMAPS},
name='sitemap',
),
url(
r'^sitemap-(?P<section>.+)\.xml$',
django.contrib.sitemaps.views.sitemap,
{'sitemaps': SITEMAPS},
name='django.contrib.sitemaps.views.sitemap',
),
# Pages
url(
r'^$',
PMAView.as_view(
template_name='index.html',
),
name='home'
),
url(
r'^news/$',
PostArchive.as_view(),
name='news'
),
url(
r'^news/(?P<page>[0-9]+)/$',
PostArchive.as_view(),
name='news-page'
),
url(
r'^news/(?P<year>\d+)/(?P<month>\d+)/(?P<day>\d+)/(?P<slug>[^/]+)/$',
PostDetail.as_view(),
name='news-item'
),
url(
r'^security/$',
PMAView.as_view(
template_name='security/index.html',
title='Security',
rss='feed-security',
rss_title='phpMyAdmin security announcements',
),
name='security'
),
url(
r'^security/PMASA-(?P<year>20[0-9][0-9])-(?P<sequence>[0-9]+)/$',
PMASAView.as_view(),
name='security-issue'
),
url(
r'^security/PMASA-(?P<year>20[0-9][0-9])-(?P<sequence>[0-9]+)/draft/$',
PMASADraftView.as_view(),
name='security-issue-draft'
),
url(
r'^support/$',
PMAView.as_view(
template_name='support.html',
title='Support',
),
name='support'
),
url(
r'^docs/$',
PMAView.as_view(
template_name='docs.html',
title='Documentation',
),
name='docs'
),
url(
r'^try/$',
PMAView.as_view(
template_name='try.html',
title='Try',
),
name='try'
),
url(
r'^contribute/$',
PMAView.as_view(
template_name='contribute.html',
title='Contribute',
),
name='contribute'
),
url(
r'^contractor/$',
PMAView.as_view(
template_name='contractor.html',
title='Work for us',
),
name='contractor'
),
url(
r'^sponsors/$',
PMAView.as_view(
template_name='sponsors.html',
title='Sponsors',
),
name='sponsors'
),
url(
r'^sponsors/subscribe/$',
PMAView.as_view(
template_name='sponsors-subscribe.html',
title='Subscribe to sponsorship',
),
name='sponsors-subscribe'
),
url(
r'^themes/$',
PMAView.as_view(
template_name='themes.html',
title='Themes',
),
name='themes'
),
url(
r'^license/$',
PMAView.as_view(
template_name='license.html',
title='License',
),
name='license'
),
url(
r'^team/$',
PMAView.as_view(
template_name='team.html',
title='Team',
),
name='team'
),
url(
r'^translations/$',
PMAView.as_view(
template_name='translations.html',
title='Translations',
rss=TRANSLATIONS_RSS,
rss_title='phpMyAdmin translation changes',
),
name='translations'
),
url(
r'^awards/$',
PMAView.as_view(
template_name='awards.html',
title='Awards',
),
name='awards'
),
url(
r'^about/$',
PMAView.as_view(
template_name='about.html',
title='About',
),
name='about'
),
url(
r'^15-years/$',
PMAView.as_view(
template_name='15-years.html',
title='15 years',
),
name='15-years'
),
url(
r'^donate/$',
PMAView.as_view(
template_name='donate.html',
title='Donate',
),
name='donate'
),
url(
r'^about-website/$',
PMAView.as_view(
template_name='about-website.html',
title='About website',
),
name='about-website'
),
url(
r'^downloads/$',
PMAView.as_view(
template_name='downloads.html',
title='Downloads',
rss='feed-files',
rss_title='phpMyAdmin releases',
),
name='downloads'
),
url(
r'^translate/$',
PMAView.as_view(
template_name='translate.html',
title='Translating',
rss=TRANSLATIONS_RSS,
rss_title='phpMyAdmin translation changes',
),
name='translate'
),
url(
r'^develop/$',
PMAView.as_view(
template_name='develop.html',
title='Developing',
),
name='develop'
),
url(
r'^contest/$',
PMAView.as_view(
template_name='contest.html',
title='Contest',
),
name='contest'
),
url(
r'^files/$',
ReleaseList.as_view(),
name='files'
),
url(
r'^files/(?P<version>[a-z0-9.-]*)/$',
ReleaseDetail.as_view(),
name='release'
),
# Swekey link from our documentation
url(
r'auth_key',
RedirectView.as_view(
pattern_name='home',
permanent=True,
)
),
# favicon.ico
url(
r'^(?:home_page/)favicon\.ico$',
RedirectView.as_view(
url='/static/favicon.ico',
permanent=True,
)
),
# robots.txt
url(
r'^robots.txt$',
TemplateView.as_view(
template_name='robots.txt',
content_type='text/plain'
)
),
# Machine parsable output
url(
r'^home_page/phpmyadmin.xml$',
TemplateView.as_view(
template_name='phpmyadmin.xml',
content_type='application/xml'
),
name='pad',
),
url(
r'^home_page/phpmyadmin-doap.xml$',
TemplateView.as_view(
template_name='phpmyadmin-doap.xml',
content_type='application/xml'
),
name='doap',
),
# Version information
url(
r'^(home_page/)?(latest|version)\.(php|txt)$',
TemplateView.as_view(
template_name='version/version.txt',
content_type='text/plain'
)
),
url(
r'^downloads/list\.txt$',
TemplateView.as_view(
template_name='version/list.txt',
content_type='text/plain'
)
),
url(
r'^downloads/phpMyAdmin-latest-'
r'(?P<flavor>all-languages|english)'
r'(?P<extension>\.zip|\.tar\.gz|\.tar\.xz|\.7z|\.tar\.bz2)'
r'(?P<checksum>\.asc|\.sha256)?$',
latest_download,
name='latest-download'
),
url(
r'^downloads/phpMyAdmin-latest-'
r'(?P<flavor>source)'
r'(?P<extension>\.tar\.xz)'
r'(?P<checksum>\.asc|\.sha256)?$',
latest_download,
name='latest-download'
),
url(
r'^(home_page/)?version\.js$',
TemplateView.as_view(
template_name='version/version.js',
content_type='application/javascript'
)
),
url(
r'^(?:home_page/)?version\.json$',
version_json,
),
    # GitHub API proxy
url(
r'^api/commit/(?P<name>[a-f0-9]{40})/',
github_commit,
),
url(
r'^api/tree/(?P<name>[a-zA-Z0-9_]*)/',
github_tree,
),
# Composer packages
url(
r'^packages\.json$',
TemplateView.as_view(
template_name='version/packages.json',
content_type='application/json'
)
),
# Test backend
url(
r'^test/data$',
TemplateView.as_view(
template_name='test-data',
content_type='text/plain'
)
),
# Compatibility redirects
url(
r'^(?:documentation/changelog.php|[cC]hange[Ll]og.txt|ANNOUNCE.txt)',
RedirectView.as_view(
url='https://demo.phpmyadmin.net/master-config/changelog.php',
permanent=True,
)
),
url(
r'^documentation/scripts/setup.php$',
RedirectView.as_view(
url='https://demo.phpmyadmin.net/master-config/setup/',
permanent=True,
)
),
url(
r'^phpdoc',
RedirectView.as_view(
url='https://develdocs.phpmyadmin.net/',
permanent=True,
)
),
url(
r'^(?:phpMyAdmin/)?Documentation.html$',
RedirectView.as_view(
url='https://docs.phpmyadmin.net/',
permanent=True,
)
),
url(
r'^(?:documentation|pma_localized_docs|localized_|manual/)',
RedirectView.as_view(
url='https://docs.phpmyadmin.net/',
permanent=True,
)
),
url(
r'^(?:snapshot|cvs)',
RedirectView.as_view(
url='https://github.com/phpmyadmin/phpmyadmin/',
permanent=True,
)
),
url(
r'^old-stuff/$',
RedirectView.as_view(
url='https://github.com/phpmyadmin/history',
permanent=True,
)
),
url(
r'^old-stuff/ChangeLogs/',
RedirectView.as_view(
url='https://github.com/phpmyadmin/history/tree/master/ChangeLogs',
permanent=True,
)
),
url(
r'^home_page/security/(?:index.php)?$',
RedirectView.as_view(
pattern_name='security',
permanent=True,
)
),
url(
r'^home_page/security/index.xml$',
RedirectView.as_view(
pattern_name='feed-security',
permanent=True,
)
),
url(
r'^home[_ ]?page/' +
r'(?:security/PMASA|security/pmasa|\.\.\.ASA)-'
r'(?P<year>20[0-9][0-9])-(?P<sequence>[0-9]+)\)?(\.php.*)?$',
RedirectView.as_view(
pattern_name='security-issue',
permanent=True,
)
),
url(
r'^home[_ ]?page/security\.php$',
redirect_security,
),
url(
r'^home[_ ]?page/$',
RedirectView.as_view(
pattern_name='home',
permanent=True,
)
),
url(
r'^search/$',
RedirectView.as_view(
pattern_name='home',
permanent=True,
)
),
url(
r'^home_page/sitemap\.xml$',
RedirectView.as_view(
pattern_name='sitemap',
permanent=True,
)
),
url(
r'^home[_ /]?page/(?P<page>[a-z0-9-]*)\.php(.*)?$',
redirect_home_page,
),
url(
r'gophp5',
RedirectView.as_view(
pattern_name='home',
permanent=True,
)
),
url(
r'cgi-bin/mailman/listinfo/mailman',
RedirectView.as_view(
url='https://lists.phpmyadmin.net/',
permanent=True,
)
),
# Some weird URLs seen in wild
url(
r'^news/&.*',
RedirectView.as_view(
pattern_name='news',
permanent=True,
)
),
url(
r'^(?:download|files/\*/|downloads/\.PhpMyAdmin)$',
RedirectView.as_view(
pattern_name='download',
permanent=True,
)
),
url(
r'^(?:https?://www\.phpmyadmin\.net/|index\.html|' +
r'logout|auth|login|auth_|auth%5C_key|' +
r'SignonURL.*|logoutURL.*|' +
r'default\.htm|home|\&lang=en.*|phpMyAdmin.*|[0-9.]+)$',
RedirectView.as_view(
pattern_name='home',
permanent=True,
)
),
# Admin interface
url(r'^admin/', include(admin.site.urls)),
]
|
mraza007/Pizza-or-Not-a-Pizza | lib/python3.5/site-packages/clarifai/client/sample_main.py | # -*- coding: utf-8 -*-
import os
import sys
from clarifai.client import ClarifaiApi
def tag_images_in_directory(path, api):
    """Tag every file in directory *path* in one ``api.tag_images`` call.

    Each file is opened in binary mode and handed to the API as an
    ``(open file, filename)`` pair.  Bug fix: the original never closed
    the file handles, leaking one descriptor per file.
    """
    path = path.rstrip(os.sep)
    images = [
        (open(os.path.join(path, fname), 'rb'), fname)
        for fname in os.listdir(path)
    ]
    try:
        return api.tag_images(images)
    finally:
        # Close every handle even if the API call raises.
        for handle, _ in images:
            handle.close()
def main(argv):
    """Tag a URL, a directory of images, or a single image file.

    With no command-line argument, falls back to a sample image URL.
    """
    default_url = 'http://clarifai-img.s3.amazonaws.com/test/toddler-flowers.jpeg'
    imageurl = argv[1] if len(argv) > 1 else default_url
    api = ClarifaiApi()
    if imageurl.startswith('http'):
        response = api.tag_image_urls(imageurl)
    elif os.path.isdir(imageurl):
        response = tag_images_in_directory(imageurl, api)
    elif os.path.isfile(imageurl):
        with open(imageurl, 'rb') as image_file:
            response = api.tag_images(image_file)
    else:
        raise Exception("Must input url, directory path, or file path")
    print(response)
if __name__ == '__main__':
main(sys.argv)
|
mraza007/Pizza-or-Not-a-Pizza | app.py | from flask import Flask, request
from twilio.twiml.messaging_response import MessagingResponse
from pizza import pizaa
app = Flask(__name__)
@app.route('/sms', methods=['POST'])
def sms_reply():
    """Twilio SMS webhook: classify an attached image as pizza / not pizza."""
    resp = MessagingResponse()
    if request.form['NumMedia'] != '0':
        # Twilio puts the first attachment's URL in MediaUrl0.
        image_url = request.form['MediaUrl0']
        resp.message(pizaa(image_url))
    else:
        # Grammar fix in the user-facing reply: "an Pizza image" -> "a pizza image".
        resp.message('Please send a pizza image, or contact the developer.')
    return str(resp)
if __name__ == '__main__':
app.run() |
mraza007/Pizza-or-Not-a-Pizza | lib/python3.5/site-packages/twilio/__init__.py |
__version_info__ = ('6', '10', '4')
__version__ = '.'.join(__version_info__)
|
mraza007/Pizza-or-Not-a-Pizza | pizza.py | <reponame>mraza007/Pizza-or-Not-a-Pizza<filename>pizza.py
from clarifai.rest import Image as ClImage
from clarifai.rest import ClarifaiApp
app = ClarifaiApp()
def pizaa(image_url):
    """Return whether the Clarifai food model detects pizza at *image_url*."""
    model = app.models.get('food-items-v1.0')
    prediction = model.predict([ClImage(url=image_url)])
    concepts = prediction['outputs'][0]['data']['concepts']
    # True when any detected food concept is named "pizza".
    if any(concept['name'] == 'pizza' for concept in concepts):
        return "Its a Pizza"
    return "Its Not a Pizza"
print(pizaa('https://images.duckduckgo.com/iu/?u=http%3A%2F%2F4.bp.blogspot.com%2F-O6kZHGQZOA8%2FUaatXgh95CI%2FAAAAAAAAAgg%2Fnq9ogwSJ-Ks%2Fs1600%2FPizza.jpg&f=1')) |
YashG2002/Tomato-Disease-Detection | resnet_transfer_optimizer.py | from __future__ import print_function
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Input, AveragePooling2D
from keras.models import Model
from keras.layers.core import Dense, Activation, Flatten, Dropout
import h5py
import matplotlib.pyplot as plt
import pickle
import keras
import numpy as np
from keras import optimizers
import sklearn.metrics as sklm
import tensorflow as tf
import keras.backend as K
import os
from keras.utils.np_utils import to_categorical
# Get number of classes
ls1=os.listdir('color')
if '.DS_Store' in ls1:
ls1.remove('.DS_Store')
dic1={}
for idx,i in enumerate(ls1):
dic1[i]=idx
#F1 through callback
class Metrics(keras.callbacks.Callback):
    """Keras callback recording micro-F1 and a confusion matrix on the
    validation set at the end of every epoch.

    Results accumulate in ``self.f1s`` and ``self.confusion`` (one entry
    per epoch).  ``self.precision`` / ``self.recall`` are initialized for
    interface compatibility but are never filled in.
    """

    def on_train_begin(self, logs={}):
        # Per-epoch history lists, reset at the start of each fit().
        self.confusion = []
        self.precision = []
        self.recall = []
        self.f1s = []

    def on_epoch_end(self, epoch, logs={}):
        # Bug fix: the original ran model.predict twice per epoch and
        # discarded the first result ("score"); predict once instead.
        predict = np.round(np.asarray(self.model.predict(self.validation_data[0])))
        targ = self.validation_data[1]
        self.f1s.append(sklm.f1_score(targ, predict, average='micro'))
        self.confusion.append(
            sklm.confusion_matrix(targ.argmax(axis=1), predict.argmax(axis=1))
        )
        return
# Loading saved predicted X and y
def load_bottleneck_data(training_file, validation_file):
    """Load precomputed ResNet bottleneck features and labels.

    NOTE: despite the parameters, this reads from fixed filenames
    (bftx_resnet.h5, bfvx_resnet.h5, bfty_resnet.pkl, bfvy_resnet.pkl);
    *training_file* and *validation_file* are ignored.
    """
    with h5py.File('bftx_resnet.h5', 'r') as h5f:
        X_train2 = h5f['bftx'][:]
    with h5py.File('bfvx_resnet.h5', 'r') as h5f:
        X_val2 = h5f['bfvx'][:]
    with open('bfty_resnet.pkl', 'rb') as f:
        y_train2 = pickle.load(f)
    with open('bfvy_resnet.pkl', 'rb') as f:
        y_val2 = pickle.load(f)
    return X_train2, y_train2, X_val2, y_val2
# Calling the above function to load saved data
X_train, y_train, X_val, y_val = load_bottleneck_data('resnet_train_bottleneck.json',
'resnet_validate_bottleneck.json')
y_train = to_categorical(y_train, num_classes=10)
y_val = to_categorical(y_val,num_classes = 10)
# input image dimensions
img_rows, img_cols = 256, 256
h = 224
w = 224
ch = 3
#HYPERPARAMETERS
batch_size = 128
num_classes = len(dic1)
epochs = 20
#Model
def create_model_resnet():
    """Build a headless (no classifier top) ResNet50 over h x w x ch input."""
    input_tensor = Input(shape=(h, w, ch))
    return ResNet50(input_tensor=input_tensor, include_top=False)
#Adding final layer
print(X_train.shape)
# NOTE(review): debug leftover — this exit() terminates the script right
# here, making everything below (model build, training, plots, weight
# saving) unreachable.  Remove it to actually run the training.
exit()
# Classifier head: flatten the bottleneck features and add one softmax layer.
input_shape = X_train.shape[1:]
inp = Input(shape=input_shape)
x = Flatten()(inp)
x = Dense(num_classes, activation='softmax')(x)
model = Model(inp, x)
sgd = optimizers.SGD(lr=0.0007, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer= sgd, loss='categorical_crossentropy', metrics=['accuracy'])
metrics = Metrics()
with tf.Session() as sess:
# fetch session so Keras API can work
K.set_session(sess)
K.set_learning_phase(1)
history =model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
validation_data=(X_val, y_val), shuffle=True, verbose=1,callbacks=[metrics] )
print(metrics.f1s)
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
print(history.history['acc'])
# f1_mean = history.history['f1']
# val_f1 = history.history['val_f1']
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train ' + str(acc[-1]), 'test ' + str(val_acc[-1])], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train ' + str(loss[-1]), 'test ' + str(val_loss[-1])], loc='upper left')
plt.show()
# # summarize history for f1
plt.plot(metrics.f1s)
# plt.plot(history.history['val_f1'])
plt.title('f1 mean score')
plt.ylabel('f1')
plt.xlabel('epoch')
plt.legend(['val ' + str(metrics.f1s[-1])], loc='upper left')
plt.show()
print(metrics.confusion[-1])
ans = input("Do you want to save the weights?")
if ans == 'y':
model.save_weights('resnet_bottleneck_weights_temp.h5')
print("Saved")
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
print("Reach End \n") |
YashG2002/Tomato-Disease-Detection | resnet_transfer.py | <filename>resnet_transfer.py
from __future__ import print_function
from keras.applications.resnet50 import ResNet50, preprocess_input
from keras.layers import Input, AveragePooling2D
from sklearn.model_selection import train_test_split
from keras.models import Model
from keras.layers.core import Dense, Activation, Flatten, Dropout
import math
from sklearn.metrics import f1_score
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
from keras.regularizers import l2
import codecs
import h5py
import matplotlib.pyplot as plt
import pickle
from keras import optimizers
import tensorflow as tf
import keras.backend as K
import sys
import json
import os
ls1=os.listdir('color')
if '.DS_Store' in ls1:
ls1.remove('.DS_Store')
dic1={}
for idx,i in enumerate(ls1):
dic1[i]=idx
import scipy.misc as sm
import numpy as np
count=0
# for idx,i in enumerate(ls1):
# dic1[i]=idx
# ls2=os.listdir('color/'+i)
# if '.DS_Store' in ls2:
# ls2.remove('.DS_Store')
# for j in ls2:
# #im1=np.asarray(sm.imread('color/'+i+'/'+j))
# #temp=np.zeros((len(im1),len(im1[0]),len(im1[0][0]) ))
# count=count+1
# print("Reach 1 \n")
# ls1=os.listdir('color')
# if '.DS_Store' in ls1:
# ls1.remove('.DS_Store')
# dic1={}
# X=np.zeros((count,256,256,3))
# Y=np.zeros((count,1))
# vap=0
#
# for idx,i in enumerate(ls1):
# dic1[i]=idx
# ls2=os.listdir('color/'+i)
# if '.DS_Store' in ls2:
# ls2.remove('.DS_Store')
# for j in ls2:
# im1=np.asarray(sm.imread('color/'+i+'/'+j))
# print(str(im1.shape)+ " "+ i + " " + j)
# X[vap,:,:,:]=im1
# Y[vap,0]=idx
# #temp=np.zeros((len(im1),len(im1[0]),len(im1[0][0]) ))
# vap=vap+1
def f1(y_true, y_pred):
    """Micro-style F1 metric for Keras, built from TF1 graph ops.

    Predictions are rounded at an implicit 0.5 threshold; per-sample
    precision/recall are computed along axis 1 and the mean F1 over the
    batch is returned.  NaN rows (no true or predicted positives) are
    mapped to 0.
    """
    # Bug fix: the original created an unused tf.InteractiveSession() on
    # every call, leaking a session (and its resources) per invocation.
    y_true = tf.cast(y_true, "int32")
    y_pred = tf.cast(tf.round(y_pred), "int32")  # implicit 0.5 threshold via tf.round
    y_correct = y_true * y_pred
    sum_true = tf.reduce_sum(y_true, axis=1)
    sum_pred = tf.reduce_sum(y_pred, axis=1)
    sum_correct = tf.reduce_sum(y_correct, axis=1)
    precision = sum_correct / (sum_pred)
    recall = sum_correct / (sum_true)
    # Bug fix: F1 = 2*P*R/(P+R); the original omitted the factor 2 and
    # therefore reported half the true score.
    f_score = 2 * precision * recall / (precision + recall)
    f_score = tf.where(tf.is_nan(f_score), tf.zeros_like(f_score), f_score)
    return tf.reduce_mean(f_score)
h5f = h5py.File('variables.h5','r')
X = h5f['X'][:]
print("Reach 1")
Y = h5f['Y'][:]
print("Reach 2 \n")
batch_size = 128
num_classes = len(dic1)
epochs = 30
# input image dimensions
img_rows, img_cols = 256, 256
h = 224
w = 224
ch = 3
print("Reach 2.5 \n")
#tensor. will receive the raw 256x256x3 dataset images as input, gets passed to resize_images
img_placeholder = tf.placeholder("uint8", (None, 256, 256, 3))
#tensor. resized images. gets passed into Session()
resize_op = tf.image.resize_images(img_placeholder, (h, w), method=0)
# create a generator for batch processing
# this gen is written as if you could run through ALL of the data
# AWS instance doesn't have enough memory to hold the entire training bottleneck in memory
# so we will call for 10000 samples when we call it
def gen(session, data, labels, batch_size):
    """Return a zero-argument factory for a batch generator.

    Each batch of *data* is resized through the module-level ``resize_op``
    (via *session*) and normalized with ``preprocess_input`` before being
    yielded with its slice of *labels*.  Intended for
    ``model.predict_generator`` when the data does not fit in memory.
    """
    def _f():
        start = 0
        end = start + batch_size
        n = data.shape[0]
        max_iter = math.ceil(n/batch_size)
        while True:
            # run takes in a tensor/function and performs it.
            # almost always, that function will take a Tensor as input
            # when run is called, it takes a feed_dict param which translates
            # Tensors into actual data/integers/floats/etc
            # this is so you can write a network and only have to change the
            # data being passed in one place instead of everywhere
            for i in range(0,max_iter):
                # X_batch is resized
                X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
                # X_batch is normalized
                X_batch = preprocess_input(X_batch)
                y_batch = labels[start:end]
                start += batch_size
                end += batch_size
                if start >= n:
                    # start = 0
                    # end = batch_size
                    print("Bottleneck predictions completed.")
                    # break
                # NOTE(review): start/end are never reset (the resets above
                # are commented out), so once the data is exhausted every
                # later batch is empty — the caller must stop pulling after
                # ceil(n/batch_size) steps.
                yield (X_batch, y_batch)
    return _f
def create_model_resnet():
    """Build a headless (no classifier top) ResNet50 over h x w x ch input."""
    return ResNet50(input_tensor=Input(shape=(h, w, ch)), include_top=False)
print("Reach 2.8 \n")
X_train1, X_val1, y_train1, y_val1 = train_test_split(X, Y, test_size=0.3, random_state=0,shuffle=True)
print("Reach 3 \n")
print(X_train1.shape)
print(y_train1.shape)
with tf.Session() as sess:
K.set_session(sess)
K.set_learning_phase(1)
model = create_model_resnet()
train_gen = gen(sess, X_train1, y_train1, 64)
bottleneck_features_train = model.predict_generator(train_gen(), 2000)
print("conv to train list")
# bottleneck_features_train_list = bottleneck_features_train.tolist()
print("conv to train list complete")
print(bottleneck_features_train.shape)
bftx = h5py.File('bftx_resnet.h5', 'w')
bftx.create_dataset('bftx',data = bottleneck_features_train)
bftx.close()
with open('bfty_resnet.pkl', 'wb') as f:
pickle.dump(y_train1, f)
# datax = {'features': bottleneck_features_train, 'labels': y_train1}
filepathx = 'resnet_train_bottleneck.json'
# save_as_pickled_object(data, filepathx)
# json.dump(data, codecs.open('resnet_train_bottleneck.json', 'w',encoding='utf-8'),separators=(',', ':'))
print("json train dump complete")
# h5y = h5py.File('resnet_train_bottleneck.h5', 'w')
# h5y.create_dataset('data',data = data)
val_gen = gen(sess, X_val1, y_val1, batch_size)
bottleneck_features_validation = model.predict_generator(val_gen(), 2000)
print("conv to val list")
# bottleneck_features_validation_list = bottleneck_features_validation.tolist()
print("conv to val list complete")
bfvx = h5py.File('bfvx_resnet.h5', 'w')
bfvx.create_dataset('bfvx',data = bottleneck_features_validation)
bfvx.close()
with open('bfvy_resnet.pkl', 'wb') as f:
pickle.dump(y_val1, f)
# datay = {'features': bottleneck_features_validation, 'labels': y_val1}
filepathy = 'resnet_validate_bottleneck.json'
# save_as_pickled_object(data, filepathy)
# json.dump(data, codecs.open('resnet_validate_bottleneck.json', 'w',encoding='utf-8'),separators=(',', ':'))
print("json val dump complete")
# h5x = h5py.File('resnet_validate_bottleneck.h5', 'w')
# h5x.create_dataset('data', data=data)
print("Reach 4 \n")
def load_bottleneck_data(training_file, validation_file):
    """Load precomputed ResNet bottleneck features and labels.

    NOTE: despite the parameters, this reads from fixed filenames
    (bftx_resnet.h5, bfvx_resnet.h5, bfty_resnet.pkl, bfvy_resnet.pkl);
    *training_file* and *validation_file* are ignored.
    """
    with h5py.File('bftx_resnet.h5', 'r') as h5f:
        X_train2 = h5f['bftx'][:]
    with h5py.File('bfvx_resnet.h5', 'r') as h5f:
        X_val2 = h5f['bfvx'][:]
    with open('bfty_resnet.pkl', 'rb') as f:
        y_train2 = pickle.load(f)
    with open('bfvy_resnet.pkl', 'rb') as f:
        y_val2 = pickle.load(f)
    return X_train2, y_train2, X_val2, y_val2
X_train, y_train, X_val, y_val = load_bottleneck_data('resnet_train_bottleneck.json',
'resnet_validate_bottleneck.json')
input_shape = X_train.shape[1:]
inp = Input(shape=input_shape)
x = Flatten()(inp)
x = Dense(num_classes, activation='softmax')(x)
model = Model(inp, x)
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy',f1])
with tf.Session() as sess:
# fetch session so Keras API can work
K.set_session(sess)
K.set_learning_phase(1)
history =model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,
validation_data=(X_val, y_val), shuffle=True, verbose=1 )
model.save_weights('resnet_bottleneck_weights.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train ' + str(acc[-1]), 'test ' + str(val_acc[-1])], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train ' + str(loss[-1]), 'test ' + str(val_loss[-1])], loc='upper left')
plt.show()
f1_mean = history.history['f1']
val_f1 = history.history['val_f1']
# summarize history for f1
plt.plot(history.history['f1'])
plt.plot(history.history['val_f1'])
plt.title('f1 mean score')
plt.ylabel('f1')
plt.xlabel('epoch')
plt.legend(['train ' + str(f1_mean[-1]), 'test ' + str(val_f1[-1])], loc='upper left')
plt.show()
dir_path = os.path.dirname(os.path.realpath(__file__))
print(dir_path)
print("Reach End \n") |
YashG2002/Tomato-Disease-Detection | data_preprocessing.py | <reponame>YashG2002/Tomato-Disease-Detection
from __future__ import print_function
import cv2
import pickle
import h5py
import os
ls1=os.listdir('color')
if '.DS_Store' in ls1:
ls1.remove('.DS_Store')
print(ls1)
dic1={}
import numpy as np
count=0
for idx,i in enumerate(ls1):
dic1[i]=idx
ls2=os.listdir('color/'+i)
if '.DS_Store' in ls2:
ls2.remove('.DS_Store')
for j in ls2:
#im1=np.asarray(sm.imread('color/'+i+'/'+j))
#temp=np.zeros((len(im1),len(im1[0]),len(im1[0][0]) ))
count=count+1
print("Reach 1 \n")
ls1=os.listdir('color')
if '.DS_Store' in ls1:
ls1.remove('.DS_Store')
dic1={}
X=np.zeros((count,256,256,3))
Y=np.zeros((count,1))
vap=0
# Fill X (images) and Y (class indices) by walking each class directory.
for idx,i in enumerate(ls1):
    dic1[i]=idx
    ls2=os.listdir('color/'+i)
    if '.DS_Store' in ls2:
        ls2.remove('.DS_Store')
    for idx2,j in enumerate(ls2):
        print(str(idx) + " " + i + " " + str(idx2) + " " + j)
        # NOTE(review): cv2.imread returns BGR channel order, and returns
        # None for unreadable files — a None here would silently leave this
        # row of X as zeros; verify all files are valid 256x256x3 images.
        X[vap, :, :, :]=cv2.imread('color/'+i+'/'+j)
        Y[vap,0]=idx
        #temp=np.zeros((len(im1),len(im1[0]),len(im1[0][0]) ))
        vap=vap+1
print(X.shape)
h5f = h5py.File("variables.h5",'w')
h5f.create_dataset("Y",data = Y)
print("Y done")
h5f.create_dataset("X",data = X)
print("X done")
# with open("variables.pickle",'wb') as f:
# print("Save Variables")
# pickle.dump(Y,f,pickle.HIGHEST_PROTOCOL)
# print("Y done")
# pickle.dump(X,f,pickle.HIGHEST_PROTOCOL)
# print("X done")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/waterfall/_connector.py | import _plotly_utils.basevalidators
# Auto-generated Plotly validator for the `waterfall.connector` compound
# property; all validation logic lives in CompoundValidator.
class ConnectorValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="connector", parent_name="waterfall", **kwargs):
        super(ConnectorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the data class this validator constructs.
            data_class_str=kwargs.pop("data_class_str", "Connector"),
            # Help text surfaced in error messages / docstrings (runtime
            # string — kept verbatim).
            data_docs=kwargs.pop(
                "data_docs",
                """
            line
                :class:`plotly.graph_objects.waterfall.connecto
                r.Line` instance or dict with compatible
                properties
            mode
                Sets the shape of connector lines.
            visible
                Determines if connector lines are drawn.
""",
            ),
            **kwargs
        )
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/indexes/timedeltas/test_shift.py | import pytest
from pandas.errors import NullFrequencyError
import pandas as pd
from pandas import TimedeltaIndex
import pandas._testing as tm
class TestTimedeltaIndexShift:
    """Tests for TimedeltaIndex.shift (exercised by __add__/__sub__)."""

    def test_tdi_shift_empty(self):
        # GH#9903: shifting an empty index is a no-op.
        empty = pd.TimedeltaIndex([], name="xxx")
        tm.assert_index_equal(empty.shift(0, freq="H"), empty)
        tm.assert_index_equal(empty.shift(3, freq="H"), empty)

    def test_tdi_shift_hours(self):
        # GH#9903: shift by whole hours, forwards and backwards.
        tdi = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
        tm.assert_index_equal(tdi.shift(0, freq="H"), tdi)
        tm.assert_index_equal(
            tdi.shift(3, freq="H"),
            pd.TimedeltaIndex(["8 hours", "9 hours", "12 hours"], name="xxx"),
        )
        tm.assert_index_equal(
            tdi.shift(-3, freq="H"),
            pd.TimedeltaIndex(["2 hours", "3 hours", "6 hours"], name="xxx"),
        )

    def test_tdi_shift_minutes(self):
        # GH#9903: shift by minutes.
        tdi = pd.TimedeltaIndex(["5 hours", "6 hours", "9 hours"], name="xxx")
        tm.assert_index_equal(tdi.shift(0, freq="T"), tdi)
        tm.assert_index_equal(
            tdi.shift(3, freq="T"),
            pd.TimedeltaIndex(["05:03:00", "06:03:00", "9:03:00"], name="xxx"),
        )
        tm.assert_index_equal(
            tdi.shift(-3, freq="T"),
            pd.TimedeltaIndex(["04:57:00", "05:57:00", "8:57:00"], name="xxx"),
        )

    def test_tdi_shift_int(self):
        # GH#8083: an integer shift uses the index's own (inferred) freq.
        rng = pd.to_timedelta(range(5), unit="d")._with_freq("infer") + pd.offsets.Hour(1)
        expected = TimedeltaIndex(
            [
                "1 days 01:00:00",
                "2 days 01:00:00",
                "3 days 01:00:00",
                "4 days 01:00:00",
                "5 days 01:00:00",
            ],
            freq="D",
        )
        tm.assert_index_equal(rng.shift(1), expected)

    def test_tdi_shift_nonstandard_freq(self):
        # GH#8083: shifting by a compound offset string ("2D 1s").
        rng = pd.to_timedelta(range(5), unit="d")._with_freq("infer") + pd.offsets.Hour(1)
        expected = TimedeltaIndex(
            [
                "6 days 01:00:03",
                "7 days 01:00:03",
                "8 days 01:00:03",
                "9 days 01:00:03",
                "10 days 01:00:03",
            ],
            freq="D",
        )
        tm.assert_index_equal(rng.shift(3, freq="2D 1s"), expected)

    def test_shift_no_freq(self):
        # GH#19147: shifting without a freq must raise NullFrequencyError.
        tdi = TimedeltaIndex(["1 days 01:00:00", "2 days 01:00:00"], freq=None)
        with pytest.raises(NullFrequencyError, match="Cannot shift with no freq"):
            tdi.shift(2)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_histogram.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
from plotly.graph_objs import Histogram
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_utils/importers.py | <filename>env/lib/python3.8/site-packages/_plotly_utils/importers.py
import importlib
def relative_import(parent_name, rel_modules=(), rel_classes=()):
    """
    Helper function to import submodules lazily in Python 3.7+ (PEP 562).

    Parameters
    ----------
    parent_name: str
        Name of the package that the relative imports are resolved against.
    rel_modules: list of str
        list of submodules to import, of the form .submodule
    rel_classes: list of str
        list of submodule classes/variables to import, of the form ._submodule.Foo

    Returns
    -------
    tuple
        Tuple that should be assigned to __all__, __getattr__, __dir__ in the caller
    """
    # Map the final path component (the public name) to its relative path.
    module_map = {m.rsplit(".", 1)[-1]: m for m in rel_modules}
    class_map = {c.rsplit(".", 1)[-1]: c for c in rel_classes}

    def __getattr__(import_name):
        # In Python 3.7+, lazily import submodules on first attribute access.
        target_module = module_map.get(import_name)
        if target_module is not None:
            return importlib.import_module(target_module, parent_name)

        # Otherwise look for a class/variable exported from a submodule.
        target_class = class_map.get(import_name)
        if target_class is not None:
            owner_module, _, attr_name = target_class.rpartition(".")
            owner = importlib.import_module(owner_module, parent_name)
            return getattr(owner, attr_name)

        raise AttributeError(
            "module {__name__!r} has no attribute {name!r}".format(
                name=import_name, __name__=parent_name
            )
        )

    __all__ = list(module_map) + list(class_map)

    def __dir__():
        return __all__

    return __all__, __getattr__, __dir__
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_at_time.py | <gh_stars>100-1000
from datetime import time
import numpy as np
import pytest
from pandas._libs.tslibs import timezones
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestAtTime:
    """Tests for Series.at_time / time-based indexing on a DatetimeIndex."""

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_localized_at_time(self, tzstr):
        # at_time on a tz-localized series must equal localizing the
        # naive at_time result, and preserve the timezone.
        tz = timezones.maybe_get_tz(tzstr)
        rng = date_range("4/16/2012", "5/1/2012", freq="H")
        ts = Series(np.random.randn(len(rng)), index=rng)
        ts_local = ts.tz_localize(tzstr)
        result = ts_local.at_time(time(10, 0))
        expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
        tm.assert_series_equal(result, expected)
        assert timezones.tz_compare(result.index.tz, tz)

    def test_at_time(self):
        # Selecting by Timestamp keeps only rows with that wall-clock time.
        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
        ts = Series(np.random.randn(len(rng)), index=rng)
        rs = ts.at_time(rng[1])
        assert (rs.index.hour == rng[1].hour).all()
        assert (rs.index.minute == rng[1].minute).all()
        assert (rs.index.second == rng[1].second).all()

        # String and datetime.time arguments are equivalent.
        result = ts.at_time("9:30")
        expected = ts.at_time(time(9, 30))
        tm.assert_series_equal(result, expected)

        # Indexing with a datetime.time object goes through the same path.
        df = DataFrame(np.random.randn(len(rng), 3), index=rng)
        result = ts[time(9, 30)]
        result_df = df.loc[time(9, 30)]
        expected = ts[(rng.hour == 9) & (rng.minute == 30)]
        exp_df = df[(rng.hour == 9) & (rng.minute == 30)]
        result.index = result.index._with_freq(None)
        tm.assert_series_equal(result, expected)
        tm.assert_frame_equal(result_df, exp_df)

        chunk = df.loc["1/4/2000":]
        result = chunk.loc[time(9, 30)]
        expected = result_df[-1:]
        # Without resetting the freqs, these are 5 min and 1440 min, respectively
        result.index = result.index._with_freq(None)
        expected.index = expected.index._with_freq(None)
        tm.assert_frame_equal(result, expected)

        # midnight, everything
        rng = date_range("1/1/2000", "1/31/2000")
        ts = Series(np.random.randn(len(rng)), index=rng)
        result = ts.at_time(time(0, 0))
        tm.assert_series_equal(result, ts)

        # time doesn't exist
        rng = date_range("1/1/2012", freq="23Min", periods=384)
        ts = Series(np.random.randn(len(rng)), rng)
        rs = ts.at_time("16:00")
        assert len(rs) == 0

    def test_at_time_raises(self):
        # GH20725 - at_time requires a DatetimeIndex, not a default RangeIndex
        ser = Series("a b c".split())
        msg = "Index must be DatetimeIndex"
        with pytest.raises(TypeError, match=msg):
            ser.at_time("00:00")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scattergl/error_y/__init__.py | <gh_stars>1000+
import sys

# Eagerly import every validator on Python < 3.7; on 3.7+ defer to
# PEP 562 lazy module attributes built by relative_import.
if sys.version_info < (3, 7):
    from ._width import WidthValidator
    from ._visible import VisibleValidator
    from ._valueminus import ValueminusValidator
    from ._value import ValueValidator
    from ._type import TypeValidator
    from ._tracerefminus import TracerefminusValidator
    from ._traceref import TracerefValidator
    from ._thickness import ThicknessValidator
    from ._symmetric import SymmetricValidator
    from ._color import ColorValidator
    from ._arraysrc import ArraysrcValidator
    from ._arrayminussrc import ArrayminussrcValidator
    from ._arrayminus import ArrayminusValidator
    from ._array import ArrayValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__); each validator
    # class is imported only on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._width.WidthValidator",
            "._visible.VisibleValidator",
            "._valueminus.ValueminusValidator",
            "._value.ValueValidator",
            "._type.TypeValidator",
            "._tracerefminus.TracerefminusValidator",
            "._traceref.TracerefValidator",
            "._thickness.ThicknessValidator",
            "._symmetric.SymmetricValidator",
            "._color.ColorValidator",
            "._arraysrc.ArraysrcValidator",
            "._arrayminussrc.ArrayminussrcValidator",
            "._arrayminus.ArrayminusValidator",
            "._array.ArrayValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/cone/colorbar/_tickformatstopdefaults.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``cone.colorbar.tickformatstopdefaults``."""

    def __init__(
        self,
        plotly_name="tickformatstopdefaults",
        parent_name="cone.colorbar",
        **kwargs
    ):
        # Defaults object has no per-property docs generated for it.
        data_class = kwargs.pop("data_class_str", "Tickformatstop")
        docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(TickformatstopdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scatter3d/marker/_symbol.py | <gh_stars>10-100
import _plotly_utils.basevalidators
class SymbolValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``symbol`` property of ``scatter3d.marker``."""

    def __init__(self, plotly_name="symbol", parent_name="scatter3d.marker", **kwargs):
        # Fresh default list per call so a caller mutating it cannot
        # affect other validator instances.
        default_symbols = [
            "circle",
            "circle-open",
            "square",
            "square-open",
            "diamond",
            "diamond-open",
            "cross",
            "x",
        ]
        super(SymbolValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            values=kwargs.pop("values", default_symbols),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/_gauge.py | <gh_stars>1000+
import _plotly_utils.basevalidators
class GaugeValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``gauge`` property of ``indicator`` traces."""

    def __init__(self, plotly_name="gauge", parent_name="indicator", **kwargs):
        # Callers may override the generated defaults through kwargs.
        data_class = kwargs.pop("data_class_str", "Gauge")
        docs = kwargs.pop(
            "data_docs",
            """
            axis
                :class:`plotly.graph_objects.indicator.gauge.Ax
                is` instance or dict with compatible properties
            bar
                Set the appearance of the gauge's value
            bgcolor
                Sets the gauge background color.
            bordercolor
                Sets the color of the border enclosing the
                gauge.
            borderwidth
                Sets the width (in px) of the border enclosing
                the gauge.
            shape
                Set the shape of the gauge
            steps
                A tuple of :class:`plotly.graph_objects.indicat
                or.gauge.Step` instances or dicts with
                compatible properties
            stepdefaults
                When used in a template (as layout.template.dat
                a.indicator.gauge.stepdefaults), sets the
                default property values to use for elements of
                indicator.gauge.steps
            threshold
                :class:`plotly.graph_objects.indicator.gauge.Th
                reshold` instance or dict with compatible
                properties
            """,
        )
        super(GaugeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/plotting/test_boxplot_method.py | import itertools
import string
import numpy as np
from numpy import random
import pytest
import pandas.util._test_decorators as td
from pandas import DataFrame, MultiIndex, Series, date_range, timedelta_range
import pandas._testing as tm
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
import pandas.plotting as plotting
""" Test cases for .boxplot method """
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    """Tests for DataFrame.boxplot (legacy API, return types, layout, colors)."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        # Exercise the classic entry points: whole-frame, by-column,
        # by-group, and the module-level plotting._core.boxplot helper.
        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        df["indic"] = ["foo", "bar"] * 3
        df["indic2"] = ["foo", "bar", "foo"] * 2
        _check_plot_works(df.boxplot, return_type="dict")
        _check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, column=["one", "two"], by="indic")
        _check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by=["indic", "indic2"])
        _check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
        _check_plot_works(df.boxplot, notch=1, return_type="dict")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic", notch=1)

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
        df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
        df["Y"] = Series(["A"] * 10)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="X")

        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.boxplot("Col1", by="X", ax=ax)
        ax_axes = ax.axes
        assert ax_axes is axes

        fig, ax = self.plt.subplots()
        axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
        ax_axes = ax.axes
        assert ax_axes is axes["A"]

        # Multiple columns with an ax argument should use same figure
        fig, ax = self.plt.subplots()
        with tm.assert_produces_warning(UserWarning):
            axes = df.boxplot(
                column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
            )
        assert axes["Col1"].get_figure() is fig

        # When by is None, check that all relevant lines are present in the
        # dict
        fig, ax = self.plt.subplots()
        d = df.boxplot(ax=ax, return_type="dict")
        lines = list(itertools.chain.from_iterable(d.values()))
        assert len(ax.get_lines()) == len(lines)

    @pytest.mark.slow
    def test_boxplot_return_type_none(self):
        # GH 12216; return_type=None & by=None -> axes
        result = self.hist_df.boxplot()
        assert isinstance(result, self.plt.Axes)

    @pytest.mark.slow
    def test_boxplot_return_type_legacy(self):
        # API change in https://github.com/pandas-dev/pandas/pull/7096
        import matplotlib as mpl  # noqa

        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        with pytest.raises(ValueError):
            df.boxplot(return_type="NOTATYPE")

        result = df.boxplot()
        self._check_box_return_type(result, "axes")

        # Each explicit return_type must round-trip without warnings.
        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="dict")
        self._check_box_return_type(result, "dict")

        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="axes")
        self._check_box_return_type(result, "axes")

        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="both")
        self._check_box_return_type(result, "both")

    @pytest.mark.slow
    def test_boxplot_axis_limits(self):
        # Each subplot's y-limits must cover its column's full data range.
        def _check_ax_limits(col, ax):
            y_min, y_max = ax.get_ylim()
            assert y_min <= col.min()
            assert y_max >= col.max()

        df = self.hist_df.copy()
        df["age"] = np.random.randint(1, 20, df.shape[0])
        # One full row
        height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        assert weight_ax._sharey == height_ax

        # Two rows, one partial
        p = df.boxplot(["height", "weight", "age"], by="category")
        height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
        dummy_ax = p[1, 1]

        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        _check_ax_limits(df["age"], age_ax)
        assert weight_ax._sharey == height_ax
        assert age_ax._sharey == height_ax
        assert dummy_ax._sharey is None

    @pytest.mark.slow
    def test_boxplot_empty_column(self):
        # An all-NaN column must not break plotting.
        df = DataFrame(np.random.randn(20, 4))
        df.loc[:, 0] = np.nan
        _check_plot_works(df.boxplot, return_type="axes")

    @pytest.mark.slow
    def test_figsize(self):
        df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
        result = df.boxplot(return_type="axes", figsize=(12, 8))
        assert result.figure.bbox_inches.width == 12
        assert result.figure.bbox_inches.height == 8

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
        self._check_ticks_props(
            df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
        )

    def test_boxplot_numeric_data(self):
        # GH 22799 - non-numeric columns are silently dropped from the plot
        df = DataFrame(
            {
                "a": date_range("2012-01-01", periods=100),
                "b": np.random.randn(100),
                "c": np.random.randn(100) + 2,
                "d": date_range("2012-01-01", periods=100).astype(str),
                "e": date_range("2012-01-01", periods=100, tz="UTC"),
                "f": timedelta_range("1 days", periods=100),
            }
        )
        ax = df.plot(kind="box")
        assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]

    @pytest.mark.parametrize(
        "colors_kwd, expected",
        [
            (
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
                dict(boxes="r", whiskers="b", medians="g", caps="c"),
            ),
            (dict(boxes="r"), dict(boxes="r")),
            ("r", dict(boxes="r", whiskers="r", medians="r", caps="r")),
        ],
    )
    def test_color_kwd(self, colors_kwd, expected):
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        result = df.boxplot(color=colors_kwd, return_type="dict")
        for k, v in expected.items():
            assert result[k][0].get_color() == v

    @pytest.mark.parametrize(
        "dict_colors, msg",
        [(dict(boxes="r", invalid_key="r"), "invalid key 'invalid_key'")],
    )
    def test_color_kwd_errors(self, dict_colors, msg):
        # GH: 26214
        df = DataFrame(random.rand(10, 2))
        with pytest.raises(ValueError, match=msg):
            df.boxplot(color=dict_colors, return_type="dict")

    @pytest.mark.parametrize(
        "props, expected",
        [
            ("boxprops", "boxes"),
            ("whiskerprops", "whiskers"),
            ("capprops", "caps"),
            ("medianprops", "medians"),
        ],
    )
    def test_specified_props_kwd(self, props, expected):
        # GH 30346 - matplotlib-style *props kwargs must be forwarded
        df = DataFrame({k: np.random.random(100) for k in "ABC"})
        kwd = {props: dict(color="C1")}
        result = df.boxplot(return_type="dict", **kwd)

        assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Tests for GroupBy.boxplot and DataFrame.boxplot(by=...) layouts."""

    @pytest.mark.slow
    def test_boxplot_legacy1(self):
        grouped = self.hist_df.groupby(by="gender")
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy2(self):
        # Grouping on a MultiIndex level.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_boxplot_legacy3(self):
        # Grouping on the column axis after unstacking.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    @pytest.mark.slow
    def test_grouped_plot_fignums(self):
        # Grouped plot/boxplot/hist must open the expected number of figures.
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        df = DataFrame({"height": height, "weight": weight, "gender": gender})
        gb = df.groupby("gender")

        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()

        res = gb.boxplot(return_type="axes")
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()

        # now works with GH 5610 as gender is excluded
        res = df.groupby("gender").hist()
        tm.close()

    @pytest.mark.slow
    def test_grouped_box_return_type(self):
        df = self.hist_df

        # old style: return_type=None
        result = df.boxplot(by="gender")
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )

        # now for groupby
        result = df.groupby("gender").boxplot(return_type="dict")
        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])

        columns2 = "X B C D A G Y N Q O".split()
        df2 = DataFrame(random.randn(50, 10), columns=columns2)
        categories2 = "A B C D E F G H I J".split()
        df2["category"] = categories2 * 5

        # Every explicit return_type must be honored by both spellings.
        for t in ["dict", "axes", "both"]:
            returned = df.groupby("classroom").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])

            returned = df.boxplot(by="classroom", return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=["height", "weight", "category"]
            )

            returned = df2.groupby("category").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)

            returned = df2.boxplot(by="category", return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)

    @pytest.mark.slow
    def test_grouped_box_layout(self):
        df = self.hist_df

        # Invalid layouts raise before any figure is drawn.
        msg = "Layout of 1x1 must be larger than required size 2"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))

        msg = "The 'layout' keyword is not supported when 'by' is None"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(
                column=["height", "weight", "category"],
                layout=(2, 1),
                return_type="dict",
            )

        msg = "At least one dimension of layout must be positive"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))

        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("gender").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))

        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))

        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("classroom").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))

        # GH 5897
        axes = df.boxplot(
            column=["height", "weight", "category"], by="gender", return_type="axes"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        for ax in [axes["height"]]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes["weight"], axes["category"]]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])

        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))

        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, 2),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, -1),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))

        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(4, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))

        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(-1, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))

        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))

        box = df.groupby("classroom").boxplot(  # noqa
            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))

    @pytest.mark.slow
    def test_grouped_box_multiple_axes(self):
        # GH 6970, GH 7069
        df = self.hist_df

        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))

        fig, axes = self.plt.subplots(2, 3)
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(
                column=["height", "weight", "category"],
                by="gender",
                return_type="axes",
                ax=axes[0],
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig

        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby("classroom").boxplot(
                column=["height", "weight", "category"], return_type="axes", ax=axes[1]
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig

        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby("classroom").boxplot(ax=axes)

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(
            df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/utils.py | from __future__ import absolute_import, division
import textwrap
from pprint import PrettyPrinter
from _plotly_utils.utils import *
# Pretty printing
def _list_repr_elided(v, threshold=200, edgeitems=3, indent=0, width=80):
"""
Return a string representation for of a list where list is elided if
it has more than n elements
Parameters
----------
v : list
Input list
threshold :
Maximum number of elements to display
Returns
-------
str
"""
if isinstance(v, list):
open_char, close_char = "[", "]"
elif isinstance(v, tuple):
open_char, close_char = "(", ")"
else:
raise ValueError("Invalid value of type: %s" % type(v))
if len(v) <= threshold:
disp_v = v
else:
disp_v = list(v[:edgeitems]) + ["..."] + list(v[-edgeitems:])
v_str = open_char + ", ".join([str(e) for e in disp_v]) + close_char
v_wrapped = "\n".join(
textwrap.wrap(
v_str,
width=width,
initial_indent=" " * (indent + 1),
subsequent_indent=" " * (indent + 1),
)
).strip()
return v_wrapped
class ElidedWrapper(object):
    """
    Helper class that wraps values of certain types and produces a custom
    __repr__() that may be elided and is suitable for use during pretty
    printing
    """

    def __init__(self, v, threshold, indent):
        self.v = v
        self.indent = indent
        self.threshold = threshold

    @staticmethod
    def is_wrappable(v):
        # Wrappable: non-empty sequences of non-dicts, numpy arrays, strings.
        numpy = get_module("numpy")
        if isinstance(v, (list, tuple)) and len(v) > 0 and not isinstance(v[0], dict):
            return True
        if numpy and isinstance(v, numpy.ndarray):
            return True
        return isinstance(v, str)

    def __repr__(self):
        numpy = get_module("numpy")
        if isinstance(self.v, (list, tuple)):
            # Delegate sequences to the eliding list formatter.
            return _list_repr_elided(
                self.v, threshold=self.threshold, indent=self.indent
            )

        if numpy and isinstance(self.v, numpy.ndarray):
            # Temporarily install print options that elide long arrays.
            orig_opts = numpy.get_printoptions()
            numpy.set_printoptions(
                **dict(orig_opts, threshold=self.threshold, edgeitems=3, linewidth=80)
            )
            res = self.v.__repr__()

            # Indent every line after the first.
            res = ("\n" + " " * self.indent).join(res.split("\n"))

            # Restore the caller's print options.
            numpy.set_printoptions(**orig_opts)
            return res

        if isinstance(self.v, str):
            # Elide long strings, keeping 30 chars from each end.
            if len(self.v) > 80:
                return "(" + repr(self.v[:30]) + " ... " + repr(self.v[-30:]) + ")"
            return self.v.__repr__()

        return self.v.__repr__()
class ElidedPrettyPrinter(PrettyPrinter):
    """
    PrettyPrinter subclass that elides long lists/arrays/strings
    """

    def __init__(self, *args, **kwargs):
        # Elision threshold forwarded to ElidedWrapper; default 200 elements.
        self.threshold = kwargs.pop("threshold", 200)
        PrettyPrinter.__init__(self, *args, **kwargs)

    def _format(self, val, stream, indent, allowance, context, level):
        if not ElidedWrapper.is_wrappable(val):
            return PrettyPrinter._format(
                self, val, stream, indent, allowance, context, level
            )
        # Re-dispatch through _format so the wrapper's custom repr is used.
        wrapped = ElidedWrapper(val, self.threshold, indent)
        return self._format(wrapped, stream, indent, allowance, context, level)
def node_generator(node, path=()):
    """
    General, node-yielding generator.

    Yields (node, path) tuples whenever a dict value is found while
    walking ``node`` depth-first.

    A path is a sequence of hashable values that can be used as either keys to
    a mapping (dict) or indices to a sequence (list). A path is always wrt to
    some object. Given an object, a path explains how to get from the top level
    of that object to a nested value in the object.

    :param (dict) node: Part of a dict to be traversed.
    :param (tuple[str]) path: Defines the path of the current node.
    :return: (Generator)

    Example:

        >>> for node, path in node_generator({'a': {'b': 5}}):
        ...     print(node, path)
        {'a': {'b': 5}} ()
        {'b': 5} ('a',)

    """
    if not isinstance(node, dict):
        # Called with a non-dict at the top level: nothing to yield.
        return
    yield node, path
    for key, value in node.items():
        if isinstance(value, dict):
            yield from node_generator(value, path + (key,))
def get_by_path(obj, path):
    """
    Iteratively index into ``obj`` with each key in ``path``.

    :param (list|dict) obj: The top-level object.
    :param (tuple[str]|tuple[int]) path: Keys to access parts of obj.
    :return: (*)

    Example:

        >>> figure = {'data': [{'x': [5]}]}
        >>> get_by_path(figure, ('data', 0, 'x'))
        [5]

    """
    current = obj
    for key in path:
        current = current[key]
    return current
def decode_unicode(coll):
    """
    Recursively walk a list/dict structure in place, re-encoding string
    values via str() and normalizing every dict key to str().

    Mutates ``coll`` and returns it.  (Python 2 legacy helper; on Python 3
    the str() calls are no-ops for values that are already str.)
    """
    if isinstance(coll, list):
        for idx, item in enumerate(coll):
            if isinstance(item, (dict, list)):
                coll[idx] = decode_unicode(item)
            elif isinstance(item, str):
                try:
                    coll[idx] = str(item)
                except UnicodeEncodeError:
                    # Leave un-encodable entries untouched.
                    pass
    elif isinstance(coll, dict):
        keys, vals = list(coll.keys()), list(coll.values())
        for key, val in zip(keys, vals):
            if isinstance(val, (dict, list)):
                coll[key] = decode_unicode(val)
            elif isinstance(val, str):
                try:
                    coll[key] = str(val)
                except UnicodeEncodeError:
                    pass
            # Re-insert under the stringified key (applies to every key).
            coll[str(key)] = coll.pop(key)
    return coll
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/extension/test_string.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/pandas/tests/extension/test_string.py
import string
import numpy as np
import pytest
import pandas as pd
from pandas.core.arrays.string_ import StringArray, StringDtype
from pandas.tests.extension import base
@pytest.fixture
def dtype():
    """StringDtype instance under test."""
    return StringDtype()
@pytest.fixture
def data():
    """100 random ASCII letters; first two elements guaranteed distinct."""
    letters = list(string.ascii_letters)
    strings = np.random.choice(letters, size=100)
    # Some base tests require data[0] != data[1]; redraw until satisfied.
    while strings[0] == strings[1]:
        strings = np.random.choice(letters, size=100)
    return StringArray._from_sequence(strings)
@pytest.fixture
def data_missing():
    """Length 2 array with [NA, Valid]"""
    return StringArray._from_sequence([pd.NA, "A"])
@pytest.fixture
def data_for_sorting():
    """Three distinct values in unsorted order: ["B", "C", "A"]."""
    return StringArray._from_sequence(["B", "C", "A"])
@pytest.fixture
def data_missing_for_sorting():
    """Array ["B", NA, "A"] for sorting-with-missing tests."""
    return StringArray._from_sequence(["B", pd.NA, "A"])
@pytest.fixture
def na_value():
    """Missing-value marker used by StringArray (pd.NA)."""
    return pd.NA
@pytest.fixture
def data_for_grouping():
    """Array with repeats and NAs: ["B", "B", NA, NA, "A", "A", "B", "C"]."""
    return StringArray._from_sequence(["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"])
class TestDtype(base.BaseDtypeTests):
    """Inherit the base dtype test suite unchanged."""

    pass
class TestInterface(base.BaseInterfaceTests):
    """Inherit the base interface test suite unchanged."""

    pass
class TestConstructors(base.BaseConstructorsTests):
    """Inherit the base constructors test suite unchanged."""

    pass
class TestReshaping(base.BaseReshapingTests):
    """Inherit the base reshaping test suite unchanged."""

    pass
class TestGetitem(base.BaseGetitemTests):
    """Inherit the base __getitem__ test suite unchanged."""

    pass
class TestSetitem(base.BaseSetitemTests):
    """Inherit the base __setitem__ test suite unchanged."""

    pass
class TestMissing(base.BaseMissingTests):
    """Inherit the base missing-data test suite unchanged."""

    pass
class TestNoReduce(base.BaseNoReduceTests):
    """Inherit the base no-reductions test suite unchanged."""

    pass
class TestMethods(base.BaseMethodsTests):
    """Base methods suite; value_counts is skipped (result is nullable)."""

    @pytest.mark.skip(reason="returns nullable")
    def test_value_counts(self, all_data, dropna):
        return super().test_value_counts(all_data, dropna)
class TestCasting(base.BaseCastingTests):
    """Inherit the base casting test suite unchanged."""

    pass
class TestComparisonOps(base.BaseComparisonOpsTests):
    """Comparison-operator tests; results use the nullable boolean dtype."""

    def _compare_other(self, s, data, op_name, other):
        # StringArray comparisons should match the object-dtype result,
        # but with a nullable "boolean" result dtype.
        op = getattr(s, op_name)
        result = op(other)
        expected = getattr(s.astype(object), op_name)(other).astype("boolean")
        self.assert_series_equal(result, expected)

    def test_compare_scalar(self, data, all_compare_operators):
        ser = pd.Series(data)
        self._compare_other(ser, data, all_compare_operators, "abc")
class TestParsing(base.BaseParsingTests):
    """Inherit the base parsing test suite unchanged."""

    pass
class TestPrinting(base.BasePrintingTests):
    """Inherit the base printing test suite unchanged."""

    pass
class TestGroupBy(base.BaseGroupbyTests):
    """Inherit the base groupby test suite unchanged."""

    pass
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/updatemenu/__init__.py | import sys
# Eagerly import every validator on Python < 3.7; on 3.7+ defer to
# PEP 562 lazy module attributes built by relative_import.
if sys.version_info < (3, 7):
    from ._yanchor import YanchorValidator
    from ._y import YValidator
    from ._xanchor import XanchorValidator
    from ._x import XValidator
    from ._visible import VisibleValidator
    from ._type import TypeValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._showactive import ShowactiveValidator
    from ._pad import PadValidator
    from ._name import NameValidator
    from ._font import FontValidator
    from ._direction import DirectionValidator
    from ._buttondefaults import ButtondefaultsValidator
    from ._buttons import ButtonsValidator
    from ._borderwidth import BorderwidthValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolor import BgcolorValidator
    from ._active import ActiveValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__); each validator
    # class is imported only on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yanchor.YanchorValidator",
            "._y.YValidator",
            "._xanchor.XanchorValidator",
            "._x.XValidator",
            "._visible.VisibleValidator",
            "._type.TypeValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._showactive.ShowactiveValidator",
            "._pad.PadValidator",
            "._name.NameValidator",
            "._font.FontValidator",
            "._direction.DirectionValidator",
            "._buttondefaults.ButtondefaultsValidator",
            "._buttons.ButtonsValidator",
            "._borderwidth.BorderwidthValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolor.BgcolorValidator",
            "._active.ActiveValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/violin/_meanline.py | import _plotly_utils.basevalidators
class MeanlineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `meanline` property of `violin` traces."""

    def __init__(self, plotly_name="meanline", parent_name="violin", **kwargs):
        # Pop codegen overrides up front; everything else is forwarded as-is.
        data_class = kwargs.pop("data_class_str", "Meanline")
        docs = kwargs.pop(
            "data_docs",
            """
            color
                Sets the mean line color.
            visible
                Determines if a line corresponding to the
                sample's mean is shown inside the violins. If
                `box.visible` is turned on, the mean line is
                drawn inside the inner box. Otherwise, the mean
                line is drawn from one side of the violin to
                other.
            width
                Sets the mean line width.
""",
        )
        super(MeanlineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/mesh3d/_lighting.py | import _plotly_utils.basevalidators
class LightingValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `lighting` property of `mesh3d` traces."""

    def __init__(self, plotly_name="lighting", parent_name="mesh3d", **kwargs):
        # Pop codegen overrides up front; everything else is forwarded as-is.
        data_class = kwargs.pop("data_class_str", "Lighting")
        docs = kwargs.pop(
            "data_docs",
            """
            ambient
                Ambient light increases overall color
                visibility but can wash out the image.
            diffuse
                Represents the extent that incident rays are
                reflected in a range of angles.
            facenormalsepsilon
                Epsilon for face normals calculation avoids
                math issues arising from degenerate geometry.
            fresnel
                Represents the reflectance as a dependency of
                the viewing angle; e.g. paper is reflective
                when viewing it from the edge of the paper
                (almost 90 degrees), causing shine.
            roughness
                Alters specular reflection; the rougher the
                surface, the wider and less contrasty the
                shine.
            specular
                Represents the level that incident rays are
                reflected in a single direction, causing shine.
            vertexnormalsepsilon
                Epsilon for vertex normals calculation avoids
                math issues arising from degenerate geometry.
""",
        )
        super(LightingValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/image/_zmax.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class ZmaxValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Info-array validator for `image.zmax`: one number per color channel."""

    def __init__(self, plotly_name="zmax", parent_name="image", **kwargs):
        # Four independent {number, calc} item specs, one per channel.
        item_spec = {"valType": "number", "editType": "calc"}
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("items", [dict(item_spec) for _ in range(4)])
        kwargs.setdefault("role", "info")
        super(ZmaxValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_choropleth.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_choropleth.py
from plotly.graph_objs import Choropleth
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/choropleth/__init__.py | <gh_stars>10-100
import sys
# Auto-generated lazy-import shim for choropleth trace validators.
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so every validator
# must be imported eagerly there; on 3.7+ relative_import defers each import
# until the attribute is first accessed.
if sys.version_info < (3, 7):
    from ._zsrc import ZsrcValidator
    from ._zmin import ZminValidator
    from ._zmid import ZmidValidator
    from ._zmax import ZmaxValidator
    from ._zauto import ZautoValidator
    from ._z import ZValidator
    from ._visible import VisibleValidator
    from ._unselected import UnselectedValidator
    from ._uirevision import UirevisionValidator
    from ._uid import UidValidator
    from ._textsrc import TextsrcValidator
    from ._text import TextValidator
    from ._stream import StreamValidator
    from ._showscale import ShowscaleValidator
    from ._showlegend import ShowlegendValidator
    from ._selectedpoints import SelectedpointsValidator
    from ._selected import SelectedValidator
    from ._reversescale import ReversescaleValidator
    from ._name import NameValidator
    from ._metasrc import MetasrcValidator
    from ._meta import MetaValidator
    from ._marker import MarkerValidator
    from ._locationssrc import LocationssrcValidator
    from ._locations import LocationsValidator
    from ._locationmode import LocationmodeValidator
    from ._legendgroup import LegendgroupValidator
    from ._idssrc import IdssrcValidator
    from ._ids import IdsValidator
    from ._hovertextsrc import HovertextsrcValidator
    from ._hovertext import HovertextValidator
    from ._hovertemplatesrc import HovertemplatesrcValidator
    from ._hovertemplate import HovertemplateValidator
    from ._hoverlabel import HoverlabelValidator
    from ._hoverinfosrc import HoverinfosrcValidator
    from ._hoverinfo import HoverinfoValidator
    from ._geojson import GeojsonValidator
    from ._geo import GeoValidator
    from ._featureidkey import FeatureidkeyValidator
    from ._customdatasrc import CustomdatasrcValidator
    from ._customdata import CustomdataValidator
    from ._colorscale import ColorscaleValidator
    from ._colorbar import ColorbarValidator
    from ._coloraxis import ColoraxisValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__) implementing
    # PEP 562 lazy loading for the same set of names as the eager branch.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._zsrc.ZsrcValidator",
            "._zmin.ZminValidator",
            "._zmid.ZmidValidator",
            "._zmax.ZmaxValidator",
            "._zauto.ZautoValidator",
            "._z.ZValidator",
            "._visible.VisibleValidator",
            "._unselected.UnselectedValidator",
            "._uirevision.UirevisionValidator",
            "._uid.UidValidator",
            "._textsrc.TextsrcValidator",
            "._text.TextValidator",
            "._stream.StreamValidator",
            "._showscale.ShowscaleValidator",
            "._showlegend.ShowlegendValidator",
            "._selectedpoints.SelectedpointsValidator",
            "._selected.SelectedValidator",
            "._reversescale.ReversescaleValidator",
            "._name.NameValidator",
            "._metasrc.MetasrcValidator",
            "._meta.MetaValidator",
            "._marker.MarkerValidator",
            "._locationssrc.LocationssrcValidator",
            "._locations.LocationsValidator",
            "._locationmode.LocationmodeValidator",
            "._legendgroup.LegendgroupValidator",
            "._idssrc.IdssrcValidator",
            "._ids.IdsValidator",
            "._hovertextsrc.HovertextsrcValidator",
            "._hovertext.HovertextValidator",
            "._hovertemplatesrc.HovertemplatesrcValidator",
            "._hovertemplate.HovertemplateValidator",
            "._hoverlabel.HoverlabelValidator",
            "._hoverinfosrc.HoverinfosrcValidator",
            "._hoverinfo.HoverinfoValidator",
            "._geojson.GeojsonValidator",
            "._geo.GeoValidator",
            "._featureidkey.FeatureidkeyValidator",
            "._customdatasrc.CustomdatasrcValidator",
            "._customdata.CustomdataValidator",
            "._colorscale.ColorscaleValidator",
            "._colorbar.ColorbarValidator",
            "._coloraxis.ColoraxisValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/template/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/template/__init__.py<gh_stars>1000+
import sys
# Auto-generated lazy-import shim for layout.template validators.
# Eager imports on Python < 3.7 (no PEP 562); lazy on 3.7+.
if sys.version_info < (3, 7):
    from ._layout import LayoutValidator
    from ._data import DataValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._layout.LayoutValidator", "._data.DataValidator"]
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_transition.py | import _plotly_utils.basevalidators
class TransitionValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `transition` property of `layout`."""

    def __init__(self, plotly_name="transition", parent_name="layout", **kwargs):
        # Pop codegen overrides up front; everything else is forwarded as-is.
        data_class = kwargs.pop("data_class_str", "Transition")
        docs = kwargs.pop(
            "data_docs",
            """
            duration
                The duration of the transition, in
                milliseconds. If equal to zero, updates are
                synchronous.
            easing
                The easing function used for the transition
            ordering
                Determines whether the figure's layout or
                traces smoothly transitions during updates that
                make both traces and layout change.
""",
        )
        super(TransitionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/table/header/_suffixsrc.py | import _plotly_utils.basevalidators
class SuffixsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for the `suffixsrc` property of `table.header`."""

    def __init__(self, plotly_name="suffixsrc", parent_name="table.header", **kwargs):
        # Defaults are only applied when the caller has not overridden them.
        kwargs.setdefault("edit_type", "none")
        kwargs.setdefault("role", "info")
        super(SuffixsrcValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/io/test_gcs.py | from io import BytesIO
import os
import numpy as np
import pytest
from pandas import DataFrame, date_range, read_csv
import pandas._testing as tm
from pandas.util import _test_decorators as td
@td.skip_if_no("gcsfs")
def test_read_csv_gcs(monkeypatch):
    """read_csv must fetch gs:// URLs through the registered fsspec filesystem."""
    from fsspec import AbstractFileSystem, registry

    registry.target.clear()  # noqa  # remove state
    expected = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    class MockGCSFileSystem(AbstractFileSystem):
        def open(self, *args, **kwargs):
            # Serve the expected frame as CSV bytes regardless of path.
            return BytesIO(expected.to_csv(index=False).encode())

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    result = read_csv("gs://test/test.csv", parse_dates=["dt"])
    tm.assert_frame_equal(expected, result)
@td.skip_if_no("gcsfs")
def test_to_csv_gcs(monkeypatch):
    """Round-trip a frame through to_csv/read_csv on a mocked gs:// filesystem."""
    from fsspec import AbstractFileSystem, registry

    registry.target.clear()  # noqa  # remove state
    expected = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    buffer = BytesIO()
    # Neutralize close() so the buffer survives to_csv closing its handle.
    buffer.close = lambda: True

    class MockGCSFileSystem(AbstractFileSystem):
        def open(self, *args, **kwargs):
            buffer.seek(0)
            return buffer

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    expected.to_csv("gs://test/test.csv", index=True)

    def mock_get_filepath_or_buffer(*args, **kwargs):
        return BytesIO(expected.to_csv(index=True).encode()), None, None, False

    monkeypatch.setattr(
        "pandas.io.common.get_filepath_or_buffer", mock_get_filepath_or_buffer
    )
    result = read_csv("gs://test/test.csv", parse_dates=["dt"], index_col=0)
    tm.assert_frame_equal(expected, result)
@td.skip_if_no("fastparquet")
@td.skip_if_no("gcsfs")
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
    """Regression test for writing to a not-yet-existent GCS Parquet file."""
    from fsspec import AbstractFileSystem, registry

    registry.target.clear()  # noqa  # remove state
    df = DataFrame(
        {
            "int": [1, 3],
            "float": [2.0, np.nan],
            "str": ["t", "s"],
            "dt": date_range("2018-06-18", periods=2),
        }
    )

    class MockGCSFileSystem(AbstractFileSystem):
        def open(self, path, mode="r", *args):
            # A brand-new object cannot be opened for reading.
            if "w" not in mode:
                raise FileNotFoundError
            return open(os.path.join(tmpdir, "test.parquet"), mode)

    monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
    df.to_parquet(
        "gs://test/test.csv", index=True, engine="fastparquet", compression=None
    )
@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
    """Without gcsfs installed, gs:// access must raise a helpful ImportError."""
    with pytest.raises(ImportError) as excinfo:
        read_csv("gs://test/test.csv")
    assert "gcsfs library is required" in str(excinfo.value)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/parcoords/line/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp<filename>env/lib/python3.8/site-packages/plotly/validators/parcoords/line/__init__.py
import sys
# Auto-generated lazy-import shim for parcoords.line validators.
# Eager imports on Python < 3.7 (no PEP 562); lazy on 3.7+.
if sys.version_info < (3, 7):
    from ._showscale import ShowscaleValidator
    from ._reversescale import ReversescaleValidator
    from ._colorsrc import ColorsrcValidator
    from ._colorscale import ColorscaleValidator
    from ._colorbar import ColorbarValidator
    from ._coloraxis import ColoraxisValidator
    from ._color import ColorValidator
    from ._cmin import CminValidator
    from ._cmid import CmidValidator
    from ._cmax import CmaxValidator
    from ._cauto import CautoValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._showscale.ShowscaleValidator",
            "._reversescale.ReversescaleValidator",
            "._colorsrc.ColorsrcValidator",
            "._colorscale.ColorscaleValidator",
            "._colorbar.ColorbarValidator",
            "._coloraxis.ColoraxisValidator",
            "._color.ColorValidator",
            "._cmin.CminValidator",
            "._cmid.CmidValidator",
            "._cmax.CmaxValidator",
            "._cauto.CautoValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/xaxis/rangeselector/_buttondefaults.py | import _plotly_utils.basevalidators
class ButtondefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator supplying the template defaults for
    `layout.xaxis.rangeselector` buttons."""

    def __init__(
        self,
        plotly_name="buttondefaults",
        parent_name="layout.xaxis.rangeselector",
        **kwargs
    ):
        # Defaults elements carry no per-property docs of their own.
        data_class = kwargs.pop("data_class_str", "Button")
        docs = kwargs.pop(
            "data_docs",
            """
""",
        )
        super(ButtondefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/tests/test_return_complex.py | from __future__ import division, absolute_import, print_function
import pytest
from numpy import array
from numpy.compat import long
from numpy.testing import assert_, assert_raises
from . import util
class TestReturnComplex(util.F2PyTest):
    """Shared checks for f2py-wrapped Fortran routines that return complex
    values: a wide range of Python/NumPy inputs must be coerced correctly."""

    def check_function(self, t):
        # First word of the f2py-generated docstring is the routine name.
        tname = t.__doc__.split()[0]
        # Single-precision routines (complex / complex*8) carry only ~7
        # significant digits, so tolerate a small absolute error for them.
        if tname in ['t0', 't8', 's0', 's8']:
            err = 1e-5
        else:
            err = 0.0
        # Scalars of assorted Python types must be accepted and coerced.
        assert_(abs(t(234j) - 234.0j) <= err)
        assert_(abs(t(234.6) - 234.6) <= err)
        assert_(abs(t(long(234)) - 234.0) <= err)
        assert_(abs(t(234.6 + 3j) - (234.6 + 3j)) <= err)
        #assert_( abs(t('234')-234.)<=err)
        #assert_( abs(t('234.6')-234.6)<=err)
        assert_(abs(t(-234) + 234.) <= err)
        # Sequences and 0-d/1-d/2-d arrays collapse to their first element.
        assert_(abs(t([234]) - 234.) <= err)
        assert_(abs(t((234,)) - 234.) <= err)
        assert_(abs(t(array(234)) - 234.) <= err)
        assert_(abs(t(array(23 + 4j, 'F')) - (23 + 4j)) <= err)
        assert_(abs(t(array([234])) - 234.) <= err)
        assert_(abs(t(array([[234]])) - 234.) <= err)
        # 'b' is signed int8: 234 wraps to -22, hence the "+ 22." check.
        assert_(abs(t(array([234], 'b')) + 22.) <= err)
        assert_(abs(t(array([234], 'h')) - 234.) <= err)
        assert_(abs(t(array([234], 'i')) - 234.) <= err)
        assert_(abs(t(array([234], 'l')) - 234.) <= err)
        assert_(abs(t(array([234], 'q')) - 234.) <= err)
        assert_(abs(t(array([234], 'f')) - 234.) <= err)
        assert_(abs(t(array([234], 'd')) - 234.) <= err)
        assert_(abs(t(array([234 + 3j], 'F')) - (234 + 3j)) <= err)
        assert_(abs(t(array([234], 'D')) - 234.) <= err)

        #assert_raises(TypeError, t, array([234], 'a1'))
        # Inconvertible arguments must raise.
        assert_raises(TypeError, t, 'abc')
        assert_raises(IndexError, t, [])
        assert_raises(IndexError, t, ())
        assert_raises(TypeError, t, t)
        assert_raises(TypeError, t, {})

        try:
            # Huge Python ints either overflow to inf or raise OverflowError;
            # both outcomes are acceptable.
            r = t(10 ** 400)
            assert_(repr(r) in ['(inf+0j)', '(Infinity+0j)'], repr(r))
        except OverflowError:
            pass
class TestF77ReturnComplex(TestReturnComplex):
    """Fixed-form F77 implementations: functions and intent(out) subroutines
    for complex, complex*8, complex*16 and double complex."""

    # Compiled by the F2PyTest machinery; `cf2py` lines are f2py directives.
    code = """
       function t0(value)
         complex value
         complex t0
         t0 = value
       end
       function t8(value)
         complex*8 value
         complex*8 t8
         t8 = value
       end
       function t16(value)
         complex*16 value
         complex*16 t16
         t16 = value
       end
       function td(value)
         double complex value
         double complex td
         td = value
       end

       subroutine s0(t0,value)
         complex value
         complex t0
cf2py    intent(out) t0
         t0 = value
       end
       subroutine s8(t8,value)
         complex*8 value
         complex*8 t8
cf2py    intent(out) t8
         t8 = value
       end
       subroutine s16(t16,value)
         complex*16 value
         complex*16 t16
cf2py    intent(out) t16
         t16 = value
       end
       subroutine sd(td,value)
         double complex value
         double complex td
cf2py    intent(out) td
         td = value
       end
    """

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
    def test_all(self, name):
        # Each wrapped routine is exercised with the shared check battery.
        self.check_function(getattr(self.module, name))
class TestF90ReturnComplex(TestReturnComplex):
    """Free-form F90 module implementations of the same routines."""

    suffix = ".f90"
    # Compiled by the F2PyTest machinery; `!f2py` lines are f2py directives.
    code = """
module f90_return_complex
  contains
       function t0(value)
        complex :: value
        complex :: t0
        t0 = value
       end function t0
       function t8(value)
        complex(kind=4) :: value
        complex(kind=4) :: t8
        t8 = value
       end function t8
       function t16(value)
        complex(kind=8) :: value
        complex(kind=8) :: t16
        t16 = value
       end function t16
       function td(value)
        double complex :: value
        double complex :: td
        td = value
       end function td

       subroutine s0(t0,value)
        complex :: value
        complex :: t0
        !f2py intent(out) t0
        t0 = value
       end subroutine s0
       subroutine s8(t8,value)
        complex(kind=4) :: value
        complex(kind=4) :: t8
        !f2py intent(out) t8
        t8 = value
       end subroutine s8
       subroutine s16(t16,value)
        complex(kind=8) :: value
        complex(kind=8) :: t16
        !f2py intent(out) t16
        t16 = value
       end subroutine s16
       subroutine sd(td,value)
        double complex :: value
        double complex :: td
        !f2py intent(out) td
        td = value
       end subroutine sd
end module f90_return_complex
    """

    @pytest.mark.slow
    @pytest.mark.parametrize('name', 't0,t8,t16,td,s0,s8,s16,sd'.split(','))
    def test_all(self, name):
        # Routines live inside the f90_return_complex module namespace.
        self.check_function(getattr(self.module.f90_return_complex, name))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scattercarpet/_line.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the `line` property of `scattercarpet` traces."""

    def __init__(self, plotly_name="line", parent_name="scattercarpet", **kwargs):
        # Pop codegen overrides up front; everything else is forwarded as-is.
        data_class = kwargs.pop("data_class_str", "Line")
        docs = kwargs.pop(
            "data_docs",
            """
            color
                Sets the line color.
            dash
                Sets the dash style of lines. Set to a dash
                type string ("solid", "dot", "dash",
                "longdash", "dashdot", or "longdashdot") or a
                dash length list in px (eg "5px,10px,2px,2px").
            shape
                Determines the line shape. With "spline" the
                lines are drawn using spline interpolation. The
                other available values correspond to step-wise
                line shapes.
            smoothing
                Has an effect only if `shape` is set to
                "spline" Sets the amount of smoothing. 0
                corresponds to no smoothing (equivalent to a
                "linear" shape).
            width
                Sets the line width (in px).
""",
        )
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/_plotly_utils/colors/_swatches.py | def _swatches(module_names, module_contents, template=None):
"""
Parameters
----------
template : str or dict or plotly.graph_objects.layout.Template instance
The figure template name or definition.
Returns
-------
fig : graph_objects.Figure containing the displayed image
A `Figure` object. This figure demonstrates the color scales and
sequences in this module, as stacked bar charts.
"""
import plotly.graph_objs as go
from plotly.express._core import apply_default_cascade
args = dict(template=template)
apply_default_cascade(args)
sequences = [
(k, v)
for k, v in module_contents.items()
if not (k.startswith("_") or k.startswith("swatches") or k.endswith("_r"))
]
return go.Figure(
data=[
go.Bar(
orientation="h",
y=[name] * len(colors),
x=[1] * len(colors),
customdata=list(range(len(colors))),
marker=dict(color=colors),
hovertemplate="%{y}[%{customdata}] = %{marker.color}<extra></extra>",
)
for name, colors in reversed(sequences)
],
layout=dict(
title="plotly.colors." + module_names.split(".")[-1],
barmode="stack",
barnorm="fraction",
bargap=0.5,
showlegend=False,
xaxis=dict(range=[-0.02, 1.02], showticklabels=False, showgrid=False),
height=max(600, 40 * len(sequences)),
template=args["template"],
margin=dict(b=10),
),
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/express/colors.py | from __future__ import absolute_import
from plotly.colors import *
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/indicator/gauge/step/__init__.py | import sys
# Auto-generated lazy-import shim for indicator.gauge.step validators.
# Eager imports on Python < 3.7 (no PEP 562); lazy on 3.7+.
if sys.version_info < (3, 7):
    from ._thickness import ThicknessValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._range import RangeValidator
    from ._name import NameValidator
    from ._line import LineValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._thickness.ThicknessValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._range.RangeValidator",
            "._name.NameValidator",
            "._line.LineValidator",
            "._color.ColorValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/frame/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
# Auto-generated lazy-import shim for frame validators.
# Eager imports on Python < 3.7 (no PEP 562); lazy on 3.7+.
if sys.version_info < (3, 7):
    from ._traces import TracesValidator
    from ._name import NameValidator
    from ._layout import LayoutValidator
    from ._group import GroupValidator
    from ._data import DataValidator
    from ._baseframe import BaseframeValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._traces.TracesValidator",
            "._name.NameValidator",
            "._layout.LayoutValidator",
            "._group.GroupValidator",
            "._data.DataValidator",
            "._baseframe.BaseframeValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/grid_objs.py | from __future__ import absolute_import
from _plotly_future_ import _chart_studio_error

# plotly.grid_objs moved to the separate chart_studio package; importing this
# module immediately raises an informative error directing users there.
_chart_studio_error("grid_objs")
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/doc/structured_arrays.py | <reponame>acrucetta/Chicago_COVI_WebApp
"""
=================
Structured Arrays
=================
Introduction
============
Structured arrays are ndarrays whose datatype is a composition of simpler
datatypes organized as a sequence of named :term:`fields <field>`. For example,
::
>>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
array([('Rex', 9, 81.), ('Fido', 3, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Here ``x`` is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named 'name', 2.
a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
If you index ``x`` at position 1 you get a structure::
>>> x[1]
('Fido', 3, 27.0)
You can access and modify individual fields of a structured array by indexing
with the field name::
>>> x['age']
array([9, 3], dtype=int32)
>>> x['age'] = 5
>>> x
array([('Rex', 5, 81.), ('Fido', 5, 27.)],
dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Structured datatypes are designed to be able to mimic 'structs' in the C
language, and share a similar memory layout. They are meant for interfacing with
C code and for low-level manipulation of structured buffers, for example for
interpreting binary blobs. For these purposes they support specialized features
such as subarrays, nested datatypes, and unions, and allow control over the
memory layout of the structure.
Users looking to manipulate tabular data, such as stored in csv files, may find
other pydata projects more suitable, such as xarray, pandas, or DataArray.
These provide a high-level interface for tabular data analysis and are better
optimized for that use. For instance, the C-struct-like memory layout of
structured arrays in numpy can lead to poor cache behavior in comparison.
.. _defining-structured-types:
Structured Datatypes
====================
A structured datatype can be thought of as a sequence of bytes of a certain
length (the structure's :term:`itemsize`) which is interpreted as a collection
of fields. Each field has a name, a datatype, and a byte offset within the
structure. The datatype of a field may be any numpy datatype including other
structured datatypes, and it may also be a :term:`subarray data type` which
behaves like an ndarray of a specified shape. The offsets of the fields are
arbitrary, and fields may even overlap. These offsets are usually determined
automatically by numpy, but can also be specified.
Structured Datatype Creation
----------------------------
Structured datatypes may be created using the function :func:`numpy.dtype`.
There are 4 alternative forms of specification which vary in flexibility and
conciseness. These are further documented in the
:ref:`Data Type Objects <arrays.dtypes.constructing>` reference page, and in
summary they are:
1. A list of tuples, one tuple per field
Each tuple has the form ``(fieldname, datatype, shape)`` where shape is
optional. ``fieldname`` is a string (or tuple if titles are used, see
:ref:`Field Titles <titles>` below), ``datatype`` may be any object
convertible to a datatype, and ``shape`` is a tuple of integers specifying
subarray shape.
>>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
If ``fieldname`` is the empty string ``''``, the field will be given a
default name of the form ``f#``, where ``#`` is the integer index of the
field, counting from 0 from the left::
>>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
The byte offsets of the fields within the structure and the total
structure itemsize are determined automatically.
2. A string of comma-separated dtype specifications
In this shorthand notation any of the :ref:`string dtype specifications
<arrays.dtypes.constructing>` may be used in a string and separated by
commas. The itemsize and byte offsets of the fields are determined
automatically, and the field names are given the default names ``f0``,
``f1``, etc. ::
>>> np.dtype('i8, f4, S3')
dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
>>> np.dtype('3int8, float32, (2, 3)float64')
dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
3. A dictionary of field parameter arrays
This is the most flexible form of specification since it allows control
over the byte-offsets of the fields and the itemsize of the structure.
The dictionary has two required keys, 'names' and 'formats', and four
optional keys, 'offsets', 'itemsize', 'aligned' and 'titles'. The values
for 'names' and 'formats' should respectively be a list of field names and
a list of dtype specifications, of the same length. The optional 'offsets'
value should be a list of integer byte-offsets, one for each field within
the structure. If 'offsets' is not given the offsets are determined
automatically. The optional 'itemsize' value should be an integer
describing the total size in bytes of the dtype, which must be large
enough to contain all the fields.
::
>>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
dtype([('col1', '<i4'), ('col2', '<f4')])
>>> np.dtype({'names': ['col1', 'col2'],
... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
Offsets may be chosen such that the fields overlap, though this will mean
that assigning to one field may clobber any overlapping field's data. As
an exception, fields of :class:`numpy.object` type cannot overlap with
other fields, because of the risk of clobbering the internal object
pointer and then dereferencing it.
The optional 'aligned' value can be set to ``True`` to make the automatic
offset computation use aligned offsets (see :ref:`offsets-and-alignment`),
as if the 'align' keyword argument of :func:`numpy.dtype` had been set to
True.
The optional 'titles' value should be a list of titles of the same length
as 'names', see :ref:`Field Titles <titles>` below.
4. A dictionary of field names
The use of this form of specification is discouraged, but documented here
because older numpy code may use it. The keys of the dictionary are the
field names and the values are tuples specifying type and offset::
>>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
dtype([('col1', 'i1'), ('col2', '<f4')])
This form is discouraged because Python dictionaries do not preserve order
in Python versions before Python 3.6, and the order of the fields in a
structured dtype has meaning. :ref:`Field Titles <titles>` may be
specified by using a 3-tuple, see below.
Manipulating and Displaying Structured Datatypes
------------------------------------------------
The list of field names of a structured datatype can be found in the ``names``
attribute of the dtype object::
>>> d = np.dtype([('x', 'i8'), ('y', 'f4')])
>>> d.names
('x', 'y')
The field names may be modified by assigning to the ``names`` attribute using a
sequence of strings of the same length.
The dtype object also has a dictionary-like attribute, ``fields``, whose keys
are the field names (and :ref:`Field Titles <titles>`, see below) and whose
values are tuples containing the dtype and byte offset of each field. ::
>>> d.fields
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the ``names`` and ``fields`` attributes will equal ``None`` for
unstructured arrays. The recommended way to test if a dtype is structured is
with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
with 0 fields.
The string representation of a structured datatype is shown in the "list of
tuples" form if possible, otherwise numpy falls back to using the more general
dictionary form.
.. _offsets-and-alignment:
Automatic Byte Offsets and Alignment
------------------------------------
Numpy uses one of two methods to automatically determine the field byte offsets
and the overall itemsize of a structured datatype, depending on whether
``align=True`` was specified as a keyword argument to :func:`numpy.dtype`.
By default (``align=False``), numpy will pack the fields together such that
each field starts at the byte offset the previous field ended, and the fields
are contiguous in memory. ::
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
offsets: [0, 1, 2, 6, 7, 15]
itemsize: 17
If ``align=True`` is set, numpy will pad the structure in the same way many C
compilers would pad a C-struct. Aligned structures can give a performance
improvement in some cases, at the cost of increased datatype size. Padding
bytes are inserted between fields such that each field's byte offset will be a
multiple of that field's alignment, which is usually equal to the field's size
in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
structure will also have trailing padding added so that its itemsize is a
multiple of the largest field's alignment. ::
>>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
offsets: [0, 1, 4, 8, 16, 24]
itemsize: 32
Note that although almost all modern C compilers pad in this way by default,
padding in C structs is C-implementation-dependent so this memory layout is not
guaranteed to exactly match that of a corresponding struct in a C program. Some
work may be needed, either on the numpy side or the C side, to obtain exact
correspondence.
If offsets were specified using the optional ``offsets`` key in the
dictionary-based dtype specification, setting ``align=True`` will check that
each field's offset is a multiple of its size and that the itemsize is a
multiple of the largest field size, and raise an exception if not.
If the offsets of the fields and itemsize of a structured array satisfy the
alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
<numpy.ndarray.flags>` set.
A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
aligned dtype or array to a packed one and vice versa. It takes either a dtype
or structured ndarray as an argument, and returns a copy with fields re-packed,
with or without padding bytes.
.. _titles:
Field Titles
------------
In addition to field names, fields may also have an associated :term:`title`,
an alternate name, which is sometimes used as an additional description or
alias for the field. The title may be used to index an array, just like a
field name.
To add titles when using the list-of-tuples form of dtype specification, the
field name may be specified as a tuple of two strings instead of a single
string, which will be the field's title and field name respectively. For
example::
>>> np.dtype([(('my title', 'name'), 'f4')])
dtype([(('my title', 'name'), '<f4')])
When using the first form of dictionary-based specification, the titles may be
supplied as an extra ``'titles'`` key as described above. When using the second
(discouraged) dictionary-based specification, the title can be supplied by
providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
2-element tuple::
>>> np.dtype({'name': ('i4', 0, 'my title')})
dtype([(('my title', 'name'), '<i4')])
The ``dtype.fields`` dictionary will contain titles as keys, if any
titles are used. This means effectively that a field with a title will be
represented twice in the fields dictionary. The tuple values for these fields
will also have a third element, the field title. Because of this, and because
the ``names`` attribute preserves the field order while the ``fields``
attribute may not, it is recommended to iterate through the fields of a dtype
using the ``names`` attribute of the dtype, which will not list titles, as
in::
>>> for name in d.names:
... print(d.fields[name][:2])
(dtype('int64'), 0)
(dtype('float32'), 8)
Union types
-----------
Structured datatypes are implemented in numpy to have base type
:class:`numpy.void` by default, but it is possible to interpret other numpy
types as structured types using the ``(base_dtype, dtype)`` form of dtype
specification described in
:ref:`Data Type Objects <arrays.dtypes.constructing>`. Here, ``base_dtype`` is
the desired underlying dtype, and fields and flags will be copied from
``dtype``. This dtype is similar to a 'union' in C.
Indexing and Assignment to Structured arrays
============================================
Assigning data to a Structured Array
------------------------------------
There are a number of ways to assign values to a structured array: Using python
tuples, using scalar values, or using other structured arrays.
Assignment from Python Native Types (Tuples)
````````````````````````````````````````````
The simplest way to assign values to a structured array is using python tuples.
Each assigned value should be a tuple of length equal to the number of fields
in the array, and not a list or array as these will trigger numpy's
broadcasting rules. The tuple's elements are assigned to the successive fields
of the array, from left to right::
>>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
>>> x[1] = (7, 8, 9)
>>> x
array([(1, 2., 3.), (7, 8., 9.)],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
Assignment from Scalars
```````````````````````
A scalar assigned to a structured element will be assigned to all fields. This
happens when a scalar is assigned to a structured array, or when an
unstructured array is assigned to a structured array::
>>> x = np.zeros(2, dtype='i8, f4, ?, S1')
>>> x[:] = 3
>>> x
array([(3, 3., True, b'3'), (3, 3., True, b'3')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
>>> x[:] = np.arange(2)
>>> x
array([(0, 0., False, b'0'), (1, 1., True, b'1')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
Structured arrays can also be assigned to unstructured arrays, but only if the
structured datatype has just a single field::
>>> twofield = np.zeros(2, dtype=[('A', 'i4'), ('B', 'i4')])
>>> onefield = np.zeros(2, dtype=[('A', 'i4')])
>>> nostruct = np.zeros(2, dtype='i4')
>>> nostruct[:] = twofield
Traceback (most recent call last):
...
TypeError: Cannot cast scalar from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
```````````````````````````````````````
Assignment between two structured arrays occurs as if the source elements had
been converted to tuples and then assigned to the destination elements. That
is, the first field of the source array is assigned to the first field of the
destination array, and the second field likewise, and so on, regardless of
field names. Structured arrays with a different number of fields cannot be
assigned to each other. Bytes of the destination structure which are not
included in any of the fields are unaffected. ::
>>> a = np.zeros(3, dtype=[('a', 'i8'), ('b', 'f4'), ('c', 'S3')])
>>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
>>> b[:] = a
>>> b
array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
Assignment involving subarrays
``````````````````````````````
When assigning to fields which are subarrays, the assigned value will first be
broadcast to the shape of the subarray.
Indexing Structured Arrays
--------------------------
Accessing Individual Fields
```````````````````````````
Individual fields of a structured array may be accessed and modified by indexing
the array with the field name. ::
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> x['foo']
array([1, 3])
>>> x['foo'] = 10
>>> x
array([(10, 2.), (10, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
The resulting array is a view into the original array. It shares the same
memory locations and writing to the view will modify the original array. ::
>>> y = x['bar']
>>> y[:] = 11
>>> x
array([(10, 11.), (10, 11.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
This view has the same dtype and itemsize as the indexed field, so it is
typically a non-structured array, except in the case of nested structures.
>>> y.dtype, y.shape, y.strides
(dtype('float32'), (2,), (12,))
If the accessed field is a subarray, the dimensions of the subarray
are appended to the shape of the result::
>>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
>>> x['a'].shape
(2, 2)
>>> x['b'].shape
(2, 2, 3, 3)
Accessing Multiple Fields
```````````````````````````
One can index and assign to a structured array with a multi-field index, where
the index is a list of field names.
.. warning::
The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
The result of indexing with a multi-field index is a view into the original
array, as follows::
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
array([(0, 0.), (0, 0.), (0, 0.)],
dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
Assignment to the view modifies the original array. The view's fields will be
in the order they were indexed. Note that unlike for single-field indexing, the
dtype of the view has the same itemsize as the original array, and has fields
at the same offsets as in the original array, and unindexed fields are merely
missing.
.. warning::
In Numpy 1.15, indexing an array with a multi-field index returned a copy of
the result above, but with fields packed together in memory as if
passed through :func:`numpy.lib.recfunctions.repack_fields`.
The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
location of unindexed fields compared to 1.15. You will need to update any
code which depends on the data having a "packed" layout. For instance code
such as::
>>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
will need to be changed. This code has raised a ``FutureWarning`` since
Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
In 1.16 a number of functions have been introduced in the
:mod:`numpy.lib.recfunctions` module to help users account for this
change. These are
:func:`numpy.lib.recfunctions.repack_fields`,
:func:`numpy.lib.recfunctions.structured_to_unstructured`,
:func:`numpy.lib.recfunctions.unstructured_to_structured`,
:func:`numpy.lib.recfunctions.apply_along_fields`,
:func:`numpy.lib.recfunctions.assign_fields_by_name`, and
:func:`numpy.lib.recfunctions.require_fields`.
The function :func:`numpy.lib.recfunctions.repack_fields` can always be
used to reproduce the old behavior, as it will return a packed copy of the
structured array. The code above, for example, can be replaced with:
>>> from numpy.lib.recfunctions import repack_fields
>>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
array([0, 0, 0])
Furthermore, numpy now provides a new function
:func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
and more efficient alternative for users who wish to convert structured
arrays to unstructured arrays, as the view above is often intended to do.
This function allows safe conversion to an unstructured type taking into
account padding, often avoids a copy, and also casts the datatypes
as needed, unlike the view. Code such as:
>>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
>>> b[['x', 'z']].view('f4')
array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
can be made safer by replacing with:
>>> from numpy.lib.recfunctions import structured_to_unstructured
>>> structured_to_unstructured(b[['x', 'z']])
array([0, 0, 0])
Assignment to an array with a multi-field index modifies the original array::
>>> a[['a', 'c']] = (2, 3)
>>> a
array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
This obeys the structured array assignment rules described above. For example,
this means that one can swap the values of two fields using appropriate
multi-field indexes::
>>> a[['a', 'c']] = a[['c', 'a']]
Indexing with an Integer to get a Structured Scalar
```````````````````````````````````````````````````
Indexing a single element of a structured array (with an integer index) returns
a structured scalar::
>>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
>>> scalar = x[0]
>>> scalar
(1, 2., 3.)
>>> type(scalar)
<class 'numpy.void'>
Unlike other numpy scalars, structured scalars are mutable and act like views
into the original array, such that modifying the scalar will modify the
original array. Structured scalars also support access and assignment by field
name::
>>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> s = x[0]
>>> s['bar'] = 100
>>> x
array([(1, 100.), (3, 4.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
Similarly to tuples, structured scalars can also be indexed with an integer::
>>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
>>> scalar[0]
1
>>> scalar[1] = 4
Thus, tuples might be thought of as the native Python equivalent to numpy's
structured types, much like native python integers are the equivalent to
numpy's integer types. Structured scalars may be converted to a tuple by
calling :func:`ndarray.item`::
>>> scalar.item(), type(scalar.item())
((1, 4.0, 3.0), <class 'tuple'>)
Viewing Structured Arrays Containing Objects
--------------------------------------------
In order to prevent clobbering object pointers in fields of
:class:`numpy.object` type, numpy currently does not allow views of structured
arrays containing objects.
Structure Comparison
--------------------
If the dtypes of two void structured arrays are equal, testing the equality of
the arrays will result in a boolean array with the dimensions of the original
arrays, with elements set to ``True`` where all fields of the corresponding
structures are equal. Structured dtypes are equal if the field names,
dtypes and titles are the same, ignoring endianness, and the fields are in
the same order::
>>> a = np.zeros(2, dtype=[('a', 'i4'), ('b', 'i4')])
>>> b = np.ones(2, dtype=[('a', 'i4'), ('b', 'i4')])
>>> a == b
array([False, False])
Currently, if the dtypes of two void structured arrays are not equivalent the
comparison fails, returning the scalar value ``False``. This behavior is
deprecated as of numpy 1.10 and will raise an error or perform elementwise
comparison in the future.
The ``<`` and ``>`` operators always return ``False`` when comparing void
structured arrays, and arithmetic and bitwise operations are not supported.
Record Arrays
=============
As an optional convenience numpy provides an ndarray subclass,
:class:`numpy.recarray`, and associated helper functions in the
:mod:`numpy.rec` submodule, that allows access to fields of structured arrays
by attribute instead of only by index. Record arrays also use a special
datatype, :class:`numpy.record`, that allows field access by attribute on the
structured scalars obtained from the array.
The simplest way to create a record array is with :func:`numpy.rec.array`::
>>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
rec.array([(2, 3., b'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
b'World'
:func:`numpy.rec.array` can convert a wide variety of arguments into record
arrays, including structured arrays::
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
The :mod:`numpy.rec` module provides a number of other convenience functions for
creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
appropriate `view <numpy-ndarray-view>`_::
>>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
>>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type :class:`np.recarray` will
automatically convert to :class:`np.record` datatype, so the dtype can be left
out of the view::
>>> recordarr = arr.view(np.recarray)
>>> recordarr.dtype
dtype((numpy.record, [('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')]))
To get back to a plain ndarray both the dtype and type must be reset. The
following view does so, taking into account the unusual case that the
recordarr was not a structured type::
>>> arr2 = recordarr.view(recordarr.dtype.fields or recordarr.dtype, np.ndarray)
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
>>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
<class 'numpy.ndarray'>
>>> type(recordarr.bar)
<class 'numpy.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
will still be accessible by index.
"""
from __future__ import division, absolute_import, print_function
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/arrays/boolean/test_function.py | import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
@pytest.fixture
def data():
    """A boolean extension array with two NA slots mixed into the values."""
    values = [True, False] * 4
    values.append(np.nan)
    values += [True, False] * 44
    values.append(np.nan)
    values += [True, False]
    return pd.array(values, dtype="boolean")
@pytest.mark.parametrize(
    "ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor]
)
def test_ufuncs_binary(ufunc):
    """Binary ufuncs on BooleanArray return a masked boolean result for
    array, Series, ndarray and scalar operands, propagating the NA mask."""
    # two BooleanArrays
    a = pd.array([True, False, None], dtype="boolean")
    result = ufunc(a, a)
    # expected: apply the ufunc to the raw data, then re-mask the NA slot
    expected = pd.array(ufunc(a._data, a._data), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    # Series operand goes through the Series __array_ufunc__ path
    s = pd.Series(a)
    result = ufunc(s, a)
    expected = pd.Series(ufunc(a._data, a._data), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_series_equal(result, expected)

    # Boolean with numpy array
    arr = np.array([True, True, False])
    result = ufunc(a, arr)
    expected = pd.array(ufunc(a._data, arr), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    result = ufunc(arr, a)
    expected = pd.array(ufunc(arr, a._data), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    # BooleanArray with scalar
    result = ufunc(a, True)
    expected = pd.array(ufunc(a._data, True), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    result = ufunc(True, a)
    expected = pd.array(ufunc(True, a._data), dtype="boolean")
    expected[a._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    # not handled types
    msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__"
    with pytest.raises(TypeError, match=msg):
        ufunc(a, "test")
@pytest.mark.parametrize("ufunc", [np.logical_not])
def test_ufuncs_unary(ufunc):
    """Unary ufuncs on BooleanArray keep the NA mask in the result."""
    arr = pd.array([True, False, None], dtype="boolean")

    result = ufunc(arr)
    expected = pd.array(ufunc(arr._data), dtype="boolean")
    expected[arr._mask] = np.nan
    tm.assert_extension_array_equal(result, expected)

    ser = pd.Series(arr)
    result = ufunc(ser)
    expected = pd.Series(ufunc(arr._data), dtype="boolean")
    expected[arr._mask] = np.nan
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("values", [[True, False], [True, None]])
def test_ufunc_reduce_raises(values):
    """Calling a ufunc ``reduce`` on a BooleanArray is unsupported."""
    arr = pd.array(values, dtype="boolean")
    msg = "The 'reduce' method is not supported"
    with pytest.raises(NotImplementedError, match=msg):
        np.add.reduce(arr)
def test_value_counts_na():
    """``value_counts`` counts the pd.NA entry only when ``dropna=False``."""
    arr = pd.array([True, False, pd.NA], dtype="boolean")

    with_na = arr.value_counts(dropna=False)
    tm.assert_series_equal(
        with_na, pd.Series([1, 1, 1], index=[True, False, pd.NA], dtype="Int64")
    )

    without_na = arr.value_counts(dropna=True)
    tm.assert_series_equal(
        without_na, pd.Series([1, 1], index=[True, False], dtype="Int64")
    )
def test_diff():
    """First differences of a BooleanArray/Series propagate NA at the
    shifted-in position and wherever either operand is NA."""
    arr = pd.array(
        [True, True, False, False, True, None, True, None, False], dtype="boolean"
    )
    expected = pd.array(
        [None, False, True, False, True, None, None, None, None], dtype="boolean"
    )

    tm.assert_extension_array_equal(pd.core.algorithms.diff(arr, 1), expected)

    ser = pd.Series(arr)
    tm.assert_series_equal(ser.diff(), pd.Series(expected))
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_parcats.py | from plotly.graph_objs import Parcats
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/shape/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562), so every
    # validator must be imported eagerly at package import time.
    from ._ysizemode import YsizemodeValidator
    from ._yref import YrefValidator
    from ._yanchor import YanchorValidator
    from ._y1 import Y1Validator
    from ._y0 import Y0Validator
    from ._xsizemode import XsizemodeValidator
    from ._xref import XrefValidator
    from ._xanchor import XanchorValidator
    from ._x1 import X1Validator
    from ._x0 import X0Validator
    from ._visible import VisibleValidator
    from ._type import TypeValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._path import PathValidator
    from ._opacity import OpacityValidator
    from ._name import NameValidator
    from ._line import LineValidator
    from ._layer import LayerValidator
    from ._fillrule import FillruleValidator
    from ._fillcolor import FillcolorValidator
    from ._editable import EditableValidator
else:
    # On Python >= 3.7, defer each import until first attribute access
    # (lazy loading via PEP 562 __getattr__) to keep import time down.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._ysizemode.YsizemodeValidator",
            "._yref.YrefValidator",
            "._yanchor.YanchorValidator",
            "._y1.Y1Validator",
            "._y0.Y0Validator",
            "._xsizemode.XsizemodeValidator",
            "._xref.XrefValidator",
            "._xanchor.XanchorValidator",
            "._x1.X1Validator",
            "._x0.X0Validator",
            "._visible.VisibleValidator",
            "._type.TypeValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._path.PathValidator",
            "._opacity.OpacityValidator",
            "._name.NameValidator",
            "._line.LineValidator",
            "._layer.LayerValidator",
            "._fillrule.FillruleValidator",
            "._fillcolor.FillcolorValidator",
            "._editable.EditableValidator",
        ],
    )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/__version__.py | from __future__ import division, absolute_import, print_function
# Major version of the f2py tool.
major = 2

try:
    # When building from an SVN checkout, a generated __svn_version__ module
    # provides the revision; append it to the version string.  Under
    # Python 3 this implicit-relative import raises ImportError, which is
    # caught below and falls back to the plain major version.
    from __svn_version__ import version
    version_info = (major, version)
    version = '%s_%s' % version_info
except (ImportError, ValueError):
    # No SVN metadata available: report just the major version.
    version = str(major)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/f2py/tests/test_parameter.py | <reponame>acrucetta/Chicago_COVI_WebApp
from __future__ import division, absolute_import, print_function
import os
import pytest
import numpy as np
from numpy.testing import assert_raises, assert_equal
from . import util
def _path(*a):
    """Return *a joined as path components under this test file's directory."""
    # os.path.join accepts the components directly; no need to build an
    # intermediate tuple and re-splat it as the original did.
    return os.path.join(os.path.dirname(__file__), *a)
class TestParameters(util.F2PyTest):
    """Exercise f2py's handling of Fortran ``parameter`` constants.

    Each wrapped routine must reject non-contiguous input with ValueError
    and scale a contiguous array in place by its compiled-in constants.
    """
    # Check that intent(in out) translates as intent(inout)
    sources = [_path('src', 'parameter', 'constant_real.f90'),
               _path('src', 'parameter', 'constant_integer.f90'),
               _path('src', 'parameter', 'constant_both.f90'),
               _path('src', 'parameter', 'constant_compound.f90'),
               _path('src', 'parameter', 'constant_non_compound.f90'),
               ]

    def _check_inout(self, func, dtype, expected):
        # Shared body for the routines below: a strided (non-contiguous)
        # array must raise, a contiguous arange(3) must be updated in
        # place to *expected*.
        strided = np.arange(6, dtype=dtype)[::2]
        assert_raises(ValueError, func, strided)
        data = np.arange(3, dtype=dtype)
        func(data)
        assert_equal(data, expected)

    @pytest.mark.slow
    def test_constant_real_single(self):
        self._check_inout(self.module.foo_single, np.float32,
                          [0 + 1 + 2*3, 1, 2])

    @pytest.mark.slow
    def test_constant_real_double(self):
        self._check_inout(self.module.foo_double, np.float64,
                          [0 + 1 + 2*3, 1, 2])

    @pytest.mark.slow
    def test_constant_compound_int(self):
        self._check_inout(self.module.foo_compound_int, np.int32,
                          [0 + 1 + 2*6, 1, 2])

    @pytest.mark.slow
    def test_constant_non_compound_int(self):
        # This routine takes four elements and has no contiguity check.
        data = np.arange(4, dtype=np.int32)
        self.module.foo_non_compound_int(data)
        assert_equal(data, [0 + 1 + 2 + 3*4, 1, 2, 3])

    @pytest.mark.slow
    def test_constant_integer_int(self):
        self._check_inout(self.module.foo_int, np.int32,
                          [0 + 1 + 2*3, 1, 2])

    @pytest.mark.slow
    def test_constant_integer_long(self):
        self._check_inout(self.module.foo_long, np.int64,
                          [0 + 1 + 2*3, 1, 2])

    @pytest.mark.slow
    def test_constant_both(self):
        self._check_inout(self.module.foo, np.float64,
                          [0 + 1*3*3 + 2*3*3, 1*3, 2*3])

    @pytest.mark.slow
    def test_constant_no(self):
        self._check_inout(self.module.foo_no, np.float64,
                          [0 + 1*3*3 + 2*3*3, 1*3, 2*3])

    @pytest.mark.slow
    def test_constant_sum(self):
        self._check_inout(self.module.foo_sum, np.float64,
                          [0 + 1*3*3 + 2*3*3, 1*3, 2*3])
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/series/methods/test_argsort.py | import numpy as np
import pytest
from pandas import Series, Timestamp, isna
import pandas._testing as tm
class TestSeriesArgsort:
def _check_accum_op(self, name, ser, check_dtype=True):
func = getattr(np, name)
tm.assert_numpy_array_equal(
func(ser).values, func(np.array(ser)), check_dtype=check_dtype,
)
# with missing values
ts = ser.copy()
ts[::2] = np.NaN
result = func(ts)[1::2]
expected = func(np.array(ts.dropna()))
tm.assert_numpy_array_equal(result.values, expected, check_dtype=False)
def test_argsort(self, datetime_series):
self._check_accum_op("argsort", datetime_series, check_dtype=False)
argsorted = datetime_series.argsort()
assert issubclass(argsorted.dtype.type, np.integer)
# GH#2967 (introduced bug in 0.11-dev I think)
s = Series([Timestamp(f"201301{i:02d}") for i in range(1, 6)])
assert s.dtype == "datetime64[ns]"
shifted = s.shift(-1)
assert shifted.dtype == "datetime64[ns]"
assert isna(shifted[4])
result = s.argsort()
expected = Series(range(5), dtype="int64")
tm.assert_series_equal(result, expected)
result = shifted.argsort()
expected = Series(list(range(4)) + [-1], dtype="int64")
tm.assert_series_equal(result, expected)
def test_argsort_stable(self):
s = Series(np.random.randint(0, 100, size=10000))
mindexer = s.argsort(kind="mergesort")
qindexer = s.argsort()
mexpected = np.argsort(s.values, kind="mergesort")
qexpected = np.argsort(s.values, kind="quicksort")
tm.assert_series_equal(mindexer.astype(np.intp), Series(mexpected))
tm.assert_series_equal(qindexer.astype(np.intp), Series(qexpected))
msg = (
r"ndarray Expected type <class 'numpy\.ndarray'>, "
r"found <class 'pandas\.core\.series\.Series'> instead"
)
with pytest.raises(AssertionError, match=msg):
tm.assert_numpy_array_equal(qindexer, mindexer)
def test_argsort_preserve_name(self, datetime_series):
result = datetime_series.argsort()
assert result.name == datetime_series.name
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/core/tests/test_extint128.py | from __future__ import division, absolute_import, print_function
import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
# Bounds of the int64 type, and a mid-range power of two used to build
# sample values around likely carry boundaries.
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32

# int128 is not two's complement, the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64

# Sample values clustered around the extremes, the mid-range powers of two
# and zero, where carry and overflow handling is most error-prone.
INT64_VALUES = (
    [INT64_MIN + j for j in range(20)] +
    [INT64_MAX - j for j in range(20)] +
    [INT64_MID + j for j in range(-20, 20)] +
    [2*INT64_MID + j for j in range(-20, 20)] +
    [INT64_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70))
)

INT128_VALUES = (
    [INT128_MIN + j for j in range(20)] +
    [INT128_MAX - j for j in range(20)] +
    [INT128_MID + j for j in range(-20, 20)] +
    [2*INT128_MID + j for j in range(-20, 20)] +
    [INT128_MID//2 + j for j in range(-20, 20)] +
    list(range(-70, 70)) +
    [False]  # negative zero
)

# Strictly positive 64-bit samples, used as divisors in the divmod tests.
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
@contextlib.contextmanager
def exc_iter(*args):
    """
    Iterate over Cartesian product of *args, and if an exception is raised,
    add information of the current iterate.
    """
    latest = [None]

    def produce():
        # record each combination before yielding it so that the handler
        # below can report the iterate that was active when the body raised
        for combo in itertools.product(*args):
            latest[0] = combo
            yield combo

    try:
        yield produce()
    except Exception:
        import traceback
        msg = "At: %r\n%s" % (repr(latest[0]),
                              traceback.format_exc())
        raise AssertionError(msg)
def test_safe_binop():
    """Checked 64-bit add/sub/mul: agrees with Python ints while in range,
    raises OverflowError outside the int64 range."""
    checked_ops = [(operator.add, 1), (operator.sub, 2), (operator.mul, 3)]
    with exc_iter(checked_ops, INT64_VALUES, INT64_VALUES) as cases:
        for (pyop, opcode), lhs, rhs in cases:
            expected = pyop(lhs, rhs)
            if INT64_MIN <= expected <= INT64_MAX:
                got = mt.extint_safe_binop(lhs, rhs, opcode)
                # assert_equal is slow; only invoke it to report a mismatch
                if got != expected:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError,
                              mt.extint_safe_binop, lhs, rhs, opcode)
def test_to_128():
    """Widening every 64-bit sample to 128 bits preserves the value."""
    with exc_iter(INT64_VALUES) as cases:
        for (value,) in cases:
            widened = mt.extint_to_128(value)
            # assert_equal is slow; only invoke it to report a mismatch
            if widened != value:
                assert_equal(widened, value)
def test_to_64():
    """Narrowing 128->64 bits preserves in-range values and raises
    OverflowError for values outside the int64 range."""
    with exc_iter(INT128_VALUES) as cases:
        for (value,) in cases:
            if INT64_MIN <= value <= INT64_MAX:
                narrowed = mt.extint_to_64(value)
                if narrowed != value:
                    assert_equal(narrowed, value)
            else:
                assert_raises(OverflowError, mt.extint_to_64, value)
def test_mul_64_64():
    """64x64 -> 128-bit multiply agrees with Python integer multiplication."""
    with exc_iter(INT64_VALUES, INT64_VALUES) as cases:
        for lhs, rhs in cases:
            expected = lhs * rhs
            got = mt.extint_mul_64_64(lhs, rhs)
            # assert_equal is slow; only invoke it to report a mismatch
            if got != expected:
                assert_equal(got, expected)
def test_add_128():
    """Checked 128-bit addition: agrees with Python ints in range, raises
    OverflowError when the sum leaves the 128-bit range."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as cases:
        for lhs, rhs in cases:
            expected = lhs + rhs
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_add_128(lhs, rhs)
                if got != expected:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_add_128, lhs, rhs)
def test_sub_128():
    """Checked 128-bit subtraction: agrees with Python ints in range, raises
    OverflowError when the difference leaves the 128-bit range."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as cases:
        for lhs, rhs in cases:
            expected = lhs - rhs
            if INT128_MIN <= expected <= INT128_MAX:
                got = mt.extint_sub_128(lhs, rhs)
                if got != expected:
                    assert_equal(got, expected)
            else:
                assert_raises(OverflowError, mt.extint_sub_128, lhs, rhs)
def test_neg_128():
    """128-bit negation agrees with Python unary minus."""
    with exc_iter(INT128_VALUES) as cases:
        for (value,) in cases:
            got = mt.extint_neg_128(value)
            if got != -value:
                assert_equal(got, -value)
def test_shl_128():
    """Left shift by one; the sign bit is kept separate from the magnitude,
    so shift the magnitude, mask to 128 bits, then restore the sign."""
    with exc_iter(INT128_VALUES) as cases:
        for (value,) in cases:
            magnitude = (abs(value) << 1) & (2**128 - 1)
            expected = -magnitude if value < 0 else magnitude
            got = mt.extint_shl_128(value)
            if got != expected:
                assert_equal(got, expected)
def test_shr_128():
    """Right shift by one: shift the magnitude and restore the sign."""
    with exc_iter(INT128_VALUES) as cases:
        for (value,) in cases:
            expected = -(abs(value) >> 1) if value < 0 else value >> 1
            got = mt.extint_shr_128(value)
            if got != expected:
                assert_equal(got, expected)
def test_gt_128():
    """128-bit greater-than agrees with the Python comparison."""
    with exc_iter(INT128_VALUES, INT128_VALUES) as cases:
        for lhs, rhs in cases:
            expected = lhs > rhs
            got = mt.extint_gt_128(lhs, rhs)
            if got != expected:
                assert_equal(got, expected)
@pytest.mark.slow
def test_divmod_128_64():
    """128-by-64-bit divmod with C truncation-toward-zero semantics.

    The expected quotient/remainder are computed with Python's floor-based
    divmod on magnitudes and then sign-corrected.  Fix: the failure guard
    compared the quotient against the remainder (``d != dr``) instead of
    the remainder against the expected remainder (``cr != dr``); the old
    check detected nothing the ``b*d + dr == a`` identity does not already
    catch, and it forced the slow ``assert_equal`` path on nearly every
    iterate, defeating the fast-path optimization.
    """
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
        for a, b in it:
            if a >= 0:
                c, cr = divmod(a, b)
            else:
                # C truncates toward zero; emulate via magnitudes
                c, cr = divmod(-a, b)
                c = -c
                cr = -cr

            d, dr = mt.extint_divmod_128_64(a, b)

            # assert_equal is slow; only invoke it when something is wrong
            if c != d or cr != dr or b*d + dr != a:
                assert_equal(d, c)
                assert_equal(dr, cr)
                assert_equal(b*d + dr, a)
def test_floordiv_128_64():
    """128-by-64-bit floor division agrees with Python's ``//``."""
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as cases:
        for numerator, denominator in cases:
            expected = numerator // denominator
            got = mt.extint_floordiv_128_64(numerator, denominator)
            if got != expected:
                assert_equal(got, expected)
def test_ceildiv_128_64():
    """128-by-64-bit ceiling division agrees with ``(a + b - 1) // b``."""
    with exc_iter(INT128_VALUES, INT64_POS_VALUES) as cases:
        for numerator, denominator in cases:
            expected = (numerator + denominator - 1) // denominator
            got = mt.extint_ceildiv_128_64(numerator, denominator)
            if got != expected:
                assert_equal(got, expected)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/numpy/doc/misc.py | <gh_stars>1000+
"""
=============
Miscellaneous
=============
IEEE 754 Floating Point Special Values
--------------------------------------
Special values defined in numpy: nan, inf.
NaNs can be used as a poor-man's mask (if you don't care what the
original value was)
Note: cannot use equality to test NaNs. E.g.: ::
>>> myarr = np.array([1., 0., np.nan, 3.])
>>> np.nonzero(myarr == np.nan)
(array([], dtype=int64),)
>>> np.nan == np.nan # is always False! Use special numpy functions instead.
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
>>> myarr
array([ 1., 0., NaN, 3.])
>>> myarr[np.isnan(myarr)] = 0. # use this instead
>>> myarr
array([ 1., 0., 0., 3.])
Other related special value functions: ::
isinf(): True if value is inf
isfinite(): True if not nan or inf
nan_to_num(): Map nan to 0, inf to max float, -inf to min float
The following corresponds to the usual functions except that nans are excluded
from the results: ::
nansum()
nanmax()
nanmin()
nanargmax()
nanargmin()
>>> x = np.arange(10.)
>>> x[3] = np.nan
>>> x.sum()
nan
>>> np.nansum(x)
42.0
How numpy handles numerical exceptions
--------------------------------------
The default is to ``'warn'`` for ``invalid``, ``divide``, and ``overflow``
and ``'ignore'`` for ``underflow``. But this can be changed, and it can be
set individually for different kinds of exceptions. The different behaviors
are:
- 'ignore' : Take no action when the exception occurs.
- 'warn' : Print a `RuntimeWarning` (via the Python `warnings` module).
- 'raise' : Raise a `FloatingPointError`.
- 'call' : Call a function specified using the `seterrcall` function.
- 'print' : Print a warning directly to ``stdout``.
- 'log' : Record error in a Log object specified by `seterrcall`.
These behaviors can be set for all kinds of errors or specific ones:
- all : apply to all numeric exceptions
- invalid : when NaNs are generated
- divide : divide by zero (for integers as well!)
- overflow : floating point overflows
- underflow : floating point underflows
Note that integer divide-by-zero is handled by the same machinery.
These behaviors are set on a per-thread basis.
Examples
--------
::
>>> oldsettings = np.seterr(all='warn')
>>> np.zeros(5,dtype=np.float32)/0.
invalid value encountered in divide
>>> j = np.seterr(under='ignore')
>>> np.array([1.e-100])**10
>>> j = np.seterr(invalid='raise')
>>> np.sqrt(np.array([-1.]))
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
... print("saw stupid error!")
>>> np.seterrcall(errorhandler)
<function err_handler at 0x...>
>>> j = np.seterr(all='call')
>>> np.zeros(5, dtype=np.int32)/0
FloatingPointError: invalid value encountered in divide
saw stupid error!
>>> j = np.seterr(**oldsettings) # restore previous
... # error-handling settings
Interfacing to C
----------------
Only a survey of the choices. Little detail on how each works.
1) Bare metal, wrap your own C-code manually.
- Plusses:
- Efficient
- No dependencies on other tools
- Minuses:
- Lots of learning overhead:
- need to learn basics of Python C API
- need to learn basics of numpy C API
- need to learn how to handle reference counting and love it.
- Reference counting often difficult to get right.
- getting it wrong leads to memory leaks, and worse, segfaults
- API will change for Python 3.0!
2) Cython
- Plusses:
- avoid learning C API's
- no dealing with reference counting
- can code in pseudo python and generate C code
- can also interface to existing C code
- should shield you from changes to Python C api
- has become the de-facto standard within the scientific Python community
- fast indexing support for arrays
- Minuses:
- Can write code in non-standard form which may become obsolete
- Not as flexible as manual wrapping
3) ctypes
- Plusses:
- part of Python standard library
- good for interfacing to existing sharable libraries, particularly
Windows DLLs
- avoids API/reference counting issues
- good numpy support: arrays have all these in their ctypes
attribute: ::
a.ctypes.data a.ctypes.get_strides
a.ctypes.data_as a.ctypes.shape
a.ctypes.get_as_parameter a.ctypes.shape_as
a.ctypes.get_data a.ctypes.strides
a.ctypes.get_shape a.ctypes.strides_as
- Minuses:
- can't use for writing code to be turned into C extensions, only a wrapper
tool.
4) SWIG (automatic wrapper generator)
- Plusses:
- around a long time
- multiple scripting language support
- C++ support
- Good for wrapping large (many functions) existing C libraries
- Minuses:
- generates lots of code between Python and the C code
- can cause performance problems that are nearly impossible to optimize
out
- interface files can be hard to write
- doesn't necessarily avoid reference counting issues or needing to know
API's
5) scipy.weave
- Plusses:
- can turn many numpy expressions into C code
- dynamic compiling and loading of generated C code
- can embed pure C code in Python module and have weave extract, generate
interfaces and compile, etc.
- Minuses:
- Future very uncertain: it's the only part of Scipy not ported to Python 3
and is effectively deprecated in favor of Cython.
6) Psyco
- Plusses:
- Turns pure python into efficient machine code through jit-like
optimizations
- very fast when it optimizes well
- Minuses:
- Only on intel (windows?)
- Doesn't do much for numpy?
Interfacing to Fortran:
-----------------------
The clear choice to wrap Fortran code is
`f2py <https://docs.scipy.org/doc/numpy/f2py/>`_.
Pyfort is an older alternative, but not supported any longer.
Fwrap is a newer project that looked promising but isn't being developed any
longer.
Interfacing to C++:
-------------------
1) Cython
2) CXX
3) Boost.python
4) SWIG
5) SIP (used mainly in PyQT)
"""
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/frame/test_alter_axes.py | from datetime import datetime
import inspect
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
Series,
Timestamp,
cut,
date_range,
to_datetime,
)
import pandas._testing as tm
class TestDataFrameAlterAxes:
    """Tests for directly assigning, renaming, and reindexing DataFrame axes.

    Several tests exercise deprecated keywords (e.g. ``keep_tz``) and assert
    on the exact FutureWarning emitted, so they are pinned to the pandas
    version this file ships with.
    """

    def test_set_index_directly(self, float_string_frame):
        # Direct assignment to df.index must validate the new length.
        df = float_string_frame
        idx = Index(np.arange(len(df))[::-1])

        df.index = idx
        tm.assert_index_equal(df.index, idx)
        with pytest.raises(ValueError, match="Length mismatch"):
            df.index = idx[::2]

    def test_convert_dti_to_series(self):
        # don't cast a DatetimeIndex WITH a tz, leave as object
        # GH 6032
        idx = DatetimeIndex(
            to_datetime(["2013-1-1 13:00", "2013-1-2 14:00"]), name="B"
        ).tz_localize("US/Pacific")
        df = DataFrame(np.random.randn(2, 1), columns=["A"])

        expected = Series(
            np.array(
                [
                    Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"),
                    Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"),
                ],
                dtype="object",
            ),
            name="B",
        )

        # convert index to series
        result = Series(idx)
        tm.assert_series_equal(result, expected)

        # assign to frame
        df["B"] = idx
        result = df["B"]
        tm.assert_series_equal(result, expected)

        # convert to series while keeping the timezone
        # (keep_tz is deprecated; the warning text is checked below)
        msg = "stop passing 'keep_tz'"
        with tm.assert_produces_warning(FutureWarning) as m:
            result = idx.to_series(keep_tz=True, index=[0, 1])
        tm.assert_series_equal(result, expected)
        assert msg in str(m[0].message)

        # convert to utc
        with tm.assert_produces_warning(FutureWarning) as m:
            df["B"] = idx.to_series(keep_tz=False, index=[0, 1])
        result = df["B"]
        comp = Series(DatetimeIndex(expected.values).tz_localize(None), name="B")
        tm.assert_series_equal(result, comp)
        msg = "do 'idx.tz_convert(None)' before calling"
        assert msg in str(m[0].message)

        # default (no keep_tz) keeps the timezone and warns nothing
        result = idx.to_series(index=[0, 1])
        tm.assert_series_equal(result, expected)

        with tm.assert_produces_warning(FutureWarning) as m:
            result = idx.to_series(keep_tz=False, index=[0, 1])
        tm.assert_series_equal(result, expected.dt.tz_convert(None))
        msg = "do 'idx.tz_convert(None)' before calling"
        assert msg in str(m[0].message)

        # list of datetimes with a tz
        df["B"] = idx.to_pydatetime()
        result = df["B"]
        tm.assert_series_equal(result, expected)

        # GH 6785
        # set the index manually
        import pytz

        df = DataFrame([{"ts": datetime(2014, 4, 1, tzinfo=pytz.utc), "foo": 1}])
        expected = df.set_index("ts")
        df.index = df["ts"]
        df.pop("ts")
        tm.assert_frame_equal(df, expected)

    def test_set_columns(self, float_string_frame):
        # Direct assignment to df.columns must validate the new length.
        cols = Index(np.arange(len(float_string_frame.columns)))
        float_string_frame.columns = cols
        with pytest.raises(ValueError, match="Length mismatch"):
            float_string_frame.columns = cols[::2]

    def test_dti_set_index_reindex(self):
        # GH 6631
        df = DataFrame(np.random.random(6))
        idx1 = date_range("2011/01/01", periods=6, freq="M", tz="US/Eastern")
        idx2 = date_range("2013", periods=6, freq="A", tz="Asia/Tokyo")

        df = df.set_index(idx1)
        tm.assert_index_equal(df.index, idx1)
        df = df.reindex(idx2)
        tm.assert_index_equal(df.index, idx2)

        # GH 11314
        # with tz
        index = date_range(
            datetime(2015, 10, 1), datetime(2015, 10, 1, 23), freq="H", tz="US/Eastern"
        )
        df = DataFrame(np.random.randn(24, 1), columns=["a"], index=index)
        new_index = date_range(
            datetime(2015, 10, 2), datetime(2015, 10, 2, 23), freq="H", tz="US/Eastern"
        )

        # setting a fresh index must preserve the index frequency
        result = df.set_index(new_index)
        assert result.index.freq == index.freq

    # Renaming

    def test_reindex_api_equivalence(self):
        # equivalence of the labels/axis and index/columns API's
        df = DataFrame(
            [[1, 2, 3], [3, 4, 5], [5, 6, 7]],
            index=["a", "b", "c"],
            columns=["d", "e", "f"],
        )

        res1 = df.reindex(["b", "a"])
        res2 = df.reindex(index=["b", "a"])
        res3 = df.reindex(labels=["b", "a"])
        res4 = df.reindex(labels=["b", "a"], axis=0)
        res5 = df.reindex(["b", "a"], axis=0)
        for res in [res2, res3, res4, res5]:
            tm.assert_frame_equal(res1, res)

        res1 = df.reindex(columns=["e", "d"])
        res2 = df.reindex(["e", "d"], axis=1)
        res3 = df.reindex(labels=["e", "d"], axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)

        res1 = df.reindex(index=["b", "a"], columns=["e", "d"])
        res2 = df.reindex(columns=["e", "d"], index=["b", "a"])
        res3 = df.reindex(labels=["b", "a"], axis=0).reindex(labels=["e", "d"], axis=1)
        for res in [res2, res3]:
            tm.assert_frame_equal(res1, res)

    def test_assign_columns(self, float_frame):
        # Renaming columns must not detach the underlying column data.
        float_frame["hi"] = "there"

        df = float_frame.copy()
        df.columns = ["foo", "bar", "baz", "quux", "foo2"]
        tm.assert_series_equal(float_frame["C"], df["baz"], check_names=False)
        tm.assert_series_equal(float_frame["hi"], df["foo2"], check_names=False)

    def test_set_index_preserve_categorical_dtype(self):
        # GH13743, GH13854
        # set_index/reset_index round trip must keep categories and order.
        df = DataFrame(
            {
                "A": [1, 2, 1, 1, 2],
                "B": [10, 16, 22, 28, 34],
                "C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
                "C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
            }
        )
        for cols in ["C1", "C2", ["A", "C1"], ["A", "C2"], ["C1", "C2"]]:
            result = df.set_index(cols).reset_index()
            result = result.reindex(columns=df.columns)
            tm.assert_frame_equal(result, df)

    def test_rename_signature(self):
        # Guard against accidental signature changes in DataFrame.rename.
        sig = inspect.signature(DataFrame.rename)
        parameters = set(sig.parameters)
        assert parameters == {
            "self",
            "mapper",
            "index",
            "columns",
            "axis",
            "inplace",
            "copy",
            "level",
            "errors",
        }

    def test_reindex_signature(self):
        # Guard against accidental signature changes in DataFrame.reindex.
        sig = inspect.signature(DataFrame.reindex)
        parameters = set(sig.parameters)
        assert parameters == {
            "self",
            "labels",
            "index",
            "columns",
            "axis",
            "limit",
            "copy",
            "level",
            "method",
            "fill_value",
            "tolerance",
        }
class TestIntervalIndex:
    """Tests for how interval-valued data is stored when assigned to frames."""

    def test_setitem(self):
        df = DataFrame({"A": range(10)})
        s = cut(df.A, 5)
        assert isinstance(s.cat.categories, IntervalIndex)

        # B & D end up as Categoricals
        # the remainder are converted to in-line objects
        # containing an IntervalIndex.values
        df["B"] = s
        df["C"] = np.array(s)
        df["D"] = s.values
        df["E"] = np.array(s.values)

        assert is_categorical_dtype(df["B"].dtype)
        assert is_interval_dtype(df["B"].cat.categories)
        assert is_categorical_dtype(df["D"].dtype)
        assert is_interval_dtype(df["D"].cat.categories)

        assert is_object_dtype(df["C"])
        assert is_object_dtype(df["E"])

        # they compare equal as Index
        # when converted to numpy objects
        c = lambda x: Index(np.array(x))
        tm.assert_index_equal(c(df.B), c(df.B), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.C), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.D), check_names=False)
        tm.assert_index_equal(c(df.B), c(df.D), check_names=False)

        # B & D are the same Series
        tm.assert_series_equal(df["B"], df["B"], check_names=False)
        tm.assert_series_equal(df["B"], df["D"], check_names=False)

        # C & E are the same Series
        tm.assert_series_equal(df["C"], df["C"], check_names=False)
        tm.assert_series_equal(df["C"], df["E"], check_names=False)

    def test_set_reset_index(self):
        # Round-trip an interval-categorical column through the index.
        df = DataFrame({"A": range(10)})
        s = cut(df.A, 5)
        df["B"] = s
        df = df.set_index("B")

        df = df.reset_index()
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_select_dtypes.py | <filename>.venv/lib/python3.8/site-packages/pandas/tests/frame/methods/test_select_dtypes.py
from collections import OrderedDict
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Timestamp
import pandas._testing as tm
class TestSelectDtypes:
    """Tests for DataFrame.select_dtypes include/exclude semantics.

    The same 11-column frame covering every commonly selected dtype was
    previously copy-pasted into five test methods; it is now built once
    by :meth:`_mixed_dtype_frame`.
    """

    @staticmethod
    def _mixed_dtype_frame():
        # One column per dtype family: object, int, uint, float, bool,
        # categorical, datetime, tz-aware datetimes, period, timedelta.
        return DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.Categorical(list("abc")),
                "g": pd.date_range("20130101", periods=3),
                "h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
                "i": pd.date_range("20130101", periods=3, tz="CET"),
                "j": pd.period_range("2013-01", periods=3, freq="M"),
                "k": pd.timedelta_range("1 day", periods=3),
            }
        )

    def test_select_dtypes_include_using_list_like(self):
        df = self._mixed_dtype_frame()

        ri = df.select_dtypes(include=[np.number])
        ei = df[["b", "c", "d", "k"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
        ei = df[["b", "c", "d"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
        ei = df[["b", "c", "d", "f"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=["datetime"])
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=["datetime64"])
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=["datetimetz"])
        ei = df[["h", "i"]]
        tm.assert_frame_equal(ri, ei)

        # "period" selection is not implemented in this pandas version.
        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(include=["period"])

    def test_select_dtypes_exclude_using_list_like(self):
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
            }
        )
        re = df.select_dtypes(exclude=[np.number])
        ee = df[["a", "e"]]
        tm.assert_frame_equal(re, ee)

    def test_select_dtypes_exclude_include_using_list_like(self):
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        exclude = (np.datetime64,)
        include = np.bool_, "integer"
        r = df.select_dtypes(include=include, exclude=exclude)
        e = df[["b", "c", "e"]]
        tm.assert_frame_equal(r, e)

        exclude = ("datetime",)
        include = "bool", "int64", "int32"
        r = df.select_dtypes(include=include, exclude=exclude)
        e = df[["b", "e"]]
        tm.assert_frame_equal(r, e)

    def test_select_dtypes_include_using_scalars(self):
        df = self._mixed_dtype_frame()

        ri = df.select_dtypes(include=np.number)
        ei = df[["b", "c", "d", "k"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include="datetime")
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include="datetime64")
        ei = df[["g"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include="category")
        ei = df[["f"]]
        tm.assert_frame_equal(ri, ei)

        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(include="period")

    def test_select_dtypes_exclude_using_scalars(self):
        df = self._mixed_dtype_frame()

        ri = df.select_dtypes(exclude=np.number)
        ei = df[["a", "e", "f", "g", "h", "i", "j"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(exclude="category")
        ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
        tm.assert_frame_equal(ri, ei)

        with pytest.raises(NotImplementedError, match=r"^$"):
            df.select_dtypes(exclude="period")

    def test_select_dtypes_include_exclude_using_scalars(self):
        df = self._mixed_dtype_frame()

        ri = df.select_dtypes(include=np.number, exclude="floating")
        ei = df[["b", "c", "k"]]
        tm.assert_frame_equal(ri, ei)

    def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
        df = self._mixed_dtype_frame()

        ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
        ei = df[["b", "c"]]
        tm.assert_frame_equal(ri, ei)

        ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
        ei = df[["b", "c", "f", "k"]]
        tm.assert_frame_equal(ri, ei)

    def test_select_dtypes_duplicate_columns(self):
        # GH20839
        odict = OrderedDict
        df = DataFrame(
            odict(
                [
                    ("a", list("abc")),
                    ("b", list(range(1, 4))),
                    ("c", np.arange(3, 6).astype("u1")),
                    ("d", np.arange(4.0, 7.0, dtype="float64")),
                    ("e", [True, False, True]),
                    ("f", pd.date_range("now", periods=3).values),
                ]
            )
        )
        # Force duplicate labels; selection must still operate per column.
        df.columns = ["a", "a", "b", "b", "b", "c"]

        expected = DataFrame(
            {"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
        )

        result = df.select_dtypes(include=[np.number], exclude=["floating"])
        tm.assert_frame_equal(result, expected)

    def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        df["g"] = df.f.diff()
        # "u8" is a valid dtype string even though numpy has no np.u8 attr.
        assert not hasattr(np, "u8")
        r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
        e = df[["a", "b"]]
        tm.assert_frame_equal(r, e)

        r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
        e = df[["a", "b", "g"]]
        tm.assert_frame_equal(r, e)

    def test_select_dtypes_empty(self):
        df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
        msg = "at least one of include or exclude must be nonempty"
        with pytest.raises(ValueError, match=msg):
            df.select_dtypes()

    def test_select_dtypes_bad_datetime64(self):
        df = DataFrame(
            {
                "a": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        # Unit-qualified datetime64 dtypes are rejected as over-specific.
        with pytest.raises(ValueError, match=".+ is too specific"):
            df.select_dtypes(include=["datetime64[D]"])

        with pytest.raises(ValueError, match=".+ is too specific"):
            df.select_dtypes(exclude=["datetime64[as]"])

    def test_select_dtypes_datetime_with_tz(self):
        df2 = DataFrame(
            dict(
                A=Timestamp("20130102", tz="US/Eastern"),
                B=Timestamp("20130603", tz="CET"),
            ),
            index=range(5),
        )
        df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
        # tz-aware columns must NOT match plain "datetime64[ns]"
        result = df3.select_dtypes(include=["datetime64[ns]"])
        expected = df3.reindex(columns=[])
        tm.assert_frame_equal(result, expected)

    # NOTE(review): np.string_/np.unicode_ below were removed in NumPy 2.0;
    # this parametrization is pinned to NumPy 1.x.
    @pytest.mark.parametrize(
        "dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
    )
    @pytest.mark.parametrize("arg", ["include", "exclude"])
    def test_select_dtypes_str_raises(self, dtype, arg):
        df = DataFrame(
            {
                "a": list("abc"),
                "g": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        msg = "string dtypes are not allowed"
        kwargs = {arg: [dtype]}

        with pytest.raises(TypeError, match=msg):
            df.select_dtypes(**kwargs)

    def test_select_dtypes_bad_arg_raises(self):
        df = DataFrame(
            {
                "a": list("abc"),
                "g": list("abc"),
                "b": list(range(1, 4)),
                "c": np.arange(3, 6).astype("u1"),
                "d": np.arange(4.0, 7.0, dtype="float64"),
                "e": [True, False, True],
                "f": pd.date_range("now", periods=3).values,
            }
        )
        msg = "data type.*not understood"
        with pytest.raises(TypeError, match=msg):
            df.select_dtypes(["blargy, blarg, blarg"])

    def test_select_dtypes_typecodes(self):
        # GH 11990
        df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
        expected = df
        FLOAT_TYPES = list(np.typecodes["AllFloat"])
        tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scatterpolar.py | from plotly.graph_objs import Scatterpolar
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_scattermapbox.py | from plotly.graph_objs import Scattermapbox
|
acrucetta/Chicago_COVI_WebApp | myapp.py | from flask import Flask, render_template,request
from wrangling_scripts.wrangle_data import return_figures
import json
import plotly
app = Flask(__name__)
@app.route('/')
def home():
    """Render the landing page with every Plotly figure embedded."""
    figures = return_figures()

    # One html id per figure: "figure-0", "figure-1", ...
    figure_ids = [f'figure-{i}' for i, _ in enumerate(figures)]

    # Serialize the figures for the javascript side of the template.
    figures_json = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)

    return render_template('index.html',
                           ids=figure_ids,
                           figuresJSON=figures_json)
@app.route('/phases')
def phases():
    """Render the phases page with every Plotly figure embedded."""
    figures = return_figures()

    # One html id per figure: "figure-0", "figure-1", ...
    figure_ids = [f'figure-{i}' for i, _ in enumerate(figures)]

    # Serialize the figures for the javascript side of the template.
    figures_json = json.dumps(figures, cls=plotly.utils.PlotlyJSONEncoder)

    return render_template('phases.html', ids=figure_ids, figuresJSON=figures_json)
if __name__ == '__main__':
    # Local development entry point: debug=True enables the interactive
    # debugger and auto-reload (equivalent to setting app.debug first).
    app.run(debug=True)
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/generic/methods/test_first_valid_index.py | """
Includes test for last_valid_index.
"""
import numpy as np
import pytest
from pandas import DataFrame, Series, date_range
import pandas._testing as tm
class TestFirstValidIndex:
    """Tests for Series/DataFrame first_valid_index and last_valid_index."""

    @pytest.mark.parametrize("klass", [Series, DataFrame])
    def test_first_valid_index_single_nan(self, klass):
        # GH#9752 Series/DataFrame should both return None, not raise
        obj = klass([np.nan])

        assert obj.first_valid_index() is None
        assert obj.iloc[:0].first_valid_index() is None

    @pytest.mark.parametrize(
        "empty", [DataFrame(), Series(dtype=object), Series([], index=[], dtype=object)]
    )
    def test_first_valid_index_empty(self, empty):
        # GH#12800
        assert empty.last_valid_index() is None
        assert empty.first_valid_index() is None

    @pytest.mark.parametrize(
        "data,idx,expected_first,expected_last",
        [
            ({"A": [1, 2, 3]}, [1, 1, 2], 1, 2),
            ({"A": [1, 2, 3]}, [1, 2, 2], 1, 2),
            ({"A": [1, 2, 3, 4]}, ["d", "d", "d", "d"], "d", "d"),
            ({"A": [1, np.nan, 3]}, [1, 1, 2], 1, 2),
            ({"A": [np.nan, np.nan, 3]}, [1, 1, 2], 2, 2),
            ({"A": [1, np.nan, 3]}, [1, 2, 2], 1, 2),
        ],
    )
    def test_first_last_valid_frame(self, data, idx, expected_first, expected_last):
        # GH#21441 — includes duplicate index labels
        df = DataFrame(data, index=idx)
        assert expected_first == df.first_valid_index()
        assert expected_last == df.last_valid_index()

    @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
    def test_first_last_valid(self, index_func):
        # NaN padding at both ends; valid range is positions 5..-6.
        N = 30
        index = index_func(N)
        mat = np.random.randn(N)
        mat[:5] = np.nan
        mat[-5:] = np.nan

        frame = DataFrame({"foo": mat}, index=index)
        assert frame.first_valid_index() == frame.index[5]
        assert frame.last_valid_index() == frame.index[-6]

        ser = frame["foo"]
        assert ser.first_valid_index() == frame.index[5]
        assert ser.last_valid_index() == frame.index[-6]

    @pytest.mark.parametrize("index_func", [tm.makeStringIndex, tm.makeDateIndex])
    def test_first_last_valid_all_nan(self, index_func):
        # GH#17400: no valid entries
        index = index_func(30)
        frame = DataFrame(np.nan, columns=["foo"], index=index)

        assert frame.last_valid_index() is None
        assert frame.first_valid_index() is None

        ser = frame["foo"]
        assert ser.first_valid_index() is None
        assert ser.last_valid_index() is None

    def test_first_last_valid_preserves_freq(self):
        # GH#20499: it preserves freq with holes
        index = date_range("20110101", periods=30, freq="B")
        frame = DataFrame(np.nan, columns=["foo"], index=index)

        frame.iloc[1] = 1
        frame.iloc[-2] = 1
        assert frame.first_valid_index() == frame.index[1]
        assert frame.last_valid_index() == frame.index[-2]
        assert frame.first_valid_index().freq == frame.index.freq
        assert frame.last_valid_index().freq == frame.index.freq

        ts = frame["foo"]
        assert ts.first_valid_index() == ts.index[1]
        assert ts.last_valid_index() == ts.index[-2]
        assert ts.first_valid_index().freq == ts.index.freq
        assert ts.last_valid_index().freq == ts.index.freq
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py | <reponame>acrucetta/Chicago_COVI_WebApp
""" Test functions for linalg module using the matrix class."""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.linalg.tests.test_linalg import (
LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
_TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
PinvCases, DetCases, LstsqCases)
# Linalg test cases, rebuilt here with np.matrix inputs and shared by all
# the TestCase classes below.
CASES = []

# square test cases
CASES += apply_tag('square', [
    LinalgCase("0x0_matrix",
               np.empty((0, 0), dtype=np.double).view(np.matrix),
               np.empty((0, 1), dtype=np.double).view(np.matrix),
               tags={'size-0'}),
    LinalgCase("matrix_b_only",
               np.array([[1., 2.], [3., 4.]]),
               np.matrix([2., 1.]).T),
    LinalgCase("matrix_a_and_b",
               np.matrix([[1., 2.], [3., 4.]]),
               np.matrix([2., 1.]).T),
])

# hermitian test-cases
CASES += apply_tag('hermitian', [
    LinalgCase("hmatrix_a_and_b",
               np.matrix([[1., 2.], [2., 1.]]),
               None),
])
# No need to make generalized or strided cases for matrices.
class MatrixTestCase(LinalgTestCase):
    # Base class: reuse the shared linalg test machinery, but feed it the
    # matrix-flavoured cases defined above.
    TEST_CASES = CASES
# Run the shared solve() cases against np.matrix inputs.
class TestSolveMatrix(SolveCases, MatrixTestCase):
    pass
# Run the shared inv() cases against np.matrix inputs.
class TestInvMatrix(InvCases, MatrixTestCase):
    pass
# Run the shared eigvals() cases against np.matrix inputs.
class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
    pass
# Run the shared eig() cases against np.matrix inputs.
class TestEigMatrix(EigCases, MatrixTestCase):
    pass
# Run the shared svd() cases against np.matrix inputs.
class TestSVDMatrix(SVDCases, MatrixTestCase):
    pass
# Run the shared cond() cases against np.matrix inputs.
class TestCondMatrix(CondCases, MatrixTestCase):
    pass
# Run the shared pinv() cases against np.matrix inputs.
class TestPinvMatrix(PinvCases, MatrixTestCase):
    pass
# Run the shared det() cases against np.matrix inputs.
class TestDetMatrix(DetCases, MatrixTestCase):
    pass
# Run the shared lstsq() cases against np.matrix inputs.
class TestLstsqMatrix(LstsqCases, MatrixTestCase):
    pass
class _TestNorm2DMatrix(_TestNorm2D):
    # Norm test mixin parameterized to build its 2-D inputs as np.matrix.
    array = np.matrix
# norm() on double-precision np.matrix inputs.
class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
    pass
# norm() on single-precision np.matrix inputs.
class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
    pass
# norm() on int64 np.matrix inputs.
class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
    pass
class TestQRMatrix(_TestQR):
    # qr() tests parameterized to build inputs as np.matrix.
    array = np.matrix
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_surface.py | from plotly.graph_objs import Surface
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/updatemenu/_pad.py | <filename>env/lib/python3.8/site-packages/plotly/graph_objs/layout/updatemenu/_pad.py
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Pad(_BaseLayoutHierarchyType):
    """
    Padding, in pixels, around the buttons or dropdown menu of a
    layout.updatemenu.  The four sides are independent numeric
    properties: ``b`` (bottom), ``l`` (left), ``r`` (right) and
    ``t`` (top).
    """

    # class properties
    # --------------------
    _parent_path_str = "layout.updatemenu"
    _path_str = "layout.updatemenu.pad"
    _valid_props = {"b", "l", "r", "t"}

    # b
    # -
    @property
    def b(self):
        """
        The amount of padding (in px) along the bottom of the
        component.

        The 'b' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["b"]

    @b.setter
    def b(self, val):
        self["b"] = val

    # l
    # -
    @property
    def l(self):
        """
        The amount of padding (in px) on the left side of the
        component.

        The 'l' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["l"]

    @l.setter
    def l(self, val):
        self["l"] = val

    # r
    # -
    @property
    def r(self):
        """
        The amount of padding (in px) on the right side of the
        component.

        The 'r' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["r"]

    @r.setter
    def r(self, val):
        self["r"] = val

    # t
    # -
    @property
    def t(self):
        """
        The amount of padding (in px) along the top of the component.

        The 't' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["t"]

    @t.setter
    def t(self, val):
        self["t"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        b
            The amount of padding (in px) along the bottom of the
            component.
        l
            The amount of padding (in px) on the left side of the
            component.
        r
            The amount of padding (in px) on the right side of the
            component.
        t
            The amount of padding (in px) along the top of the
            component.
        """

    def __init__(self, arg=None, b=None, l=None, r=None, t=None, **kwargs):
        """
        Construct a new Pad object

        Sets the padding around the buttons or dropdown menu.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.updatemenu.Pad`
        b
            The amount of padding (in px) along the bottom of the
            component.
        l
            The amount of padding (in px) on the left side of the
            component.
        r
            The amount of padding (in px) on the right side of the
            component.
        t
            The amount of padding (in px) along the top of the
            component.

        Returns
        -------
        Pad
        """
        super(Pad, self).__init__("pad")

        # Internal re-parenting short-circuit used by the object hierarchy.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg: accept None, another Pad, or a plain dict.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.updatemenu.Pad
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.updatemenu.Pad`"""
            )

        # Handle skip_invalid
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict: an explicit keyword argument takes
        # precedence over the corresponding entry in ``arg`` (which is
        # popped either way, so it is not reprocessed as an unknown kwarg).
        for name, override in (("b", b), ("l", l), ("r", r), ("t", t)):
            value = arg.pop(name, None)
            if override is not None:
                value = override
            if value is not None:
                self[name] = value

        # Process unknown kwargs
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/pandas/tests/io/parser/test_dtypes.py | """
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import os
import numpy as np
import pytest
from pandas.errors import ParserWarning
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Index, MultiIndex, Series, Timestamp, concat
import pandas._testing as tm
@pytest.mark.parametrize("dtype", [str, object])
@pytest.mark.parametrize("check_orig", [True, False])
def test_dtype_all_columns(all_parsers, dtype, check_orig):
    # see gh-3795, gh-6607
    # A frame-wide dtype of str/object should stringify every column;
    # casting back to float must recover the original numeric frame.
    parser = all_parsers

    df = DataFrame(
        np.random.rand(5, 2).round(4),
        columns=list("AB"),
        index=["1A", "1B", "1C", "1D", "1E"],
    )
    with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:
        df.to_csv(path)

        result = parser.read_csv(path, dtype=dtype, index_col=0)

        if check_orig:
            expected = df.copy()
            result = result.astype(float)
        else:
            expected = df.astype(str)

        tm.assert_frame_equal(result, expected)
def test_dtype_all_columns_empty(all_parsers):
    # see gh-12048
    # A header-only CSV parsed with dtype=str yields empty str columns.
    expected = DataFrame({"A": [], "B": []}, index=[], dtype=str)
    result = all_parsers.read_csv(StringIO("A,B"), dtype=str)
    tm.assert_frame_equal(result, expected)
def test_dtype_per_column(all_parsers):
parser = all_parsers
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
expected = DataFrame(
[[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]
)
expected["one"] = expected["one"].astype(np.float64)
expected["two"] = expected["two"].astype(object)
result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})
tm.assert_frame_equal(result, expected)
def test_invalid_dtype_per_column(all_parsers):
    # An unknown dtype string must surface as a TypeError.
    data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
    msg = "data type [\"']foo[\"'] not understood"
    with pytest.raises(TypeError, match=msg):
        all_parsers.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})
@pytest.mark.parametrize(
    "dtype",
    [
        "category",
        CategoricalDtype(),
        {"a": "category", "b": "category", "c": CategoricalDtype()},
    ],
)
def test_categorical_dtype(all_parsers, dtype):
    # see gh-10153
    # Whole-frame categorical parsing keeps the raw tokens as string categories.
    data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
    raw_columns = {
        "a": ["1", "1", "2"],
        "b": ["a", "a", "b"],
        "c": ["3.4", "3.4", "4.5"],
    }
    expected = DataFrame({name: Categorical(vals) for name, vals in raw_columns.items()})
    tm.assert_frame_equal(all_parsers.read_csv(StringIO(data), dtype=dtype), expected)
@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])
def test_categorical_dtype_single(all_parsers, dtype):
    # see gh-10153
    # Only the requested column becomes categorical; the rest keep
    # their inferred dtypes.
    data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
    expected = DataFrame(
        {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}
    )
    tm.assert_frame_equal(all_parsers.read_csv(StringIO(data), dtype=dtype), expected)
def test_categorical_dtype_unsorted(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", "b", "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_missing(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = DataFrame(
{
"a": Categorical(["1", "1", "2"]),
"b": Categorical(["b", np.nan, "a"]),
"c": Categorical(["3.4", "3.4", "4.5"]),
}
)
actual = parser.read_csv(StringIO(data), dtype="category")
tm.assert_frame_equal(actual, expected)
@pytest.mark.slow
def test_categorical_dtype_high_cardinality_numeric(all_parsers):
    # see gh-18186
    # A very large number of distinct categories must still parse.
    values = np.sort([str(i) for i in range(524289)])
    expected = DataFrame({"a": Categorical(values, ordered=True)})
    actual = all_parsers.read_csv(StringIO("a\n" + "\n".join(values)), dtype="category")
    # Normalize category order before comparing against the sorted expectation.
    actual["a"] = actual["a"].cat.reorder_categories(
        np.sort(actual.a.cat.categories), ordered=True
    )
    tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_latin1(all_parsers, csv_dir_path):
    # see gh-10153
    # dtype={col: "category"} must respect a non-default encoding.
    pth = os.path.join(csv_dir_path, "unicode_series.csv")
    encoding = "latin-1"
    expected = all_parsers.read_csv(pth, header=None, encoding=encoding)
    expected[1] = Categorical(expected[1])
    actual = all_parsers.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})
    tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_utf16(all_parsers, csv_dir_path):
    # see gh-10153
    # dtype="category" combined with a UTF-16 file and tab separator.
    pth = os.path.join(csv_dir_path, "utf16_ex.txt")
    encoding = "utf-16"
    sep = "\t"
    expected = all_parsers.read_csv(pth, sep=sep, encoding=encoding).apply(Categorical)
    actual = all_parsers.read_csv(pth, sep=sep, encoding=encoding, dtype="category")
    tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_infer_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),
DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),
]
actuals = parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize_explicit_categories(all_parsers):
# see gh-10153
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ["a", "b", "c"]
expecteds = [
DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),
DataFrame(
{"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)}, index=[2, 3]
),
]
dtype = CategoricalDtype(cats)
actuals = parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
    "categories",
    [["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],
)
def test_categorical_category_dtype(all_parsers, categories, ordered):
    # Explicit category lists (and orderedness) are honored verbatim.
    data = """a,b
1,a
1,b
1,b
2,c"""
    cat_dtype = CategoricalDtype(categories=categories, ordered=ordered)
    expected = DataFrame(
        {
            "a": [1, 1, 1, 2],
            "b": Categorical(
                ["a", "b", "b", "c"], categories=categories, ordered=ordered
            ),
        }
    )
    result = all_parsers.read_csv(StringIO(data), dtype={"b": cat_dtype})
    tm.assert_frame_equal(result, expected)
def test_categorical_category_dtype_unsorted(all_parsers):
parser = all_parsers
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(["c", "b", "a"])
expected = DataFrame(
{
"a": [1, 1, 1, 2],
"b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),
}
)
result = parser.read_csv(StringIO(data), dtype={"b": dtype})
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_numeric(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = DataFrame({"b": Categorical([1, 1, 2, 3])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_datetime(all_parsers):
parser = all_parsers
dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)
dtype = {"b": CategoricalDtype(dti)}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timestamp(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype([Timestamp("2014")])}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = DataFrame({"b": Categorical([Timestamp("2014")] * 2)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_coerces_timedelta(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(pd.to_timedelta(["1H", "2H", "3H"]))}
data = "b\n1H\n2H\n3H"
expected = DataFrame({"b": Categorical(dtype["b"].categories)})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "data",
    [
        "b\nTrue\nFalse\nNA\nFalse",
        "b\ntrue\nfalse\nNA\nfalse",
        "b\nTRUE\nFALSE\nNA\nFALSE",
        "b\nTrue\nFalse\nNA\nFALSE",
    ],
)
def test_categorical_dtype_coerces_boolean(all_parsers, data):
    # see gh-20498
    # Boolean-ish tokens coerce into bool categories; NA stays missing.
    bool_dtype = {"b": CategoricalDtype([False, True])}
    expected = DataFrame({"b": Categorical([True, False, None, False])})
    tm.assert_frame_equal(all_parsers.read_csv(StringIO(data), dtype=bool_dtype), expected)
def test_categorical_unexpected_categories(all_parsers):
parser = all_parsers
dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})
result = parser.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_empty_pass_dtype(all_parsers):
    # Header-only input still honors a per-column dtype request.
    result = all_parsers.read_csv(StringIO("one,two"), dtype={"one": "u1"})
    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},
        index=Index([], dtype=object),
    )
    tm.assert_frame_equal(result, expected)
def test_empty_with_index_pass_dtype(all_parsers):
    # A dtype spec also applies to the column used as the index.
    result = all_parsers.read_csv(
        StringIO("one,two"), index_col=["one"], dtype={"one": "u1", 1: "f"}
    )
    expected = DataFrame(
        {"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")
    )
    tm.assert_frame_equal(result, expected)
def test_empty_with_multi_index_pass_dtype(all_parsers):
    # dtype specs apply to both levels of an empty MultiIndex.
    result = all_parsers.read_csv(
        StringIO("one,two,three"),
        index_col=["one", "two"],
        dtype={"one": "u1", 1: "f8"},
    )
    empty_levels = [np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)]
    exp_idx = MultiIndex.from_arrays(empty_levels, names=["one", "two"])
    expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)
    tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):
    # Duplicate headers are mangled to "one.1"; dtypes keyed by that name apply.
    result = all_parsers.read_csv(StringIO("one,one"), dtype={"one": "u1", "one.1": "f"})
    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
        index=Index([], dtype=object),
    )
    tm.assert_frame_equal(result, expected)
def test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):
    # Same as the by-names variant, but dtypes are keyed by position.
    result = all_parsers.read_csv(StringIO("one,one"), dtype={0: "u1", 1: "f"})
    expected = DataFrame(
        {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},
        index=Index([], dtype=object),
    )
    tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):
    # see gh-9424
    expected = concat(
        [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
        axis=1,
    )
    expected.index = expected.index.astype(object)
    result = all_parsers.read_csv(StringIO("one,one"), dtype={0: "u1", 1: "f"})
    tm.assert_frame_equal(result, expected)
def test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):
    # see gh-9424
    # Passing explicit duplicate names is rejected outright.
    expected = concat(
        [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],
        axis=1,
    )
    expected.index = expected.index.astype(object)
    with pytest.raises(ValueError, match="Duplicate names"):
        all_parsers.read_csv(StringIO(""), names=["one", "one"], dtype={0: "u1", 1: "f"})
def test_raise_on_passed_int_dtype_with_nas(all_parsers):
    # see gh-2631
    # An integer dtype cannot hold NA values; both engines must raise,
    # though their error messages differ.
    parser = all_parsers
    data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
    if parser.engine == "c":
        msg = "Integer column has NA values"
    else:
        msg = "Unable to convert column DOY"
    with pytest.raises(ValueError, match=msg):
        parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)
def test_dtype_with_converters(all_parsers):
    data = """a,b
1.1,2.2
1.2,2.3"""
    # Dtype spec is ignored if a converter is specified: pandas warns
    # and applies only the converter.
    with tm.assert_produces_warning(ParserWarning):
        result = all_parsers.read_csv(
            StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}
        )
    tm.assert_frame_equal(result, DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]}))
@pytest.mark.parametrize(
    "dtype,expected",
    [
        # Scalar numpy dtype applied to every column.
        (np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),
        # String alias applied to every column.
        ("category", DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[])),
        (
            # Per-column dict keyed by name.
            dict(a="category", b="category"),
            DataFrame({"a": Categorical([]), "b": Categorical([])}, index=[]),
        ),
        ("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),
        (
            "timedelta64[ns]",
            DataFrame(
                {
                    "a": Series([], dtype="timedelta64[ns]"),
                    "b": Series([], dtype="timedelta64[ns]"),
                },
                index=[],
            ),
        ),
        (
            dict(a=np.int64, b=np.int32),
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
                index=[],
            ),
        ),
        (
            # Per-column dict keyed by position.
            {0: np.int64, 1: np.int32},
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
                index=[],
            ),
        ),
        (
            # Mixed name/position keys.
            {"a": np.int64, 1: np.int32},
            DataFrame(
                {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},
                index=[],
            ),
        ),
    ],
)
def test_empty_dtype(all_parsers, dtype, expected):
    # see gh-14712
    # Header-only CSV: every supported dtype spec form yields an empty
    # frame with the requested column dtypes.
    parser = all_parsers
    data = "a,b"
    result = parser.read_csv(StringIO(data), header=0, dtype=dtype)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"])
)
def test_numeric_dtype(all_parsers, dtype):
    # Every numpy integer/float typecode is accepted as a dtype.
    expected = DataFrame([0, 1], dtype=dtype)
    result = all_parsers.read_csv(StringIO("0\n1"), header=None, dtype=dtype)
    tm.assert_frame_equal(expected, result)
def test_boolean_dtype(all_parsers):
parser = all_parsers
data = "\n".join(
[
"a",
"True",
"TRUE",
"true",
"1",
"1.0",
"False",
"FALSE",
"false",
"0",
"0.0",
"NaN",
"nan",
"NA",
"null",
"NULL",
]
)
result = parser.read_csv(StringIO(data), dtype="boolean")
expected = pd.DataFrame(
{
"a": pd.array(
[
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
None,
None,
None,
None,
None,
],
dtype="boolean",
)
}
)
tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_carpet.py | <gh_stars>1000+
from plotly.graph_objs import Carpet
|
acrucetta/Chicago_COVI_WebApp | .venv/lib/python3.8/site-packages/plotly/__init__.py | <gh_stars>0
"""
https://plot.ly/python/
Plotly's Python API allows users to programmatically access Plotly's
server resources.
This package is organized as follows:
Subpackages:
- plotly: all functionality that requires access to Plotly's servers
- graph_objs: objects for designing figures and visualizing data
- matplotlylib: tools to convert matplotlib figures
Modules:
- tools: some helpful tools that do not require access to Plotly's servers
- utils: functions that you probably won't need, but that subpackages use
- version: holds the current API version
- exceptions: defines our custom exception classes
"""
from __future__ import absolute_import
import sys
from _plotly_utils.importers import relative_import
if sys.version_info < (3, 7):
    # Python < 3.7 has no module-level __getattr__ (PEP 562), so all
    # public subpackages must be imported eagerly.
    from plotly import (
        graph_objs,
        tools,
        utils,
        offline,
        colors,
        io,
        data,
    )
    from plotly.version import __version__
    # Explicit public API of the plotly package.
    __all__ = [
        "graph_objs",
        "tools",
        "utils",
        "offline",
        "colors",
        "io",
        "data",
        "__version__",
    ]
    # Set default template (for >= 3.7 this is done in plotly/io/__init__.py)
    from plotly.io import templates
    templates._default = "plotly"
else:
    # On Python >= 3.7, defer subpackage imports until first attribute
    # access (lazy loading via PEP 562 __getattr__/__dir__).
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [
            ".graph_objs",
            ".graph_objects",
            ".tools",
            ".utils",
            ".offline",
            ".colors",
            ".io",
            ".data",
        ],
        [".version.__version__"],
    )
def plot(data_frame, kind, **kwargs):
    """
    Pandas plotting backend function, not meant to be called directly.
    To activate, set pandas.options.plotting.backend="plotly"
    See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
    """
    from .express import (
        scatter,
        line,
        area,
        bar,
        box,
        histogram,
        violin,
        strip,
        funnel,
        density_contour,
        density_heatmap,
        imshow,
    )

    # Kinds that forward their kwargs to plotly express unchanged.
    passthrough = {
        "line": line,
        "area": area,
        "bar": bar,
        "violin": violin,
        "strip": strip,
        "funnel": funnel,
        "density_contour": density_contour,
        "density_heatmap": density_heatmap,
        "imshow": imshow,
    }
    if kind in passthrough:
        return passthrough[kind](data_frame, **kwargs)
    if kind == "scatter":
        # Drop matplotlib-only styling args that plotly does not accept.
        pruned = {k: v for k, v in kwargs.items() if k not in ("s", "c")}
        return scatter(data_frame, **pruned)
    if kind == "barh":
        return bar(data_frame, orientation="h", **kwargs)
    if kind == "box":
        pruned = {k: v for k, v in kwargs.items() if k != "by"}
        return box(data_frame, **pruned)
    if kind in ["hist", "histogram"]:
        pruned = {k: v for k, v in kwargs.items() if k not in ("by", "bins")}
        return histogram(data_frame, **pruned)
    if kind == "heatmap":
        raise ValueError(
            "kind='heatmap' not supported plotting.backend='plotly'. "
            "Please use kind='imshow' or kind='density_heatmap'."
        )
    raise NotImplementedError(
        "kind='%s' not yet supported for plotting.backend='plotly'" % kind
    )
def boxplot_frame(data_frame, **kwargs):
    """
    Pandas plotting backend function, not meant to be called directly.
    To activate, set pandas.options.plotting.backend="plotly"
    See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
    """
    from .express import box

    # Strip matplotlib-specific options that plotly.express.box rejects.
    ignored = {
        "by",
        "column",
        "ax",
        "fontsize",
        "rot",
        "grid",
        "figsize",
        "layout",
        "return_type",
    }
    return box(data_frame, **{k: v for k, v in kwargs.items() if k not in ignored})
def hist_frame(data_frame, **kwargs):
    """
    Pandas plotting backend function, not meant to be called directly.
    To activate, set pandas.options.plotting.backend="plotly"
    See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
    """
    from .express import histogram

    # Strip matplotlib-specific options that plotly.express.histogram rejects.
    ignored = {
        "column",
        "by",
        "grid",
        "xlabelsize",
        "xrot",
        "ylabelsize",
        "yrot",
        "ax",
        "sharex",
        "sharey",
        "figsize",
        "layout",
        "bins",
    }
    return histogram(data_frame, **{k: v for k, v in kwargs.items() if k not in ignored})
def hist_series(data_frame, **kwargs):
    """
    Pandas plotting backend function, not meant to be called directly.
    To activate, set pandas.options.plotting.backend="plotly"
    See https://github.com/pandas-dev/pandas/blob/master/pandas/plotting/__init__.py
    """
    from .express import histogram

    # Strip matplotlib-specific options that plotly.express.histogram rejects.
    ignored = {
        "by",
        "grid",
        "xlabelsize",
        "xrot",
        "ylabelsize",
        "yrot",
        "ax",
        "figsize",
        "bins",
    }
    return histogram(data_frame, **{k: v for k, v in kwargs.items() if k not in ignored})
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/io/base_renderers.py | <reponame>acrucetta/Chicago_COVI_WebApp<gh_stars>1000+
from ._base_renderers import (
MimetypeRenderer,
PlotlyRenderer,
JsonRenderer,
ImageRenderer,
PngRenderer,
SvgRenderer,
PdfRenderer,
JpegRenderer,
HtmlRenderer,
ColabRenderer,
KaggleRenderer,
NotebookRenderer,
ExternalRenderer,
BrowserRenderer,
)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py | <gh_stars>1000+
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, merge_ordered
import pandas._testing as tm
class TestMergeOrdered:
    """Tests for :func:`pandas.merge_ordered`."""
    def setup_method(self, method):
        # Left/right frames whose keys interleave and overlap only on "c".
        self.left = DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]})
        self.right = DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]})
    def test_basic(self):
        # Ordered merge yields the sorted union of keys with NaN where absent.
        result = merge_ordered(self.left, self.right, on="key")
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"],
                "lvalue": [1, np.nan, 2, np.nan, 3, np.nan],
                "rvalue": [np.nan, 1, 2, 3, np.nan, 4],
            }
        )
        tm.assert_frame_equal(result, expected)
    def test_ffill(self):
        # fill_method="ffill" forward-fills the gaps created by the merge.
        result = merge_ordered(self.left, self.right, on="key", fill_method="ffill")
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"],
                "lvalue": [1.0, 1, 2, 2, 3, 3.0],
                "rvalue": [np.nan, 1, 2, 3, 3, 4],
            }
        )
        tm.assert_frame_equal(result, expected)
    def test_multigroup(self):
        # left_by repeats the merge within each group label.
        left = pd.concat([self.left, self.left], ignore_index=True)
        left["group"] = ["a"] * 3 + ["b"] * 3
        result = merge_ordered(
            left, self.right, on="key", left_by="group", fill_method="ffill"
        )
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"] * 2,
                "lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
                "rvalue": [np.nan, 1, 2, 3, 3, 4] * 2,
            }
        )
        expected["group"] = ["a"] * 6 + ["b"] * 6
        tm.assert_frame_equal(result, expected.loc[:, result.columns])
        # Symmetric call with right_by gives the same data (modulo column order).
        result2 = merge_ordered(
            self.right, left, on="key", right_by="group", fill_method="ffill"
        )
        tm.assert_frame_equal(result, result2.loc[:, result.columns])
        # Even without a fill method, the group column must be fully populated.
        result = merge_ordered(left, self.right, on="key", left_by="group")
        assert result["group"].notna().all()
    def test_merge_type(self):
        # DataFrame subclasses must survive .merge via _constructor.
        class NotADataFrame(DataFrame):
            @property
            def _constructor(self):
                return NotADataFrame
        nad = NotADataFrame(self.left)
        result = nad.merge(self.right, on="key")
        assert isinstance(result, NotADataFrame)
    def test_empty_sequence_concat(self):
        # GH 9157
        # pd.concat rejects empty or all-None sequences with clear messages.
        empty_pat = "[Nn]o objects"
        none_pat = "objects.*None"
        test_cases = [
            ((), empty_pat),
            ([], empty_pat),
            ({}, empty_pat),
            ([None], none_pat),
            ([None, None], none_pat),
        ]
        for df_seq, pattern in test_cases:
            with pytest.raises(ValueError, match=pattern):
                pd.concat(df_seq)
        # Sequences containing at least one real frame are accepted.
        pd.concat([pd.DataFrame()])
        pd.concat([None, pd.DataFrame()])
        pd.concat([pd.DataFrame(), None])
    def test_doc_example(self):
        # The worked example from the merge_ordered docstring.
        left = DataFrame(
            {
                "group": list("aaabbb"),
                "key": ["a", "c", "e", "a", "c", "e"],
                "lvalue": [1, 2, 3] * 2,
            }
        )
        right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
        result = merge_ordered(left, right, fill_method="ffill", left_by="group")
        expected = DataFrame(
            {
                "group": list("aaaaabbbbb"),
                "key": ["a", "b", "c", "d", "e"] * 2,
                "lvalue": [1, 1, 2, 2, 3] * 2,
                "rvalue": [np.nan, 1, 2, 3, 3] * 2,
            }
        )
        tm.assert_frame_equal(result, expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexing/multiindex/test_xs.py | import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, concat, date_range
import pandas._testing as tm
import pandas.core.common as com
@pytest.fixture
def four_level_index_dataframe():
    """3x5 frame indexed by a four-level MultiIndex (one/two/three/four)."""
    values = np.array(
        [
            [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
            [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
            [-0.6662, -0.5243, -0.358, 0.89145, 2.5838],
        ]
    )
    mi = MultiIndex(
        levels=[["a", "x"], ["b", "q"], [10.0032, 20.0, 30.0], [3, 4, 5]],
        codes=[[0, 0, 1], [0, 1, 1], [0, 1, 2], [2, 1, 0]],
        names=["one", "two", "three", "four"],
    )
    return DataFrame(values, index=mi, columns=list("ABCDE"))
@pytest.mark.parametrize(
    "key, level, exp_arr, exp_index",
    [
        ("a", "lvl0", lambda x: x[:, 0:2], Index(["bar", "foo"], name="lvl1")),
        ("foo", "lvl1", lambda x: x[:, 1:2], Index(["a"], name="lvl0")),
    ],
)
def test_xs_named_levels_axis_eq_1(key, level, exp_arr, exp_index):
    # see gh-2903
    # xs along axis=1 selects columns by a named column level.
    values = np.random.randn(4, 4)
    columns = MultiIndex(
        levels=[["a", "b"], ["bar", "foo", "hello", "world"]],
        codes=[[0, 0, 1, 1], [0, 1, 2, 3]],
        names=["lvl0", "lvl1"],
    )
    frame = DataFrame(values, columns=columns)
    result = frame.xs(key, level=level, axis=1)
    tm.assert_frame_equal(result, DataFrame(exp_arr(values), columns=exp_index))
def test_xs_values(multiindex_dataframe_random_data):
    # .xs on a full key returns the same values as positional row lookup.
    frame = multiindex_dataframe_random_data
    tm.assert_almost_equal(frame.xs(("bar", "two")).values, frame.values[4])
def test_xs_loc_equality(multiindex_dataframe_random_data):
    # .xs with a full key is equivalent to .loc with the same tuple.
    frame = multiindex_dataframe_random_data
    tm.assert_series_equal(frame.xs(("bar", "two")), frame.loc[("bar", "two")])
def test_xs_missing_values_in_index():
    # see gh-6574
    # missing values in returned index should be preserved
    rows = [
        ("a", "abcde", 1),
        ("b", "bbcde", 2),
        ("y", "yzcde", 25),
        ("z", "xbcde", 24),
        ("z", None, 26),
        ("z", "zbcde", 25),
        ("z", "ybcde", 26),
    ]
    frame = DataFrame(rows, columns=["a1", "a2", "cnt"]).set_index(["a1", "a2"])
    result = frame.xs("z", level="a1")
    expected = DataFrame(
        {"cnt": [24, 26, 25, 26]},
        index=Index(["xbcde", np.nan, "zbcde", "ybcde"], name="a2"),
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("key, level", [("one", "second"), (["one"], ["second"])])
def test_xs_with_duplicates(key, level, multiindex_dataframe_random_data):
    # see gh-13719
    # xs works (and keeps duplicates) on a non-unique index.
    base = multiindex_dataframe_random_data
    doubled = concat([base] * 2)
    assert doubled.index.is_unique is False
    expected = concat([base.xs("one", level="second")] * 2)
    tm.assert_frame_equal(doubled.xs(key, level=level), expected)
def test_xs_level(multiindex_dataframe_random_data):
    # xs on a named level drops that level from the result's index.
    frame = multiindex_dataframe_random_data
    expected = frame[frame.index.get_level_values(1) == "two"]
    expected.index = Index(["foo", "bar", "baz", "qux"], name="first")
    tm.assert_frame_equal(frame.xs("two", level="second"), expected)
def test_xs_level_eq_2():
    # xs on the innermost level keeps the two outer levels in the index.
    values = np.random.randn(3, 5)
    mi = MultiIndex(
        levels=[["a", "p", "x"], ["b", "q", "y"], ["c", "r", "z"]],
        codes=[[2, 0, 1], [2, 0, 1], [2, 0, 1]],
    )
    frame = DataFrame(values, index=mi)
    expected = DataFrame(values[1:2], index=[["a"], ["b"]])
    tm.assert_frame_equal(frame.xs("c", level=2), expected)
@pytest.mark.parametrize(
    "indexer",
    [
        lambda df: df.xs(("a", 4), level=["one", "four"]),
        lambda df: df.xs("a").xs(4, level="four"),
    ],
)
def test_xs_level_multiple(indexer, four_level_index_dataframe):
    # Selecting on two levels at once (or chained) leaves the other two.
    expected_index = MultiIndex(
        levels=[["q"], [20.0]], codes=[[0], [0]], names=["two", "three"]
    )
    expected = DataFrame(
        [[0.4473, 1.4152, 0.2834, 1.00661, 0.1744]],
        index=expected_index,
        columns=list("ABCDE"),
    )
    tm.assert_frame_equal(indexer(four_level_index_dataframe), expected)
def test_xs_setting_with_copy_error(multiindex_dataframe_random_data):
    # this is a copy in 0.14
    sliced = multiindex_dataframe_random_data.xs("two", level="second")
    # Writing into the xs result (a view) must raise SettingWithCopyError.
    msg = "A value is trying to be set on a copy of a slice from a DataFrame"
    with pytest.raises(com.SettingWithCopyError, match=msg):
        sliced[:] = 10
def test_xs_setting_with_copy_error_multiple(four_level_index_dataframe):
    # this is a copy in 0.14
    sliced = four_level_index_dataframe.xs(("a", 4), level=["one", "four"])
    # Writing into the xs result (a view) must raise SettingWithCopyError.
    msg = "A value is trying to be set on a copy of a slice from a DataFrame"
    with pytest.raises(com.SettingWithCopyError, match=msg):
        sliced[:] = 10
def test_xs_integer_key():
    # see gh-2107
    # Integer (label, not positional) keys work with xs on a named level.
    dates = range(20111201, 20111205)
    secids = list("abcde")
    mi = MultiIndex.from_product([dates, secids], names=["date", "secid"])
    frame = DataFrame(np.random.randn(len(mi), 3), mi, ["X", "Y", "Z"])
    tm.assert_frame_equal(frame.xs(20111201, level="date"), frame.loc[20111201, :])
@pytest.mark.parametrize(
    "indexer", [lambda df: df.xs("a", level=0), lambda df: df.xs("a")]
)
def test_xs_level0(indexer, four_level_index_dataframe):
    # xs on the outermost level (explicit or implied) drops only that level.
    expected_index = MultiIndex(
        levels=[["b", "q"], [10.0032, 20.0], [4, 5]],
        codes=[[0, 1], [0, 1], [1, 0]],
        names=["two", "three", "four"],
    )
    expected = DataFrame(
        [
            [-0.5109, -2.3358, -0.4645, 0.05076, 0.364],
            [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],
        ],
        index=expected_index,
        columns=list("ABCDE"),
    )
    tm.assert_frame_equal(indexer(four_level_index_dataframe), expected)
def test_xs_level_series(multiindex_dataframe_random_data):
    # this test is not explicitly testing .xs functionality
    # TODO: move to another module or refactor
    frame = multiindex_dataframe_random_data
    series = frame["A"]
    tm.assert_series_equal(series[:, "two"], frame.xs("two", level=1)["A"])
def test_xs_level_series_ymd(multiindex_year_month_day_dataframe_random_data):
    # this test is not explicitly testing .xs functionality
    # TODO: move to another module or refactor
    frame = multiindex_year_month_day_dataframe_random_data
    series = frame["A"]
    tm.assert_series_equal(series[2000, 5], frame.loc[2000, 5]["A"])
def test_xs_level_series_slice_not_implemented(
    multiindex_year_month_day_dataframe_random_data,
):
    # this test is not explicitly testing .xs functionality
    # TODO: move to another module or refactor
    # not implementing this for now
    series = multiindex_year_month_day_dataframe_random_data["A"]
    msg = r"\(2000, slice\(3, 4, None\)\)"
    with pytest.raises(TypeError, match=msg):
        series[2000, 3:4]
def test_series_getitem_multiindex_xs():
    # GH6258
    # A string date key selects across the datetime level.
    stamps = list(date_range("20130903", periods=3))
    mi = MultiIndex.from_product([list("AB"), stamps])
    ser = Series([1, 3, 4, 1, 3, 4], index=mi)
    expected = Series([1, 1], index=list("AB"))
    tm.assert_series_equal(ser.xs("20130903", level=1), expected)
def test_series_getitem_multiindex_xs_by_label():
    # GH5684
    # xs on a named level keeps the remaining level (and its name).
    mi = MultiIndex.from_tuples(
        [("a", "one"), ("a", "two"), ("b", "one"), ("b", "two")]
    )
    ser = Series([1, 2, 3, 4], index=mi)
    ser.index.set_names(["L1", "L2"], inplace=True)
    expected = Series([1, 3], index=["a", "b"])
    expected.index.set_names(["L1"], inplace=True)
    tm.assert_series_equal(ser.xs("one", level="L2"), expected)
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_uniformtext.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/_uniformtext.py<gh_stars>1000+
import _plotly_utils.basevalidators
class UniformtextValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``layout.uniformtext`` property."""
    def __init__(self, plotly_name="uniformtext", parent_name="layout", **kwargs):
        super(UniformtextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator constructs.
            data_class_str=kwargs.pop("data_class_str", "Uniformtext"),
            # Human-readable property docs surfaced in error messages/help.
            data_docs=kwargs.pop(
                "data_docs",
                """
            minsize
                Sets the minimum text size between traces of
                the same type.
            mode
                Determines how the font size for various text
                elements are uniformed between each trace type.
                If the computed text sizes were smaller than
                the minimum size defined by
                `uniformtext.minsize` using "hide" option hides
                the text; and using "show" option shows the
                text without further downscaling. Please note
                that if the size defined by `minsize` is
                greater than the font size defined by trace,
                then the `minsize` is used.
""",
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | wrangling_scripts/wrangle_data.py | import pandas as pd
import plotly.graph_objs as go
from datetime import timedelta
'''
Modules to Install:
pip install pipenv
pip install geopandas==0.3.0
pip install pyshp==1.2.10
pip install shapely==1.6.3
pip install requests
pip install folium
'''
# Use this file to read in your data and prepare the plotly visualizations. The path to the data files are in
# `data/file_name.csv`
def _range_selector():
    """Build a fresh x-axis range-selector config (2w / 1m / 3m / all)."""
    return dict(
        buttons=list(
            [
                dict(count=14, label="2w", step="day", stepmode="backward"),
                dict(count=1, label="1m", step="month", stepmode="backward"),
                dict(count=3, label="3m", step="month", stepmode="todate"),
                dict(step="all"),
            ]
        )
    )


def _style_time_series(fig):
    """Apply the hover/line styling and range buttons shared by every figure."""
    fig.update_traces(mode="lines", hovertemplate=None)
    fig.update_xaxes(rangeslider_visible=False, rangeselector=_range_selector())


def _latest_value(values, dates):
    """Return the value observed on the most recent date.

    Positional ``.iloc[0]`` replaces the original label-based ``[0]``
    lookup, which only worked when the newest row happened to carry
    index label 0.
    """
    return values[dates == dates.max()].iloc[0]


def _capacity_figure(dates, values, threshold, title, annotation_text):
    """Line chart of a hospital-capacity metric with its phase-3 threshold.

    Args:
        dates (Series): observation dates.
        values (Series): metric observed on each date.
        threshold (int): capacity limit required to move to phase 3.
        title (str): figure title, also used as the trace name.
        annotation_text (str): label attached to the latest observation.

    Returns:
        plotly.graph_objs.Figure
    """
    fig = go.Figure()
    fig.add_trace(go.Scatter(name=title, x=dates, y=values))
    fig.add_trace(
        go.Scatter(
            x=[min(dates), max(dates)],
            y=[threshold, threshold],
            mode="lines+markers+text",
            name="Limit to go to phase 3",
            textposition="bottom center",
        )
    )
    fig.add_annotation(x=max(dates), y=_latest_value(values, dates), text=annotation_text)
    fig.add_annotation(x=max(dates), y=threshold, text="Limit for Phase 3")
    fig.update_layout(title=title, autosize=True, hovermode="x", showlegend=False)
    _style_time_series(fig)
    return fig


def return_figures():
    """Creates four plotly visualizations

    Args:
        None

    Returns:
        list (dict): list containing the four plotly visualizations
    """
    # NY Times cumulative COVID cases per US county.
    # NOTE(review): ``error_bad_lines`` is deprecated since pandas 1.3 in
    # favor of ``on_bad_lines="skip"``; kept for the pinned pandas version.
    url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
    nyt_daily_covid = pd.read_csv(url, error_bad_lines=False)

    # Chicago hospital-capacity feed.
    chi_hospitals = pd.read_json('https://data.cityofchicago.org/resource/f3he-c6sv.json')
    chi_hospitals.date = pd.to_datetime(chi_hospitals.date)

    # Restrict the national data to Cook County, Illinois.  ``.copy()``
    # detaches the slice so the column assignments below do not trigger
    # pandas' SettingWithCopyWarning.
    cook_mask = (nyt_daily_covid.county == "Cook") & (nyt_daily_covid.state == "Illinois")
    chi_nyt_covid = nyt_daily_covid[cook_mask].copy()
    chi_nyt_covid.date = pd.to_datetime(chi_nyt_covid.date)

    # Daily deltas from the cumulative counts, plus 7-day moving averages.
    # (The original ``chi_nyt_covid.set_index('date')`` discarded its
    # result and was a no-op, so it has been removed; ``date`` stays a
    # regular column, which is what the traces below expect.)
    chi_nyt_covid['new_daily_cases'] = chi_nyt_covid['cases'].diff()
    chi_nyt_covid['new_death_cases'] = chi_nyt_covid['deaths'].diff()
    chi_nyt_covid['MA5_Cases'] = round(chi_nyt_covid.new_daily_cases.rolling(7).mean())
    chi_nyt_covid['MA5_Deaths'] = round(chi_nyt_covid.new_death_cases.rolling(7).mean())

    # Figure 1: daily cases with 7-day moving average.
    fig_cases = go.Figure(
        data=[
            go.Scatter(name="Daily Cases", x=chi_nyt_covid.date, y=chi_nyt_covid.new_daily_cases),
            go.Scatter(
                name="7-day avg. cases",
                x=chi_nyt_covid.date,
                y=chi_nyt_covid['MA5_Cases'],
                fill='tozeroy',
                line=dict(color='green', width=1),
            ),
        ],
        layout=go.Layout(title="Daily Cases", autosize=True, hovermode="x", legend_orientation="h"),
    )
    _style_time_series(fig_cases)

    # Figure 2: daily deaths with 7-day moving average.
    fig_deaths = go.Figure(
        data=[
            go.Scatter(name="Daily Deaths", x=chi_nyt_covid.date, y=chi_nyt_covid.new_death_cases),
            go.Scatter(
                name="7-day avg. deaths",
                x=chi_nyt_covid.date,
                y=chi_nyt_covid['MA5_Deaths'],
                fill='tozeroy',
                line=dict(color='black', width=1),
            ),
        ],
        layout=go.Layout(title="Daily Deaths", autosize=True, hovermode="x", showlegend=True),
    )
    _style_time_series(fig_deaths)

    # Figures 3 & 4: capacity metrics against their phase-3 limits.
    fig_hospitals = _capacity_figure(
        chi_hospitals.date,
        chi_hospitals.ventilators_in_use_covid_19_patients,
        450,
        "COVID Patients in Ventilators",
        "Patients in Ventilators",
    )
    fig_icu = _capacity_figure(
        chi_hospitals.date,
        chi_hospitals.icu_beds_in_use_covid_19,
        600,
        "COVID Patients in ICU Beds",
        "Patients in ICU Beds",
    )

    # Plain dicts are easier for Flask to serialize.
    return [
        fig_cases.to_dict(),
        fig_deaths.to_dict(),
        fig_hospitals.to_dict(),
        fig_icu.to_dict(),
    ]
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/histogram2dcontour/contours/__init__.py | <filename>env/lib/python3.8/site-packages/plotly/validators/histogram2dcontour/contours/__init__.py
import sys
# On Python 3.7+ the validators are loaded lazily via PEP 562 module
# __getattr__; older interpreters import everything eagerly.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._value.ValueValidator",
            "._type.TypeValidator",
            "._start.StartValidator",
            "._size.SizeValidator",
            "._showlines.ShowlinesValidator",
            "._showlabels.ShowlabelsValidator",
            "._operation.OperationValidator",
            "._labelformat.LabelformatValidator",
            "._labelfont.LabelfontValidator",
            "._end.EndValidator",
            "._coloring.ColoringValidator",
        ],
    )
else:
    from ._value import ValueValidator
    from ._type import TypeValidator
    from ._start import StartValidator
    from ._size import SizeValidator
    from ._showlines import ShowlinesValidator
    from ._showlabels import ShowlabelsValidator
    from ._operation import OperationValidator
    from ._labelformat import LabelformatValidator
    from ._labelfont import LabelfontValidator
    from ._end import EndValidator
    from ._coloring import ColoringValidator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_indicator.py | from plotly.graph_objs import Indicator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/uniformtext/__init__.py | import sys
# PEP 562 lazy loading on Python 3.7+; eager imports on older interpreters.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._mode.ModeValidator", "._minsize.MinsizeValidator"]
    )
else:
    from ._mode import ModeValidator
    from ._minsize import MinsizeValidator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pip/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
# Version of this pip distribution, exposed as ``pip.__version__``.
__version__ = "19.2.3"
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/surface/contours/_y.py | <gh_stars>1000+
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound `surface.contours.y` property.

    Generated plotly.py boilerplate: delegates validation to the
    CompoundValidator base, supplying the data class name and the
    user-facing property documentation.
    """

    def __init__(self, plotly_name="y", parent_name="surface.contours", **kwargs):
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the graph-object class this property materializes to.
            data_class_str=kwargs.pop("data_class_str", "Y"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the color of the contour lines.
            end
                Sets the end contour level value. Must be more
                than `contours.start`
            highlight
                Determines whether or not contour lines about
                the y dimension are highlighted on hover.
            highlightcolor
                Sets the color of the highlighted contour
                lines.
            highlightwidth
                Sets the width of the highlighted contour
                lines.
            project
                :class:`plotly.graph_objects.surface.contours.y
                .Project` instance or dict with compatible
                properties
            show
                Determines whether or not contour lines about
                the y dimension are drawn.
            size
                Sets the step between each contour level. Must
                be positive.
            start
                Sets the starting contour level value. Must be
                less than `contours.end`
            usecolormap
                An alternate to "color". Determines whether or
                not the contour lines are colored using the
                trace "colorscale".
            width
                Sets the width of the contour lines.
            """,
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/slices/z/__init__.py | <reponame>acrucetta/Chicago_COVI_WebApp
import sys
# PEP 562 lazy loading on Python 3.7+; eager imports on older interpreters.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._show.ShowValidator",
            "._locationssrc.LocationssrcValidator",
            "._locations.LocationsValidator",
            "._fill.FillValidator",
        ],
    )
else:
    from ._show import ShowValidator
    from ._locationssrc import LocationssrcValidator
    from ._locations import LocationsValidator
    from ._fill import FillValidator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/_polar.py | <filename>env/lib/python3.8/site-packages/plotly/validators/layout/_polar.py
import _plotly_utils.basevalidators
class PolarValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound `layout.polar` subplot property.

    Generated plotly.py boilerplate: delegates validation to the
    CompoundValidator base, supplying the data class name and the
    user-facing property documentation.
    """

    def __init__(self, plotly_name="polar", parent_name="layout", **kwargs):
        super(PolarValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the graph-object class this property materializes to.
            data_class_str=kwargs.pop("data_class_str", "Polar"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            angularaxis
                :class:`plotly.graph_objects.layout.polar.Angul
                arAxis` instance or dict with compatible
                properties
            bargap
                Sets the gap between bars of adjacent location
                coordinates. Values are unitless, they
                represent fractions of the minimum difference
                in bar positions in the data.
            barmode
                Determines how bars at the same location
                coordinate are displayed on the graph. With
                "stack", the bars are stacked on top of one
                another With "overlay", the bars are plotted
                over one another, you might need to an
                "opacity" to see multiple bars.
            bgcolor
                Set the background color of the subplot
            domain
                :class:`plotly.graph_objects.layout.polar.Domai
                n` instance or dict with compatible properties
            gridshape
                Determines if the radial axis grid lines and
                angular axis line are drawn as "circular"
                sectors or as "linear" (polygon) sectors. Has
                an effect only when the angular axis has `type`
                "category". Note that `radialaxis.angle` is
                snapped to the angle of the closest vertex when
                `gridshape` is "circular" (so that radial axis
                scale is the same as the data scale).
            hole
                Sets the fraction of the radius to cut out of
                the polar subplot.
            radialaxis
                :class:`plotly.graph_objects.layout.polar.Radia
                lAxis` instance or dict with compatible
                properties
            sector
                Sets angular span of this polar subplot with
                two angles (in degrees). Sector are assumed to
                be spanned in the counterclockwise direction
                with 0 corresponding to rightmost limit of the
                polar subplot.
            uirevision
                Controls persistence of user-driven changes in
                axis attributes, if not overridden in the
                individual axes. Defaults to
                `layout.uirevision`.
            """,
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/layout/template/data/_heatmap.py | from plotly.graph_objs import Heatmap
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/scatter3d/marker/_coloraxis.py | import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
    """Validator for the `coloraxis` subplot-id property of scatter3d.marker."""

    def __init__(
        self, plotly_name="coloraxis", parent_name="scatter3d.marker", **kwargs
    ):
        # Pull overridable settings out of kwargs before delegating upward.
        dflt = kwargs.pop("dflt", None)
        edit_type = kwargs.pop("edit_type", "calc")
        regex = kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/")
        role = kwargs.pop("role", "info")
        super(ColoraxisValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            dflt=dflt,
            edit_type=edit_type,
            regex=regex,
            role=role,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/pie/_rotation.py | import _plotly_utils.basevalidators
class RotationValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the numeric `pie.rotation` property (-360..360 degrees)."""

    def __init__(self, plotly_name="rotation", parent_name="pie", **kwargs):
        # Pull overridable settings out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "calc")
        upper = kwargs.pop("max", 360)
        lower = kwargs.pop("min", -360)
        role = kwargs.pop("role", "style")
        super(RotationValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            role=role,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/histogram/_cumulative.py | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Cumulative(_BaseTraceHierarchyType):
    """Compound object configuring cumulative histograms
    (``histogram.cumulative``): whether accumulation is enabled, its
    direction, and how the current bin is counted.
    """

    # class properties
    # --------------------
    _parent_path_str = "histogram"
    _path_str = "histogram.cumulative"
    _valid_props = {"currentbin", "direction", "enabled"}
    # currentbin
    # ----------
    @property
    def currentbin(self):
        """
        Only applies if cumulative is enabled. Sets whether the current
        bin is included, excluded, or has half of its value included in
        the current cumulative value. "include" is the default for
        compatibility with various other tools, however it introduces a
        half-bin bias to the results. "exclude" makes the opposite
        half-bin bias, and "half" removes it.
        The 'currentbin' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['include', 'exclude', 'half']
        Returns
        -------
        Any
        """
        return self["currentbin"]
    @currentbin.setter
    def currentbin(self, val):
        self["currentbin"] = val
    # direction
    # ---------
    @property
    def direction(self):
        """
        Only applies if cumulative is enabled. If "increasing"
        (default) we sum all prior bins, so the result increases from
        left to right. If "decreasing" we sum later bins so the result
        decreases from left to right.
        The 'direction' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['increasing', 'decreasing']
        Returns
        -------
        Any
        """
        return self["direction"]
    @direction.setter
    def direction(self, val):
        self["direction"] = val
    # enabled
    # -------
    @property
    def enabled(self):
        """
        If true, display the cumulative distribution by summing the
        binned values. Use the `direction` and `centralbin` attributes
        to tune the accumulation method. Note: in this mode, the
        "density" `histnorm` settings behave the same as their
        equivalents without "density": "" and "density" both rise to
        the number of data points, and "probability" and *probability
        density* both rise to the number of sample points.
        The 'enabled' property must be specified as a bool
        (either True, or False)
        Returns
        -------
        bool
        """
        return self["enabled"]
    @enabled.setter
    def enabled(self, val):
        self["enabled"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        currentbin
            Only applies if cumulative is enabled. Sets whether the
            current bin is included, excluded, or has half of its
            value included in the current cumulative value.
            "include" is the default for compatibility with various
            other tools, however it introduces a half-bin bias to
            the results. "exclude" makes the opposite half-bin
            bias, and "half" removes it.
        direction
            Only applies if cumulative is enabled. If "increasing"
            (default) we sum all prior bins, so the result
            increases from left to right. If "decreasing" we sum
            later bins so the result decreases from left to right.
        enabled
            If true, display the cumulative distribution by summing
            the binned values. Use the `direction` and `centralbin`
            attributes to tune the accumulation method. Note: in
            this mode, the "density" `histnorm` settings behave the
            same as their equivalents without "density": "" and
            "density" both rise to the number of data points, and
            "probability" and *probability density* both rise to
            the number of sample points.
        """
    def __init__(
        self, arg=None, currentbin=None, direction=None, enabled=None, **kwargs
    ):
        """
        Construct a new Cumulative object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.histogram.Cumulative`
        currentbin
            Only applies if cumulative is enabled. Sets whether the
            current bin is included, excluded, or has half of its
            value included in the current cumulative value.
            "include" is the default for compatibility with various
            other tools, however it introduces a half-bin bias to
            the results. "exclude" makes the opposite half-bin
            bias, and "half" removes it.
        direction
            Only applies if cumulative is enabled. If "increasing"
            (default) we sum all prior bins, so the result
            increases from left to right. If "decreasing" we sum
            later bins so the result decreases from left to right.
        enabled
            If true, display the cumulative distribution by summing
            the binned values. Use the `direction` and `centralbin`
            attributes to tune the accumulation method. Note: in
            this mode, the "density" `histnorm` settings behave the
            same as their equivalents without "density": "" and
            "density" both rise to the number of data points, and
            "probability" and *probability density* both rise to
            the number of sample points.
        Returns
        -------
        Cumulative
        """
        super(Cumulative, self).__init__("cumulative")
        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram.Cumulative
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Cumulative`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over values in `arg`.
        _v = arg.pop("currentbin", None)
        _v = currentbin if currentbin is not None else _v
        if _v is not None:
            self["currentbin"] = _v
        _v = arg.pop("direction", None)
        _v = direction if direction is not None else _v
        if _v is not None:
            self["direction"] = _v
        _v = arg.pop("enabled", None)
        _v = enabled if enabled is not None else _v
        if _v is not None:
            self["enabled"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/waitress/__main__.py | <gh_stars>100-1000
# Entry point for ``python -m waitress``: delegate straight to the CLI runner.
from waitress.runner import run # pragma nocover
run() # pragma nocover
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/graph_objs/indicator/delta/__init__.py | import sys
# PEP 562 lazy loading on Python 3.7+; eager imports on older interpreters.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._decreasing.Decreasing", "._font.Font", "._increasing.Increasing"],
    )
else:
    from ._decreasing import Decreasing
    from ._font import Font
    from ._increasing import Increasing
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/volume/colorbar/tickformatstop/__init__.py | import sys
# PEP 562 lazy loading on Python 3.7+; eager imports on older interpreters.
if sys.version_info >= (3, 7):
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._value.ValueValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._name.NameValidator",
            "._enabled.EnabledValidator",
            "._dtickrange.DtickrangeValidator",
        ],
    )
else:
    from ._value import ValueValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._name import NameValidator
    from ._enabled import EnabledValidator
    from ._dtickrange import DtickrangeValidator
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/scene/zaxis/_categoryorder.py | import _plotly_utils.basevalidators
class CategoryorderValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated `layout.scene.zaxis.categoryorder` property."""

    def __init__(
        self, plotly_name="categoryorder", parent_name="layout.scene.zaxis", **kwargs
    ):
        # The orderings understood by plotly.js for categorical axes.
        allowed = kwargs.pop(
            "values",
            [
                "trace",
                "category ascending",
                "category descending",
                "array",
                "total ascending",
                "total descending",
                "min ascending",
                "min descending",
                "max ascending",
                "max descending",
                "sum ascending",
                "sum descending",
                "mean ascending",
                "mean descending",
                "median ascending",
                "median descending",
            ],
        )
        super(CategoryorderValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            role=kwargs.pop("role", "info"),
            values=allowed,
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/plotly/validators/layout/yaxis/_rangebreaks.py | <reponame>acrucetta/Chicago_COVI_WebApp
import _plotly_utils.basevalidators
class RangebreaksValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the `layout.yaxis.rangebreaks` compound-array property.

    Generated plotly.py boilerplate: delegates validation to the
    CompoundArrayValidator base, supplying the element data class name
    and the user-facing property documentation.
    """

    def __init__(self, plotly_name="rangebreaks", parent_name="layout.yaxis", **kwargs):
        super(RangebreaksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Each array element materializes to a Rangebreak graph object.
            data_class_str=kwargs.pop("data_class_str", "Rangebreak"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            bounds
                Sets the lower and upper bounds of this axis
                rangebreak. Can be used with `pattern`.
            dvalue
                Sets the size of each `values` item. The
                default is one day in milliseconds.
            enabled
                Determines whether this axis rangebreak is
                enabled or disabled. Please note that
                `rangebreaks` only work for "date" axis type.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            pattern
                Determines a pattern on the time line that
                generates breaks. If *day of week* - days of
                the week in English e.g. 'Sunday' or `sun`
                (matching is case-insensitive and considers
                only the first three characters), as well as
                Sunday-based integers between 0 and 6. If
                "hour" - hour (24-hour clock) as decimal
                numbers between 0 and 24. for more info.
                Examples: - { pattern: 'day of week', bounds:
                [6, 1] } or simply { bounds: ['sat', 'mon'] }
                breaks from Saturday to Monday (i.e. skips the
                weekends). - { pattern: 'hour', bounds: [17, 8]
                } breaks from 5pm to 8am (i.e. skips non-work
                hours).
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            values
                Sets the coordinate values corresponding to the
                rangebreaks. An alternative to `bounds`. Use
                `dvalue` to set the size of the values along
                the axis.
            """,
            ),
            **kwargs
        )
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/numpy/lib/stride_tricks.py | <gh_stars>1000+
"""
Utilities that manipulate strides to achieve desirable effects.
An explanation of strides can be found in the "ndarray.rst" file in the
NumPy reference guide.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.core.overrides import array_function_dispatch
__all__ = ['broadcast_to', 'broadcast_arrays']
class DummyArray(object):
    """Minimal carrier object for an ``__array_interface__`` dict.

    Its only purpose is to expose the array-interface protocol to
    ``np.asarray`` while (optionally) keeping a reference to a base
    array alive so the underlying memory is not freed.
    """

    def __init__(self, interface, base=None):
        # Held purely so the memory backing any derived view stays alive.
        self.base = base
        # np.asarray() reads this protocol dict to construct the view.
        self.__array_interface__ = interface
def _maybe_view_as_subclass(original_array, new_array):
if type(original_array) is not type(new_array):
# if input was an ndarray subclass and subclasses were OK,
# then view the result as that subclass.
new_array = new_array.view(type=type(original_array))
# Since we have done something akin to a view from original_array, we
# should let the subclass finalize (if it has it implemented, i.e., is
# not None).
if new_array.__array_finalize__:
new_array.__array_finalize__(original_array)
return new_array
def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
    """
    Create a view into the array with the given shape and strides.
    .. warning:: This function has to be used with extreme care, see notes.
    Parameters
    ----------
    x : ndarray
        Array to create a new.
    shape : sequence of int, optional
        The shape of the new array. Defaults to ``x.shape``.
    strides : sequence of int, optional
        The strides of the new array. Defaults to ``x.strides``.
    subok : bool, optional
        .. versionadded:: 1.10
        If True, subclasses are preserved.
    writeable : bool, optional
        .. versionadded:: 1.12
        If set to False, the returned array will always be readonly.
        Otherwise it will be writable if the original array was. It
        is advisable to set this to False if possible (see Notes).
    Returns
    -------
    view : ndarray
    See also
    --------
    broadcast_to: broadcast an array to a given shape.
    reshape : reshape an array.
    Notes
    -----
    ``as_strided`` creates a view into the array given the exact strides
    and shape. This means it manipulates the internal data structure of
    ndarray and, if done incorrectly, the array elements can point to
    invalid memory and can corrupt results or crash your program.
    It is advisable to always use the original ``x.strides`` when
    calculating new strides to avoid reliance on a contiguous memory
    layout.
    Furthermore, arrays created with this function often contain self
    overlapping memory, so that two elements are identical.
    Vectorized write operations on such arrays will typically be
    unpredictable. They may even give different results for small, large,
    or transposed arrays.
    Since writing to these arrays has to be tested and done with great
    care, you may want to use ``writeable=False`` to avoid accidental write
    operations.
    For these reasons it is advisable to avoid ``as_strided`` when
    possible.
    """
    # first convert input to array, possibly keeping subclass
    x = np.array(x, copy=False, subok=subok)
    # Work on a copy of x's array-interface dict so the shape/strides
    # overrides below do not mutate x's own protocol dict.
    interface = dict(x.__array_interface__)
    if shape is not None:
        interface['shape'] = tuple(shape)
    if strides is not None:
        interface['strides'] = tuple(strides)
    # DummyArray exposes the patched interface; keeping x as `base` keeps
    # the underlying memory alive for the lifetime of the new view.
    array = np.asarray(DummyArray(interface, base=x))
    # The route via `__interface__` does not preserve structured
    # dtypes. Since dtype should remain unchanged, we set it explicitly.
    array.dtype = x.dtype
    view = _maybe_view_as_subclass(x, array)
    # Writeability can only be dropped here, never granted: honor an
    # explicit writeable=False request.
    if view.flags.writeable and not writeable:
        view.flags.writeable = False
    return view
def _broadcast_to(array, shape, subok, readonly):
    """Shared broadcasting implementation behind ``broadcast_to`` and
    ``broadcast_arrays``; returns a (usually read-only) view of *array*.
    """
    # Normalize a scalar shape like 3 to the 1-tuple (3,).
    shape = tuple(shape) if np.iterable(shape) else (shape,)
    array = np.array(array, copy=False, subok=subok)
    if not shape and array.shape:
        raise ValueError('cannot broadcast a non-scalar to a scalar array')
    if any(size < 0 for size in shape):
        raise ValueError('all elements of broadcast shape must be non-'
                         'negative')
    # Reserved for extra nditer flags; currently always empty.
    extras = []
    # nditer with `itershape` performs the actual broadcast; itviews[0]
    # exposes the zero-copy broadcast view of the single operand.
    it = np.nditer(
        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
        op_flags=['readonly'], itershape=shape, order='C')
    with it:
        # never really has writebackifcopy semantics
        broadcast = it.itviews[0]
        result = _maybe_view_as_subclass(array, broadcast)
    # In a future version this will go away
    if not readonly and array.flags._writeable_no_warn:
        result.flags.writeable = True
        result.flags._warn_on_write = True
    return result
def _broadcast_to_dispatcher(array, shape, subok=None):
    # __array_function__ dispatcher: only `array` is a candidate for
    # protocol dispatch; `shape` and `subok` are plain parameters.
    return (array,)
@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
    """Broadcast an array to a new shape.
    Parameters
    ----------
    array : array_like
        The array to broadcast.
    shape : tuple
        The shape of the desired array.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned array will be forced to be a base-class array (default).
    Returns
    -------
    broadcast : array
        A readonly view on the original array with the given shape. It is
        typically not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location.
    Raises
    ------
    ValueError
        If the array is not compatible with the new shape according to NumPy's
        broadcasting rules.
    Notes
    -----
    .. versionadded:: 1.10.0
    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> np.broadcast_to(x, (3, 3))
    array([[1, 2, 3],
           [1, 2, 3],
           [1, 2, 3]])
    """
    # Broadcast views can alias one memory location from many elements,
    # so they are handed out read-only.
    return _broadcast_to(array, shape, subok=subok, readonly=True)
def _broadcast_shape(*args):
    """Return the shape produced by broadcasting all of *args* together."""
    # The old np.broadcast iterator is used (rather than np.nditer) because
    # nditer does not handle size-0 arrays consistently.  np.broadcast also
    # accepts at most 32 operands, so any extra arguments are folded in 31
    # at a time, with a cheap broadcast_to(0, ...) stub standing in for the
    # shape accumulated so far (np.broadcast treats np.broadcast objects
    # themselves as scalars, so the stub array is required).
    probe = np.broadcast(*args[:32])
    pos = 32
    while pos < len(args):
        stub = broadcast_to(0, probe.shape)
        probe = np.broadcast(stub, *args[pos:(pos + 31)])
        pos += 31
    return probe.shape
def _broadcast_arrays_dispatcher(*args, **kwargs):
    # __array_function__ dispatcher: every positional argument may carry
    # an override; keyword arguments do not participate in dispatch.
    return args
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, **kwargs):
    """
    Broadcast any number of arrays against each other.
    Parameters
    ----------
    `*args` : array_likes
        The arrays to broadcast.
    subok : bool, optional
        If True, then sub-classes will be passed-through, otherwise
        the returned arrays will be forced to be a base-class array (default).
    Returns
    -------
    broadcasted : list of arrays
        These arrays are views on the original arrays. They are typically
        not contiguous. Furthermore, more than one element of a
        broadcasted array may refer to a single memory location. If you need
        to write to the arrays, make copies first. While you can set the
        ``writable`` flag True, writing to a single output value may end up
        changing more than one location in the output array.
    .. deprecated:: 1.17
        The output is currently marked so that if written to, a deprecation
        warning will be emitted. A future version will set the
        ``writable`` flag False so writing to it will raise an error.
    Examples
    --------
    >>> x = np.array([[1,2,3]])
    >>> y = np.array([[4],[5]])
    >>> np.broadcast_arrays(x, y)
    [array([[1, 2, 3],
           [1, 2, 3]]), array([[4, 4, 4],
           [5, 5, 5]])]
    Here is a useful idiom for getting contiguous copies instead of
    non-contiguous views.
    >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
    [array([[1, 2, 3],
           [1, 2, 3]]), array([[4, 4, 4],
           [5, 5, 5]])]
    """
    # nditer is deliberately avoided here because of its 32-operand limit;
    # `subok` must be pulled out of **kwargs by hand since this function
    # still has a *args signature.
    subok = kwargs.pop('subok', False)
    if kwargs:
        unexpected = list(kwargs.keys())[0]
        raise TypeError('broadcast_arrays() got an unexpected keyword '
                        'argument {!r}'.format(unexpected))
    arrays = [np.array(item, copy=False, subok=subok) for item in args]
    target = _broadcast_shape(*arrays)
    if all(a.shape == target for a in arrays):
        # Common case: every input already has the broadcast shape.
        return arrays
    return [_broadcast_to(a, target, subok=subok, readonly=False)
            for a in arrays]
|
acrucetta/Chicago_COVI_WebApp | env/lib/python3.8/site-packages/pandas/tests/indexes/period/test_indexing.py | from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
    """Tests for ``PeriodIndex.take`` and its fill-value handling."""

    def test_take(self):
        # GH#10295: take must preserve freq and name for single, ordered,
        # strided, unordered and negative position lists.
        idx = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")

        # Single-position takes compare equal to the corresponding Period.
        assert idx.take([0]) == pd.Period("2011-01-01", freq="D")
        assert idx.take([5]) == pd.Period("2011-01-06", freq="D")

        cases = [
            (
                [0, 1, 2],
                pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx"),
            ),
            (
                [0, 2, 4],
                PeriodIndex(
                    ["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
                ),
            ),
            (
                [7, 4, 1],
                PeriodIndex(
                    ["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
                ),
            ),
            (
                [3, 2, 5],
                PeriodIndex(
                    ["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
                ),
            ),
            (
                [-3, 2, 5],
                PeriodIndex(
                    ["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
                ),
            ),
        ]
        for positions, expected in cases:
            taken = idx.take(positions)
            tm.assert_index_equal(taken, expected)
            assert taken.freq == expected.freq
            assert taken.freq == "D"

    def test_take_misc(self):
        index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
        expected = PeriodIndex(
            [
                datetime(2010, 1, 6),
                datetime(2010, 1, 7),
                datetime(2010, 1, 9),
                datetime(2010, 1, 13),
            ],
            freq="D",
            name="idx",
        )
        # ``take`` and fancy __getitem__ must agree with each other.
        for taken in [index.take([5, 6, 8, 12]), index[[5, 6, 8, 12]]]:
            tm.assert_index_equal(taken, expected)
            assert isinstance(taken, PeriodIndex)
            assert taken.freq == index.freq
            assert taken.name == expected.name

    def test_take_fill_value(self):
        # GH#12631
        idx = pd.PeriodIndex(
            ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
        )

        # Default: negative positions count from the end.
        tm.assert_index_equal(
            idx.take(np.array([1, 0, -1])),
            pd.PeriodIndex(
                ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
            ),
        )

        # fill_value=True: -1 marks a missing entry and becomes NaT.
        tm.assert_index_equal(
            idx.take(np.array([1, 0, -1]), fill_value=True),
            pd.PeriodIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"),
        )

        # allow_fill=False restores end-relative negative indexing.
        tm.assert_index_equal(
            idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True),
            pd.PeriodIndex(
                ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
            ),
        )

        # While filling, positions below -1 are invalid.
        msg = (
            "When allow_fill=True and fill_value is not None, "
            "all indices must be >= -1"
        )
        with pytest.raises(ValueError, match=msg):
            idx.take(np.array([1, 0, -2]), fill_value=True)
        with pytest.raises(ValueError, match=msg):
            idx.take(np.array([1, 0, -5]), fill_value=True)

        # Out-of-bounds positions raise IndexError regardless of filling.
        msg = "index -5 is out of bounds for( axis 0 with)? size 3"
        with pytest.raises(IndexError, match=msg):
            idx.take(np.array([1, -5]))
class TestIndexing:
    """Lookup APIs on PeriodIndex: ``get_loc``, ``get_value``,
    ``get_indexer``/``get_indexer_non_unique``, ``in`` membership,
    monotonicity flags, and Series/DataFrame indexing over a PeriodIndex.

    NOTE(review): several tests pin exact error messages and use APIs
    (``get_value``, ``get_loc(..., method=...)``) tied to the pandas
    version this suite was written for — verify against that version.
    """
    def test_get_loc_msg(self):
        # KeyError for a missing Period carries the Period as its payload.
        idx = period_range("2000-1-1", freq="A", periods=10)
        bad_period = Period("2012", "A")
        with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
            idx.get_loc(bad_period)
        try:
            idx.get_loc(bad_period)
        except KeyError as inst:
            # args[0] is the offending Period itself, not a string.
            assert inst.args[0] == bad_period
    def test_get_loc_nat(self):
        # NaT and every NA-like key (None, float nan, np.nan) resolve to
        # the position of the NaT entry, matching DatetimeIndex behavior.
        didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
        pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
        # check DatetimeIndex compat
        for idx in [didx, pidx]:
            assert idx.get_loc(pd.NaT) == 1
            assert idx.get_loc(None) == 1
            assert idx.get_loc(float("nan")) == 1
            assert idx.get_loc(np.nan) == 1
    def test_get_loc(self):
        # GH 17717
        p0 = pd.Period("2017-09-01")
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        # get the location of p1/p2 from
        # monotonic increasing PeriodIndex with non-duplicate
        idx0 = pd.PeriodIndex([p0, p1, p2])
        expected_idx1_p1 = 1
        expected_idx1_p2 = 2
        # A Period and its string form resolve to the same position.
        assert idx0.get_loc(p1) == expected_idx1_p1
        assert idx0.get_loc(str(p1)) == expected_idx1_p1
        assert idx0.get_loc(p2) == expected_idx1_p2
        assert idx0.get_loc(str(p2)) == expected_idx1_p2
        # Unparseable strings and non-Period scalars raise KeyError; an
        # index used as a key raises TypeError (invalid key type).
        msg = "Cannot interpret 'foo' as period"
        with pytest.raises(KeyError, match=msg):
            idx0.get_loc("foo")
        with pytest.raises(KeyError, match=r"^1\.1$"):
            idx0.get_loc(1.1)
        msg = (
            r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
            r" dtype='period\[D\]', freq='D'\)' is an invalid key"
        )
        with pytest.raises(TypeError, match=msg):
            idx0.get_loc(idx0)
        # get the location of p1/p2 from
        # monotonic increasing PeriodIndex with duplicate
        idx1 = pd.PeriodIndex([p1, p1, p2])
        # Duplicates in a monotonic index yield a slice, not a scalar.
        expected_idx1_p1 = slice(0, 2)
        expected_idx1_p2 = 2
        assert idx1.get_loc(p1) == expected_idx1_p1
        assert idx1.get_loc(str(p1)) == expected_idx1_p1
        assert idx1.get_loc(p2) == expected_idx1_p2
        assert idx1.get_loc(str(p2)) == expected_idx1_p2
        msg = "Cannot interpret 'foo' as period"
        with pytest.raises(KeyError, match=msg):
            idx1.get_loc("foo")
        with pytest.raises(KeyError, match=r"^1\.1$"):
            idx1.get_loc(1.1)
        msg = (
            r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
            r" dtype='period\[D\]', freq='D'\)' is an invalid key"
        )
        with pytest.raises(TypeError, match=msg):
            idx1.get_loc(idx1)
        # get the location of p1/p2 from
        # non-monotonic increasing/decreasing PeriodIndex with duplicate
        idx2 = pd.PeriodIndex([p2, p1, p2])
        expected_idx2_p1 = 1
        # Duplicates in a non-monotonic index yield a boolean mask.
        expected_idx2_p2 = np.array([True, False, True])
        assert idx2.get_loc(p1) == expected_idx2_p1
        assert idx2.get_loc(str(p1)) == expected_idx2_p1
        tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
        tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
    def test_is_monotonic_increasing(self):
        # GH 17717
        p0 = pd.Period("2017-09-01")
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        idx_inc0 = pd.PeriodIndex([p0, p1, p2])
        idx_inc1 = pd.PeriodIndex([p0, p1, p1])  # non-strictly increasing
        idx_dec0 = pd.PeriodIndex([p2, p1, p0])
        idx_dec1 = pd.PeriodIndex([p2, p1, p1])
        idx = pd.PeriodIndex([p1, p2, p0])  # unordered
        # Repeated values still count as monotonic (non-strict ordering).
        assert idx_inc0.is_monotonic_increasing is True
        assert idx_inc1.is_monotonic_increasing is True
        assert idx_dec0.is_monotonic_increasing is False
        assert idx_dec1.is_monotonic_increasing is False
        assert idx.is_monotonic_increasing is False
    def test_is_monotonic_decreasing(self):
        # GH 17717: mirror of the increasing case above.
        p0 = pd.Period("2017-09-01")
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        idx_inc0 = pd.PeriodIndex([p0, p1, p2])
        idx_inc1 = pd.PeriodIndex([p0, p1, p1])
        idx_dec0 = pd.PeriodIndex([p2, p1, p0])
        idx_dec1 = pd.PeriodIndex([p2, p1, p1])
        idx = pd.PeriodIndex([p1, p2, p0])
        assert idx_inc0.is_monotonic_decreasing is False
        assert idx_inc1.is_monotonic_decreasing is False
        assert idx_dec0.is_monotonic_decreasing is True
        assert idx_dec1.is_monotonic_decreasing is True
        assert idx.is_monotonic_decreasing is False
    def test_contains(self):
        # GH 17717
        p0 = pd.Period("2017-09-01")
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        p3 = pd.Period("2017-09-04")
        ps0 = [p0, p1, p2]
        idx0 = pd.PeriodIndex(ps0)
        # Members match as Periods and as their string forms; strings that
        # fall within a member's span (timestamp, year-month) also match.
        for p in ps0:
            assert p in idx0
            assert str(p) in idx0
        assert "2017-09-01 00:00:01" in idx0
        assert "2017-09" in idx0
        assert p3 not in idx0
    def test_get_value(self):
        # GH 17717
        p0 = pd.Period("2017-09-01")
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        # Unique index: scalar lookup result.
        idx0 = pd.PeriodIndex([p0, p1, p2])
        input0 = np.array([1, 2, 3])
        expected0 = 2
        result0 = idx0.get_value(input0, p1)
        assert result0 == expected0
        # Monotonic duplicates: contiguous slice of values is returned.
        idx1 = pd.PeriodIndex([p1, p1, p2])
        input1 = np.array([1, 2, 3])
        expected1 = np.array([1, 2])
        result1 = idx1.get_value(input1, p1)
        tm.assert_numpy_array_equal(result1, expected1)
        # Non-monotonic duplicates: boolean-mask selection of values.
        idx2 = pd.PeriodIndex([p1, p2, p1])
        input2 = np.array([1, 2, 3])
        expected2 = np.array([1, 3])
        result2 = idx2.get_value(input2, p1)
        tm.assert_numpy_array_equal(result2, expected2)
    def test_get_indexer(self):
        # GH 17717: pad/backfill/nearest fill methods, plus tolerance.
        p1 = pd.Period("2017-09-01")
        p2 = pd.Period("2017-09-04")
        p3 = pd.Period("2017-09-07")
        tp0 = pd.Period("2017-08-31")
        tp1 = pd.Period("2017-09-02")
        tp2 = pd.Period("2017-09-05")
        tp3 = pd.Period("2017-09-09")
        idx = pd.PeriodIndex([p1, p2, p3])
        # Self-lookup is the identity mapping.
        tm.assert_numpy_array_equal(
            idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
        )
        target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "pad"), np.array([-1, 0, 1, 2], dtype=np.intp)
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "backfill"), np.array([0, 1, 2, -1], dtype=np.intp)
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "nearest"), np.array([0, 0, 1, 2], dtype=np.intp)
        )
        # With a 1-day tolerance the last target (2 days away) is unmatched.
        res = idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 day"))
        tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp))
    def test_get_indexer_mismatched_dtype(self):
        # Check that we return all -1s and do not raise or cast incorrectly
        dti = pd.date_range("2016-01-01", periods=3)
        pi = dti.to_period("D")
        pi2 = dti.to_period("W")
        expected = np.array([-1, -1, -1], dtype=np.intp)
        result = pi.get_indexer(dti)
        tm.assert_numpy_array_equal(result, expected)
        # This should work in both directions
        result = dti.get_indexer(pi)
        tm.assert_numpy_array_equal(result, expected)
        result = pi.get_indexer(pi2)
        tm.assert_numpy_array_equal(result, expected)
        # We expect the same from get_indexer_non_unique
        result = pi.get_indexer_non_unique(dti)[0]
        tm.assert_numpy_array_equal(result, expected)
        result = dti.get_indexer_non_unique(pi)[0]
        tm.assert_numpy_array_equal(result, expected)
        result = pi.get_indexer_non_unique(pi2)[0]
        tm.assert_numpy_array_equal(result, expected)
    def test_get_indexer_non_unique(self):
        # GH 17717
        p1 = pd.Period("2017-09-02")
        p2 = pd.Period("2017-09-03")
        p3 = pd.Period("2017-09-04")
        p4 = pd.Period("2017-09-05")
        idx1 = pd.PeriodIndex([p1, p2, p1])
        idx2 = pd.PeriodIndex([p2, p1, p3, p4])
        result = idx1.get_indexer_non_unique(idx2)
        # p1 occurs twice in idx1 (positions 0 and 2); p3/p4 are missing,
        # so target positions 2 and 3 appear in the "missing" array.
        expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
        expected_missing = np.array([2, 3], dtype=np.int64)
        tm.assert_numpy_array_equal(result[0], expected_indexer)
        tm.assert_numpy_array_equal(result[1], expected_missing)
    # TODO: This method came from test_period; de-dup with version above
    def test_get_loc2(self):
        idx = pd.period_range("2000-01-01", periods=3)
        # Period, higher-frequency Period, Timestamp, datetime and string
        # keys all resolve to the same position under every fill method.
        for method in [None, "pad", "backfill", "nearest"]:
            assert idx.get_loc(idx[1], method) == 1
            assert idx.get_loc(idx[1].asfreq("H", how="start"), method) == 1
            assert idx.get_loc(idx[1].to_timestamp(), method) == 1
            assert idx.get_loc(idx[1].to_timestamp().to_pydatetime(), method) == 1
            assert idx.get_loc(str(idx[1]), method) == 1
        # Tolerance accepted as string, Timedelta, np.timedelta64, timedelta.
        idx = pd.period_range("2000-01-01", periods=5)[::2]
        assert idx.get_loc("2000-01-02T12", method="nearest", tolerance="1 day") == 1
        assert (
            idx.get_loc("2000-01-02T12", method="nearest", tolerance=pd.Timedelta("1D"))
            == 1
        )
        assert (
            idx.get_loc(
                "2000-01-02T12", method="nearest", tolerance=np.timedelta64(1, "D")
            )
            == 1
        )
        assert (
            idx.get_loc("2000-01-02T12", method="nearest", tolerance=timedelta(1)) == 1
        )
        # Invalid tolerance strings / incompatible frequencies raise.
        msg = "unit abbreviation w/o a number"
        with pytest.raises(ValueError, match=msg):
            idx.get_loc("2000-01-10", method="nearest", tolerance="foo")
        msg = "Input has different freq=None from PeriodArray\\(freq=D\\)"
        with pytest.raises(ValueError, match=msg):
            idx.get_loc("2000-01-10", method="nearest", tolerance="1 hour")
        with pytest.raises(KeyError, match=r"^Period\('2000-01-10', 'D'\)$"):
            idx.get_loc("2000-01-10", method="nearest", tolerance="1 day")
        # A list-like tolerance must match the target length.
        with pytest.raises(
            ValueError, match="list-like tolerance size must match target index size"
        ):
            idx.get_loc(
                "2000-01-10",
                method="nearest",
                tolerance=[
                    pd.Timedelta("1 day").to_timedelta64(),
                    pd.Timedelta("1 day").to_timedelta64(),
                ],
            )
    # TODO: This method came from test_period; de-dup with version above
    def test_get_indexer2(self):
        idx = pd.period_range("2000-01-01", periods=3).asfreq("H", how="start")
        tm.assert_numpy_array_equal(
            idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
        )
        target = pd.PeriodIndex(
            ["1999-12-31T23", "2000-01-01T12", "2000-01-02T01"], freq="H"
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
        )
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "nearest", tolerance="1 hour"),
            np.array([0, -1, 1], dtype=np.intp),
        )
        # A tolerance whose frequency is incompatible with the index raises.
        msg = "Input has different freq=None from PeriodArray\\(freq=H\\)"
        with pytest.raises(ValueError, match=msg):
            idx.get_indexer(target, "nearest", tolerance="1 minute")
        tm.assert_numpy_array_equal(
            idx.get_indexer(target, "nearest", tolerance="1 day"),
            np.array([0, 1, 1], dtype=np.intp),
        )
        # Per-element (list-like) tolerance is supported...
        tol_raw = [
            pd.Timedelta("1 hour"),
            pd.Timedelta("1 hour"),
            np.timedelta64(1, "D"),
        ]
        tm.assert_numpy_array_equal(
            idx.get_indexer(
                target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw]
            ),
            np.array([0, -1, 1], dtype=np.intp),
        )
        # ...but every element must be frequency-compatible ("M" is not).
        tol_bad = [
            pd.Timedelta("2 hour").to_timedelta64(),
            pd.Timedelta("1 hour").to_timedelta64(),
            np.timedelta64(1, "M"),
        ]
        with pytest.raises(
            libperiod.IncompatibleFrequency, match="Input has different freq=None from"
        ):
            idx.get_indexer(target, "nearest", tolerance=tol_bad)
    def test_indexing(self):
        # GH 4390, iat incorrectly indexing
        index = period_range("1/1/2001", periods=10)
        s = Series(np.random.randn(10), index=index)
        # Positional .iat must agree with label lookup of the first entry.
        expected = s[index[0]]
        result = s.iat[0]
        assert expected == result
    def test_period_index_indexer(self):
        # GH4125: .loc with the index itself, a list of its labels, or a
        # slice of its labels round-trips the frame unchanged.
        idx = pd.period_range("2002-01", "2003-12", freq="M")
        df = pd.DataFrame(np.random.randn(24, 10), index=idx)
        tm.assert_frame_equal(df, df.loc[idx])
        tm.assert_frame_equal(df, df.loc[list(idx)])
        tm.assert_frame_equal(df, df.loc[list(idx)])
        tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
        tm.assert_frame_equal(df, df.loc[list(idx)])
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.